Diffstat (limited to 'drivers/net')
263 files changed, 15316 insertions, 6993 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 9e63b8c43f3e..950a09f021dd 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -583,18 +583,7 @@ config FUJITSU_ES This driver provides support for Extended Socket network device on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series. -config USB4_NET - tristate "Networking over USB4 and Thunderbolt cables" - depends on USB4 && INET - help - Select this if you want to create network between two computers - over a USB4 and Thunderbolt cables. The driver supports Apple - ThunderboltIP protocol and allows communication with any host - supporting the same protocol including Windows and macOS. - - To compile this driver a module, choose M here. The module will be - called thunderbolt-net. - +source "drivers/net/thunderbolt/Kconfig" source "drivers/net/hyperv/Kconfig" config NETDEVSIM diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 6ce076462dbf..e26f98f897c5 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -84,8 +84,6 @@ obj-$(CONFIG_HYPERV_NET) += hyperv/ obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o obj-$(CONFIG_FUJITSU_ES) += fjes/ - -thunderbolt-net-y += thunderbolt.o -obj-$(CONFIG_USB4_NET) += thunderbolt-net.o +obj-$(CONFIG_USB4_NET) += thunderbolt/ obj-$(CONFIG_NETDEVSIM) += netdevsim/ obj-$(CONFIG_NET_FAILOVER) += net_failover.o diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 2e270b479143..cbe831875347 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -15,6 +15,9 @@ #include "lan9303.h" +/* For the LAN9303 and LAN9354, only port 0 is an XMII port. */ +#define IS_PORT_XMII(port) ((port) == 0) + #define LAN9303_NUM_PORTS 3 /* 13.2 System Control and Status Registers @@ -50,6 +53,9 @@ #define LAN9303_MANUAL_FC_1 0x68 #define LAN9303_MANUAL_FC_2 0x69 #define LAN9303_MANUAL_FC_0 0x6a +# define LAN9303_BP_EN BIT(6) +# define LAN9303_RX_FC_EN BIT(2) +# define LAN9303_TX_FC_EN BIT(1) #define LAN9303_SWITCH_CSR_DATA 0x6b #define LAN9303_SWITCH_CSR_CMD 0x6c #define LAN9303_SWITCH_CSR_CMD_BUSY BIT(31) @@ -225,6 +231,13 @@ const struct regmap_access_table lan9303_register_set = { }; EXPORT_SYMBOL(lan9303_register_set); +/* Flow Control registers indexed by port number */ +static unsigned int flow_ctl_reg[] = { + LAN9303_MANUAL_FC_0, + LAN9303_MANUAL_FC_1, + LAN9303_MANUAL_FC_2 +}; + static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg) { int ret, i; @@ -902,6 +915,7 @@ static int lan9303_setup(struct dsa_switch *ds) { struct lan9303 *chip = ds->priv; int ret; + u32 reg; /* Make sure that port 0 is the cpu port */ if (!dsa_is_cpu_port(ds, 0)) { @@ -909,6 +923,17 @@ static int lan9303_setup(struct dsa_switch *ds) return -EINVAL; } + /* Virtual Phy: Remove Turbo 200Mbit mode */ + ret = lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, ®); + if (ret) + return (ret); + + /* Clear the TURBO Mode bit if it was set. 
*/ + if (reg & LAN9303_VIRT_SPECIAL_TURBO) { + reg &= ~LAN9303_VIRT_SPECIAL_TURBO; + regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, reg); + } + ret = lan9303_setup_tagging(chip); if (ret) dev_err(chip->dev, "failed to setup port tagging %d\n", ret); @@ -1049,42 +1074,6 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, return chip->ops->phy_write(chip, phy, regnum, val); } -static void lan9303_adjust_link(struct dsa_switch *ds, int port, - struct phy_device *phydev) -{ - struct lan9303 *chip = ds->priv; - int ctl; - - if (!phy_is_pseudo_fixed_link(phydev)) - return; - - ctl = lan9303_phy_read(ds, port, MII_BMCR); - - ctl &= ~BMCR_ANENABLE; - - if (phydev->speed == SPEED_100) - ctl |= BMCR_SPEED100; - else if (phydev->speed == SPEED_10) - ctl &= ~BMCR_SPEED100; - else - dev_err(ds->dev, "unsupported speed: %d\n", phydev->speed); - - if (phydev->duplex == DUPLEX_FULL) - ctl |= BMCR_FULLDPLX; - else - ctl &= ~BMCR_FULLDPLX; - - lan9303_phy_write(ds, port, MII_BMCR, ctl); - - if (port == chip->phy_addr_base) { - /* Virtual Phy: Remove Turbo 200Mbit mode */ - lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl); - - ctl &= ~LAN9303_VIRT_SPECIAL_TURBO; - regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, ctl); - } -} - static int lan9303_port_enable(struct dsa_switch *ds, int port, struct phy_device *phy) { @@ -1281,26 +1270,96 @@ static int lan9303_port_mdb_del(struct dsa_switch *ds, int port, return 0; } +static void lan9303_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + struct lan9303 *chip = ds->priv; + + dev_dbg(chip->dev, "%s(%d) entered.", __func__, port); + + config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE | + MAC_SYM_PAUSE; + + if (port == 0) { + __set_bit(PHY_INTERFACE_MODE_RMII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_MII, + config->supported_interfaces); + } else { + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); + /* Compatibility for phylib's default interface type when the + * phy-mode property is absent + */ + __set_bit(PHY_INTERFACE_MODE_GMII, + config->supported_interfaces); + } + + /* This driver does not make use of the speed, duplex, pause or the + * advertisement in its mac_config, so it is safe to mark this driver + * as non-legacy. + */ + config->legacy_pre_march2020 = false; +} + +static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev, int speed, + int duplex, bool tx_pause, + bool rx_pause) +{ + struct lan9303 *chip = ds->priv; + u32 ctl; + u32 reg; + + /* On this device, we are only interested in doing something here if + * this is the xMII port. All other ports are 10/100 phys using MDIO + * to control there link settings. + */ + if (!IS_PORT_XMII(port)) + return; + + /* Disable auto-negotiation and force the speed/duplex settings. */ + ctl = lan9303_phy_read(ds, port, MII_BMCR); + ctl &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX); + if (speed == SPEED_100) + ctl |= BMCR_SPEED100; + if (duplex == DUPLEX_FULL) + ctl |= BMCR_FULLDPLX; + lan9303_phy_write(ds, port, MII_BMCR, ctl); + + /* Force the flow control settings. 
*/ + lan9303_read(chip->regmap, flow_ctl_reg[port], ®); + reg &= ~(LAN9303_BP_EN | LAN9303_RX_FC_EN | LAN9303_TX_FC_EN); + if (rx_pause) + reg |= (LAN9303_RX_FC_EN | LAN9303_BP_EN); + if (tx_pause) + reg |= LAN9303_TX_FC_EN; + regmap_write(chip->regmap, flow_ctl_reg[port], reg); +} + static const struct dsa_switch_ops lan9303_switch_ops = { - .get_tag_protocol = lan9303_get_tag_protocol, - .setup = lan9303_setup, - .get_strings = lan9303_get_strings, - .phy_read = lan9303_phy_read, - .phy_write = lan9303_phy_write, - .adjust_link = lan9303_adjust_link, - .get_ethtool_stats = lan9303_get_ethtool_stats, - .get_sset_count = lan9303_get_sset_count, - .port_enable = lan9303_port_enable, - .port_disable = lan9303_port_disable, - .port_bridge_join = lan9303_port_bridge_join, - .port_bridge_leave = lan9303_port_bridge_leave, - .port_stp_state_set = lan9303_port_stp_state_set, - .port_fast_age = lan9303_port_fast_age, - .port_fdb_add = lan9303_port_fdb_add, - .port_fdb_del = lan9303_port_fdb_del, - .port_fdb_dump = lan9303_port_fdb_dump, - .port_mdb_add = lan9303_port_mdb_add, - .port_mdb_del = lan9303_port_mdb_del, + .get_tag_protocol = lan9303_get_tag_protocol, + .setup = lan9303_setup, + .get_strings = lan9303_get_strings, + .phy_read = lan9303_phy_read, + .phy_write = lan9303_phy_write, + .phylink_get_caps = lan9303_phylink_get_caps, + .phylink_mac_link_up = lan9303_phylink_mac_link_up, + .get_ethtool_stats = lan9303_get_ethtool_stats, + .get_sset_count = lan9303_get_sset_count, + .port_enable = lan9303_port_enable, + .port_disable = lan9303_port_disable, + .port_bridge_join = lan9303_port_bridge_join, + .port_bridge_leave = lan9303_port_bridge_leave, + .port_stp_state_set = lan9303_port_stp_state_set, + .port_fast_age = lan9303_port_fast_age, + .port_fdb_add = lan9303_port_fdb_add, + .port_fdb_del = lan9303_port_fdb_del, + .port_fdb_dump = lan9303_port_fdb_dump, + .port_mdb_add = lan9303_port_mdb_add, + .port_mdb_del = lan9303_port_mdb_del, }; static int lan9303_register_switch(struct lan9303 *chip) diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig index 913f83ef013c..0546c573668a 100644 --- a/drivers/net/dsa/microchip/Kconfig +++ b/drivers/net/dsa/microchip/Kconfig @@ -11,6 +11,7 @@ menuconfig NET_DSA_MICROCHIP_KSZ_COMMON config NET_DSA_MICROCHIP_KSZ9477_I2C tristate "KSZ series I2C connected switch driver" depends on NET_DSA_MICROCHIP_KSZ_COMMON && I2C + depends on PTP_1588_CLOCK_OPTIONAL select REGMAP_I2C help Select to enable support for registering switches configured through I2C. @@ -18,10 +19,20 @@ config NET_DSA_MICROCHIP_KSZ9477_I2C config NET_DSA_MICROCHIP_KSZ_SPI tristate "KSZ series SPI connected switch driver" depends on NET_DSA_MICROCHIP_KSZ_COMMON && SPI + depends on PTP_1588_CLOCK_OPTIONAL select REGMAP_SPI help Select to enable support for registering switches configured through SPI. +config NET_DSA_MICROCHIP_KSZ_PTP + bool "Support for the PTP clock on the KSZ9563/LAN937x Ethernet Switch" + depends on NET_DSA_MICROCHIP_KSZ_COMMON && PTP_1588_CLOCK + help + Select to enable support for timestamping & PTP clock manipulation in + KSZ8563/KSZ9563/LAN937x series of switches. KSZ9563/KSZ8563 supports + only one step timestamping. LAN937x switch supports both one step and + two step timestamping. 
+ config NET_DSA_MICROCHIP_KSZ8863_SMI tristate "KSZ series SMI connected switch driver" depends on NET_DSA_MICROCHIP_KSZ_COMMON diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile index 28873559efc2..48360cc9fc68 100644 --- a/drivers/net/dsa/microchip/Makefile +++ b/drivers/net/dsa/microchip/Makefile @@ -4,6 +4,11 @@ ksz_switch-objs := ksz_common.o ksz_switch-objs += ksz9477.o ksz_switch-objs += ksz8795.o ksz_switch-objs += lan937x_main.o + +ifdef CONFIG_NET_DSA_MICROCHIP_KSZ_PTP +ksz_switch-objs += ksz_ptp.o +endif + obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C) += ksz9477_i2c.o obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_SPI) += ksz_spi.o obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI) += ksz8863_smi.o diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 9b20c2ee6d62..5e1e5bd555d2 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -6,6 +6,7 @@ */ #include <linux/delay.h> +#include <linux/dsa/ksz_common.h> #include <linux/export.h> #include <linux/gpio/consumer.h> #include <linux/kernel.h> @@ -25,6 +26,7 @@ #include <net/switchdev.h> #include "ksz_common.h" +#include "ksz_ptp.h" #include "ksz8.h" #include "ksz9477.h" #include "lan937x.h" @@ -2099,13 +2101,23 @@ static int ksz_setup(struct dsa_switch *ds) ret = ksz_pirq_setup(dev, dp->index); if (ret) goto out_girq; + + ret = ksz_ptp_irq_setup(ds, dp->index); + if (ret) + goto out_pirq; } } + ret = ksz_ptp_clock_register(ds); + if (ret) { + dev_err(dev->dev, "Failed to register PTP clock: %d\n", ret); + goto out_ptpirq; + } + ret = ksz_mdio_register(dev); if (ret < 0) { dev_err(dev->dev, "failed to register the mdio"); - goto out_pirq; + goto out_ptp_clock_unregister; } /* start switch */ @@ -2114,6 +2126,12 @@ static int ksz_setup(struct dsa_switch *ds) return 0; +out_ptp_clock_unregister: + ksz_ptp_clock_unregister(ds); +out_ptpirq: + if (dev->irq > 0) + dsa_switch_for_each_user_port(dp, dev->ds) + ksz_ptp_irq_free(ds, dp->index); out_pirq: if (dev->irq > 0) dsa_switch_for_each_user_port(dp, dev->ds) @@ -2130,9 +2148,14 @@ static void ksz_teardown(struct dsa_switch *ds) struct ksz_device *dev = ds->priv; struct dsa_port *dp; + ksz_ptp_clock_unregister(ds); + if (dev->irq > 0) { - dsa_switch_for_each_user_port(dp, dev->ds) + dsa_switch_for_each_user_port(dp, dev->ds) { + ksz_ptp_irq_free(ds, dp->index); + ksz_irq_free(&dev->ports[dp->index].pirq); + } ksz_irq_free(&dev->girq); } @@ -2517,6 +2540,17 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds, return proto; } +static int ksz_connect_tag_protocol(struct dsa_switch *ds, + enum dsa_tag_protocol proto) +{ + struct ksz_tagger_data *tagger_data; + + tagger_data = ksz_tagger_data(ds); + tagger_data->xmit_work_fn = ksz_port_deferred_xmit; + + return 0; +} + static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag, struct netlink_ext_ack *extack) { @@ -2932,6 +2966,7 @@ static int ksz_switch_detect(struct ksz_device *dev) static const struct dsa_switch_ops ksz_switch_ops = { .get_tag_protocol = ksz_get_tag_protocol, + .connect_tag_protocol = ksz_connect_tag_protocol, .get_phy_flags = ksz_get_phy_flags, .setup = ksz_setup, .teardown = ksz_teardown, @@ -2966,6 +3001,11 @@ static const struct dsa_switch_ops ksz_switch_ops = { .get_pause_stats = ksz_get_pause_stats, .port_change_mtu = ksz_change_mtu, .port_max_mtu = ksz_max_mtu, + .get_ts_info = ksz_get_ts_info, + .port_hwtstamp_get = ksz_hwtstamp_get, + .port_hwtstamp_set = 
ksz_hwtstamp_set, + .port_txtstamp = ksz_port_txtstamp, + .port_rxtstamp = ksz_port_rxtstamp, }; struct ksz_device *ksz_switch_alloc(struct device *base, void *priv) diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 055d61ff3fb8..7260528e5c57 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -15,9 +15,12 @@ #include <net/dsa.h> #include <linux/irq.h> +#include "ksz_ptp.h" + #define KSZ_MAX_NUM_PORTS 8 struct ksz_device; +struct ksz_port; struct vlan_table { u32 table[3]; @@ -81,6 +84,14 @@ struct ksz_irq { struct ksz_device *dev; }; +struct ksz_ptp_irq { + struct ksz_port *port; + u16 ts_reg; + bool ts_en; + char name[16]; + int num; +}; + struct ksz_port { bool remove_tag; /* Remove Tag flag set, for ksz8795 only */ bool learning; @@ -100,6 +111,15 @@ struct ksz_port { struct ksz_device *ksz_dev; struct ksz_irq pirq; u8 num; +#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP) + struct hwtstamp_config tstamp_config; + bool hwts_tx_en; + bool hwts_rx_en; + struct ksz_irq ptpirq; + struct ksz_ptp_irq ptpmsg_irq[3]; + ktime_t tstamp_msg; + struct completion tstamp_msg_comp; +#endif }; struct ksz_device { @@ -140,6 +160,7 @@ struct ksz_device { u16 port_mask; struct mutex lock_irq; /* IRQ Access */ struct ksz_irq girq; + struct ksz_ptp_data ptp_data; }; /* List of supported models */ @@ -443,6 +464,32 @@ static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value) return ret; } +static inline int ksz_rmw16(struct ksz_device *dev, u32 reg, u16 mask, + u16 value) +{ + int ret; + + ret = regmap_update_bits(dev->regmap[1], reg, mask, value); + if (ret) + dev_err(dev->dev, "can't rmw 16bit reg 0x%x: %pe\n", reg, + ERR_PTR(ret)); + + return ret; +} + +static inline int ksz_rmw32(struct ksz_device *dev, u32 reg, u32 mask, + u32 value) +{ + int ret; + + ret = regmap_update_bits(dev->regmap[2], reg, mask, value); + if (ret) + dev_err(dev->dev, "can't rmw 32bit reg 0x%x: %pe\n", reg, + ERR_PTR(ret)); + + return ret; +} + static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value) { u32 val[2]; @@ -591,6 +638,7 @@ static inline int is_lan937x(struct ksz_device *dev) #define REG_PORT_INT_MASK 0x001F #define PORT_SRC_PHY_INT 1 +#define PORT_SRC_PTP_INT 2 #define KSZ8795_HUGE_PACKET_SIZE 2000 #define KSZ8863_HUGE_PACKET_SIZE 1916 diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c new file mode 100644 index 000000000000..4e22a695a64c --- /dev/null +++ b/drivers/net/dsa/microchip/ksz_ptp.c @@ -0,0 +1,1201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Microchip KSZ PTP Implementation + * + * Copyright (C) 2020 ARRI Lighting + * Copyright (C) 2022 Microchip Technology Inc. 
+ */ + +#include <linux/dsa/ksz_common.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/ptp_classify.h> +#include <linux/ptp_clock_kernel.h> + +#include "ksz_common.h" +#include "ksz_ptp.h" +#include "ksz_ptp_reg.h" + +#define ptp_caps_to_data(d) container_of((d), struct ksz_ptp_data, caps) +#define ptp_data_to_ksz_dev(d) container_of((d), struct ksz_device, ptp_data) +#define work_to_xmit_work(w) \ + container_of((w), struct ksz_deferred_xmit_work, work) + +/* Sub-nanoseconds-adj,max * sub-nanoseconds / 40ns * 1ns + * = (2^30-1) * (2 ^ 32) / 40 ns * 1 ns = 6249999 + */ +#define KSZ_MAX_DRIFT_CORR 6249999 +#define KSZ_MAX_PULSE_WIDTH 125000000LL + +#define KSZ_PTP_INC_NS 40ULL /* HW clock is incremented every 40 ns (by 40) */ +#define KSZ_PTP_SUBNS_BITS 32 + +#define KSZ_PTP_INT_START 13 + +static int ksz_ptp_tou_gpio(struct ksz_device *dev) +{ + int ret; + + if (!is_lan937x(dev)) + return 0; + + ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, GPIO_OUT, + GPIO_OUT); + if (ret) + return ret; + + ret = ksz_rmw32(dev, REG_SW_GLOBAL_LED_OVR__4, LED_OVR_1 | LED_OVR_2, + LED_OVR_1 | LED_OVR_2); + if (ret) + return ret; + + return ksz_rmw32(dev, REG_SW_GLOBAL_LED_SRC__4, + LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2, + LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2); +} + +static int ksz_ptp_tou_reset(struct ksz_device *dev, u8 unit) +{ + u32 data; + int ret; + + /* Reset trigger unit (clears TRIGGER_EN, but not GPIOSTATx) */ + ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_RESET, TRIG_RESET); + + data = FIELD_PREP(TRIG_DONE_M, BIT(unit)); + ret = ksz_write32(dev, REG_PTP_TRIG_STATUS__4, data); + if (ret) + return ret; + + data = FIELD_PREP(TRIG_INT_M, BIT(unit)); + ret = ksz_write32(dev, REG_PTP_INT_STATUS__4, data); + if (ret) + return ret; + + /* Clear reset and set GPIO direction */ + return ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, (TRIG_RESET | TRIG_ENABLE), + 0); +} + +static int ksz_ptp_tou_pulse_verify(u64 pulse_ns) +{ + u32 data; + + if (pulse_ns & 0x3) + return -EINVAL; + + data = (pulse_ns / 8); + if (!FIELD_FIT(TRIG_PULSE_WIDTH_M, data)) + return -ERANGE; + + return 0; +} + +static int ksz_ptp_tou_target_time_set(struct ksz_device *dev, + struct timespec64 const *ts) +{ + int ret; + + /* Hardware has only 32 bit */ + if ((ts->tv_sec & 0xffffffff) != ts->tv_sec) + return -EINVAL; + + ret = ksz_write32(dev, REG_TRIG_TARGET_NANOSEC, ts->tv_nsec); + if (ret) + return ret; + + ret = ksz_write32(dev, REG_TRIG_TARGET_SEC, ts->tv_sec); + if (ret) + return ret; + + return 0; +} + +static int ksz_ptp_tou_start(struct ksz_device *dev, u8 unit) +{ + u32 data; + int ret; + + ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_ENABLE, TRIG_ENABLE); + if (ret) + return ret; + + /* Check error flag: + * - the ACTIVE flag is NOT cleared an error! 
+ */ + ret = ksz_read32(dev, REG_PTP_TRIG_STATUS__4, &data); + if (ret) + return ret; + + if (FIELD_GET(TRIG_ERROR_M, data) & (1 << unit)) { + dev_err(dev->dev, "%s: Trigger unit%d error!\n", __func__, + unit); + ret = -EIO; + /* Unit will be reset on next access */ + return ret; + } + + return 0; +} + +static int ksz_ptp_configure_perout(struct ksz_device *dev, + u32 cycle_width_ns, u32 pulse_width_ns, + struct timespec64 const *target_time, + u8 index) +{ + u32 data; + int ret; + + data = FIELD_PREP(TRIG_NOTIFY, 1) | + FIELD_PREP(TRIG_GPO_M, index) | + FIELD_PREP(TRIG_PATTERN_M, TRIG_POS_PERIOD); + ret = ksz_write32(dev, REG_TRIG_CTRL__4, data); + if (ret) + return ret; + + ret = ksz_write32(dev, REG_TRIG_CYCLE_WIDTH, cycle_width_ns); + if (ret) + return ret; + + /* Set cycle count 0 - Infinite */ + ret = ksz_rmw32(dev, REG_TRIG_CYCLE_CNT, TRIG_CYCLE_CNT_M, 0); + if (ret) + return ret; + + data = (pulse_width_ns / 8); + ret = ksz_write32(dev, REG_TRIG_PULSE_WIDTH__4, data); + if (ret) + return ret; + + ret = ksz_ptp_tou_target_time_set(dev, target_time); + if (ret) + return ret; + + return 0; +} + +static int ksz_ptp_enable_perout(struct ksz_device *dev, + struct ptp_perout_request const *request, + int on) +{ + struct ksz_ptp_data *ptp_data = &dev->ptp_data; + u64 req_pulse_width_ns; + u64 cycle_width_ns; + u64 pulse_width_ns; + int pin = 0; + u32 data32; + int ret; + + if (request->flags & ~PTP_PEROUT_DUTY_CYCLE) + return -EOPNOTSUPP; + + if (ptp_data->tou_mode != KSZ_PTP_TOU_PEROUT && + ptp_data->tou_mode != KSZ_PTP_TOU_IDLE) + return -EBUSY; + + pin = ptp_find_pin(ptp_data->clock, PTP_PF_PEROUT, request->index); + if (pin < 0) + return -EINVAL; + + data32 = FIELD_PREP(PTP_GPIO_INDEX, pin) | + FIELD_PREP(PTP_TOU_INDEX, request->index); + ret = ksz_rmw32(dev, REG_PTP_UNIT_INDEX__4, + PTP_GPIO_INDEX | PTP_TOU_INDEX, data32); + if (ret) + return ret; + + ret = ksz_ptp_tou_reset(dev, request->index); + if (ret) + return ret; + + if (!on) { + ptp_data->tou_mode = KSZ_PTP_TOU_IDLE; + return 0; + } + + ptp_data->perout_target_time_first.tv_sec = request->start.sec; + ptp_data->perout_target_time_first.tv_nsec = request->start.nsec; + + ptp_data->perout_period.tv_sec = request->period.sec; + ptp_data->perout_period.tv_nsec = request->period.nsec; + + cycle_width_ns = timespec64_to_ns(&ptp_data->perout_period); + if ((cycle_width_ns & TRIG_CYCLE_WIDTH_M) != cycle_width_ns) + return -EINVAL; + + if (request->flags & PTP_PEROUT_DUTY_CYCLE) { + pulse_width_ns = request->on.sec * NSEC_PER_SEC + + request->on.nsec; + } else { + /* Use a duty cycle of 50%. Maximum pulse width supported by the + * hardware is a little bit more than 125 ms. 
+ */ + req_pulse_width_ns = (request->period.sec * NSEC_PER_SEC + + request->period.nsec) / 2; + pulse_width_ns = min_t(u64, req_pulse_width_ns, + KSZ_MAX_PULSE_WIDTH); + } + + ret = ksz_ptp_tou_pulse_verify(pulse_width_ns); + if (ret) + return ret; + + ret = ksz_ptp_configure_perout(dev, cycle_width_ns, pulse_width_ns, + &ptp_data->perout_target_time_first, + pin); + if (ret) + return ret; + + ret = ksz_ptp_tou_gpio(dev); + if (ret) + return ret; + + ret = ksz_ptp_tou_start(dev, request->index); + if (ret) + return ret; + + ptp_data->tou_mode = KSZ_PTP_TOU_PEROUT; + + return 0; +} + +static int ksz_ptp_enable_mode(struct ksz_device *dev) +{ + struct ksz_tagger_data *tagger_data = ksz_tagger_data(dev->ds); + struct ksz_ptp_data *ptp_data = &dev->ptp_data; + struct ksz_port *prt; + struct dsa_port *dp; + bool tag_en = false; + int ret; + + dsa_switch_for_each_user_port(dp, dev->ds) { + prt = &dev->ports[dp->index]; + if (prt->hwts_tx_en || prt->hwts_rx_en) { + tag_en = true; + break; + } + } + + if (tag_en) { + ret = ptp_schedule_worker(ptp_data->clock, 0); + if (ret) + return ret; + } else { + ptp_cancel_worker_sync(ptp_data->clock); + } + + tagger_data->hwtstamp_set_state(dev->ds, tag_en); + + return ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_ENABLE, + tag_en ? PTP_ENABLE : 0); +} + +/* The function is return back the capability of timestamping feature when + * requested through ethtool -T <interface> utility + */ +int ksz_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts) +{ + struct ksz_device *dev = ds->priv; + struct ksz_ptp_data *ptp_data; + + ptp_data = &dev->ptp_data; + + if (!ptp_data->clock) + return -ENODEV; + + ts->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + ts->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ONESTEP_P2P); + + if (is_lan937x(dev)) + ts->tx_types |= BIT(HWTSTAMP_TX_ON); + + ts->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + ts->phc_index = ptp_clock_index(ptp_data->clock); + + return 0; +} + +int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr) +{ + struct ksz_device *dev = ds->priv; + struct hwtstamp_config *config; + struct ksz_port *prt; + + prt = &dev->ports[port]; + config = &prt->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 
+ -EFAULT : 0; +} + +static int ksz_set_hwtstamp_config(struct ksz_device *dev, + struct ksz_port *prt, + struct hwtstamp_config *config) +{ + int ret; + + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false; + prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = false; + prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false; + prt->hwts_tx_en = false; + break; + case HWTSTAMP_TX_ONESTEP_P2P: + prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false; + prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true; + prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false; + prt->hwts_tx_en = true; + + ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, PTP_1STEP); + if (ret) + return ret; + + break; + case HWTSTAMP_TX_ON: + if (!is_lan937x(dev)) + return -ERANGE; + + prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = true; + prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true; + prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = true; + prt->hwts_tx_en = true; + + ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, 0); + if (ret) + return ret; + + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + prt->hwts_rx_en = false; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + prt->hwts_rx_en = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + prt->hwts_rx_en = true; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + prt->hwts_rx_en = true; + break; + default: + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + return ksz_ptp_enable_mode(dev); +} + +int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) +{ + struct ksz_device *dev = ds->priv; + struct hwtstamp_config config; + struct ksz_port *prt; + int ret; + + prt = &dev->ports[port]; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + ret = ksz_set_hwtstamp_config(dev, prt, &config); + if (ret) + return ret; + + memcpy(&prt->tstamp_config, &config, sizeof(config)); + + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) + return -EFAULT; + + return 0; +} + +static ktime_t ksz_tstamp_reconstruct(struct ksz_device *dev, ktime_t tstamp) +{ + struct timespec64 ptp_clock_time; + struct ksz_ptp_data *ptp_data; + struct timespec64 diff; + struct timespec64 ts; + + ptp_data = &dev->ptp_data; + ts = ktime_to_timespec64(tstamp); + + spin_lock_bh(&ptp_data->clock_lock); + ptp_clock_time = ptp_data->clock_time; + spin_unlock_bh(&ptp_data->clock_lock); + + /* calculate full time from partial time stamp */ + ts.tv_sec = (ptp_clock_time.tv_sec & ~3) | ts.tv_sec; + + /* find nearest possible point in time */ + diff = timespec64_sub(ts, ptp_clock_time); + if (diff.tv_sec > 2) + ts.tv_sec -= 4; + else if (diff.tv_sec < -2) + ts.tv_sec += 4; + + return timespec64_to_ktime(ts); +} + +bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb, + unsigned int type) +{ + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); + struct ksz_device *dev = ds->priv; + struct ptp_header *ptp_hdr; + struct ksz_port *prt; + u8 ptp_msg_type; + ktime_t tstamp; + s64 correction; + + prt = &dev->ports[port]; + + tstamp = KSZ_SKB_CB(skb)->tstamp; + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ksz_tstamp_reconstruct(dev, tstamp); + + if 
(prt->tstamp_config.tx_type != HWTSTAMP_TX_ONESTEP_P2P) + goto out; + + ptp_hdr = ptp_parse_header(skb, type); + if (!ptp_hdr) + goto out; + + ptp_msg_type = ptp_get_msgtype(ptp_hdr, type); + if (ptp_msg_type != PTP_MSGTYPE_PDELAY_REQ) + goto out; + + /* Only subtract the partial time stamp from the correction field. When + * the hardware adds the egress time stamp to the correction field of + * the PDelay_Resp message on tx, also only the partial time stamp will + * be added. + */ + correction = (s64)get_unaligned_be64(&ptp_hdr->correction); + correction -= ktime_to_ns(tstamp) << 16; + + ptp_header_update_correction(skb, type, ptp_hdr, correction); + +out: + return false; +} + +void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb) +{ + struct ksz_device *dev = ds->priv; + struct ptp_header *hdr; + struct sk_buff *clone; + struct ksz_port *prt; + unsigned int type; + u8 ptp_msg_type; + + prt = &dev->ports[port]; + + if (!prt->hwts_tx_en) + return; + + type = ptp_classify_raw(skb); + if (type == PTP_CLASS_NONE) + return; + + hdr = ptp_parse_header(skb, type); + if (!hdr) + return; + + ptp_msg_type = ptp_get_msgtype(hdr, type); + + switch (ptp_msg_type) { + case PTP_MSGTYPE_SYNC: + if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P) + return; + break; + case PTP_MSGTYPE_PDELAY_REQ: + break; + case PTP_MSGTYPE_PDELAY_RESP: + if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P) { + KSZ_SKB_CB(skb)->ptp_type = type; + KSZ_SKB_CB(skb)->update_correction = true; + return; + } + break; + + default: + return; + } + + clone = skb_clone_sk(skb); + if (!clone) + return; + + /* caching the value to be used in tag_ksz.c */ + KSZ_SKB_CB(skb)->clone = clone; +} + +static void ksz_ptp_txtstamp_skb(struct ksz_device *dev, + struct ksz_port *prt, struct sk_buff *skb) +{ + struct skb_shared_hwtstamps hwtstamps = {}; + int ret; + + /* timeout must include DSA master to transmit data, tstamp latency, + * IRQ latency and time for reading the time stamp. 
+ */ + ret = wait_for_completion_timeout(&prt->tstamp_msg_comp, + msecs_to_jiffies(100)); + if (!ret) + return; + + hwtstamps.hwtstamp = prt->tstamp_msg; + skb_complete_tx_timestamp(skb, &hwtstamps); +} + +void ksz_port_deferred_xmit(struct kthread_work *work) +{ + struct ksz_deferred_xmit_work *xmit_work = work_to_xmit_work(work); + struct sk_buff *clone, *skb = xmit_work->skb; + struct dsa_switch *ds = xmit_work->dp->ds; + struct ksz_device *dev = ds->priv; + struct ksz_port *prt; + + prt = &dev->ports[xmit_work->dp->index]; + + clone = KSZ_SKB_CB(skb)->clone; + + skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; + + reinit_completion(&prt->tstamp_msg_comp); + + dsa_enqueue_skb(skb, skb->dev); + + ksz_ptp_txtstamp_skb(dev, prt, clone); + + kfree(xmit_work); +} + +static int _ksz_ptp_gettime(struct ksz_device *dev, struct timespec64 *ts) +{ + u32 nanoseconds; + u32 seconds; + u8 phase; + int ret; + + /* Copy current PTP clock into shadow registers and read */ + ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_READ_TIME, PTP_READ_TIME); + if (ret) + return ret; + + ret = ksz_read8(dev, REG_PTP_RTC_SUB_NANOSEC__2, &phase); + if (ret) + return ret; + + ret = ksz_read32(dev, REG_PTP_RTC_NANOSEC, &nanoseconds); + if (ret) + return ret; + + ret = ksz_read32(dev, REG_PTP_RTC_SEC, &seconds); + if (ret) + return ret; + + ts->tv_sec = seconds; + ts->tv_nsec = nanoseconds + phase * 8; + + return 0; +} + +static int ksz_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp); + struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data); + int ret; + + mutex_lock(&ptp_data->lock); + ret = _ksz_ptp_gettime(dev, ts); + mutex_unlock(&ptp_data->lock); + + return ret; +} + +static int ksz_ptp_restart_perout(struct ksz_device *dev) +{ + struct ksz_ptp_data *ptp_data = &dev->ptp_data; + s64 now_ns, first_ns, period_ns, next_ns; + struct ptp_perout_request request; + struct timespec64 next; + struct timespec64 now; + unsigned int count; + int ret; + + dev_info(dev->dev, "Restarting periodic output signal\n"); + + ret = _ksz_ptp_gettime(dev, &now); + if (ret) + return ret; + + now_ns = timespec64_to_ns(&now); + first_ns = timespec64_to_ns(&ptp_data->perout_target_time_first); + + /* Calculate next perout event based on start time and period */ + period_ns = timespec64_to_ns(&ptp_data->perout_period); + + if (first_ns < now_ns) { + count = div_u64(now_ns - first_ns, period_ns); + next_ns = first_ns + count * period_ns; + } else { + next_ns = first_ns; + } + + /* Ensure 100 ms guard time prior next event */ + while (next_ns < now_ns + 100000000) + next_ns += period_ns; + + /* Restart periodic output signal */ + next = ns_to_timespec64(next_ns); + request.start.sec = next.tv_sec; + request.start.nsec = next.tv_nsec; + request.period.sec = ptp_data->perout_period.tv_sec; + request.period.nsec = ptp_data->perout_period.tv_nsec; + request.index = 0; + request.flags = 0; + + return ksz_ptp_enable_perout(dev, &request, 1); +} + +static int ksz_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp); + struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data); + int ret; + + mutex_lock(&ptp_data->lock); + + /* Write to shadow registers and Load PTP clock */ + ret = ksz_write16(dev, REG_PTP_RTC_SUB_NANOSEC__2, PTP_RTC_0NS); + if (ret) + goto unlock; + + ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, ts->tv_nsec); + if (ret) + goto unlock; + + ret = ksz_write32(dev, REG_PTP_RTC_SEC, ts->tv_sec); + 
if (ret) + goto unlock; + + ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_LOAD_TIME, PTP_LOAD_TIME); + if (ret) + goto unlock; + + switch (ptp_data->tou_mode) { + case KSZ_PTP_TOU_IDLE: + break; + + case KSZ_PTP_TOU_PEROUT: + ret = ksz_ptp_restart_perout(dev); + if (ret) + goto unlock; + + break; + } + + spin_lock_bh(&ptp_data->clock_lock); + ptp_data->clock_time = *ts; + spin_unlock_bh(&ptp_data->clock_lock); + +unlock: + mutex_unlock(&ptp_data->lock); + + return ret; +} + +static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp); + struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data); + u64 base, adj; + bool negative; + u32 data32; + int ret; + + mutex_lock(&ptp_data->lock); + + if (scaled_ppm) { + base = KSZ_PTP_INC_NS << KSZ_PTP_SUBNS_BITS; + negative = diff_by_scaled_ppm(base, scaled_ppm, &adj); + + data32 = (u32)adj; + data32 &= PTP_SUBNANOSEC_M; + if (!negative) + data32 |= PTP_RATE_DIR; + + ret = ksz_write32(dev, REG_PTP_SUBNANOSEC_RATE, data32); + if (ret) + goto unlock; + + ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE, + PTP_CLK_ADJ_ENABLE); + if (ret) + goto unlock; + } else { + ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE, 0); + if (ret) + goto unlock; + } + +unlock: + mutex_unlock(&ptp_data->lock); + return ret; +} + +static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp); + struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data); + struct timespec64 delta64 = ns_to_timespec64(delta); + s32 sec, nsec; + u16 data16; + int ret; + + mutex_lock(&ptp_data->lock); + + /* do not use ns_to_timespec64(), + * both sec and nsec are subtracted by hw + */ + sec = div_s64_rem(delta, NSEC_PER_SEC, &nsec); + + ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, abs(nsec)); + if (ret) + goto unlock; + + ret = ksz_write32(dev, REG_PTP_RTC_SEC, abs(sec)); + if (ret) + goto unlock; + + ret = ksz_read16(dev, REG_PTP_CLK_CTRL, &data16); + if (ret) + goto unlock; + + data16 |= PTP_STEP_ADJ; + + /* PTP_STEP_DIR -- 0: subtract, 1: add */ + if (delta < 0) + data16 &= ~PTP_STEP_DIR; + else + data16 |= PTP_STEP_DIR; + + ret = ksz_write16(dev, REG_PTP_CLK_CTRL, data16); + if (ret) + goto unlock; + + switch (ptp_data->tou_mode) { + case KSZ_PTP_TOU_IDLE: + break; + + case KSZ_PTP_TOU_PEROUT: + ret = ksz_ptp_restart_perout(dev); + if (ret) + goto unlock; + + break; + } + + spin_lock_bh(&ptp_data->clock_lock); + ptp_data->clock_time = timespec64_add(ptp_data->clock_time, delta64); + spin_unlock_bh(&ptp_data->clock_lock); + +unlock: + mutex_unlock(&ptp_data->lock); + return ret; +} + +static int ksz_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *req, int on) +{ + struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp); + struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data); + int ret; + + switch (req->type) { + case PTP_CLK_REQ_PEROUT: + mutex_lock(&ptp_data->lock); + ret = ksz_ptp_enable_perout(dev, &req->perout, on); + mutex_unlock(&ptp_data->lock); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int ksz_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + int ret = 0; + + switch (func) { + case PTP_PF_NONE: + case PTP_PF_PEROUT: + break; + default: + ret = -1; + break; + } + + return ret; +} + +/* Function is pointer to the do_aux_work in the ptp_clock capability */ +static long ksz_ptp_do_aux_work(struct ptp_clock_info *ptp) +{ + struct 
ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp); + struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data); + struct timespec64 ts; + int ret; + + mutex_lock(&ptp_data->lock); + ret = _ksz_ptp_gettime(dev, &ts); + if (ret) + goto out; + + spin_lock_bh(&ptp_data->clock_lock); + ptp_data->clock_time = ts; + spin_unlock_bh(&ptp_data->clock_lock); + +out: + mutex_unlock(&ptp_data->lock); + + return HZ; /* reschedule in 1 second */ +} + +static int ksz_ptp_start_clock(struct ksz_device *dev) +{ + struct ksz_ptp_data *ptp_data = &dev->ptp_data; + int ret; + + ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ENABLE, PTP_CLK_ENABLE); + if (ret) + return ret; + + ptp_data->clock_time.tv_sec = 0; + ptp_data->clock_time.tv_nsec = 0; + + return 0; +} + +int ksz_ptp_clock_register(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + struct ksz_ptp_data *ptp_data; + int ret; + u8 i; + + ptp_data = &dev->ptp_data; + mutex_init(&ptp_data->lock); + spin_lock_init(&ptp_data->clock_lock); + + ptp_data->caps.owner = THIS_MODULE; + snprintf(ptp_data->caps.name, 16, "Microchip Clock"); + ptp_data->caps.max_adj = KSZ_MAX_DRIFT_CORR; + ptp_data->caps.gettime64 = ksz_ptp_gettime; + ptp_data->caps.settime64 = ksz_ptp_settime; + ptp_data->caps.adjfine = ksz_ptp_adjfine; + ptp_data->caps.adjtime = ksz_ptp_adjtime; + ptp_data->caps.do_aux_work = ksz_ptp_do_aux_work; + ptp_data->caps.enable = ksz_ptp_enable; + ptp_data->caps.verify = ksz_ptp_verify_pin; + ptp_data->caps.n_pins = KSZ_PTP_N_GPIO; + ptp_data->caps.n_per_out = 3; + + ret = ksz_ptp_start_clock(dev); + if (ret) + return ret; + + for (i = 0; i < KSZ_PTP_N_GPIO; i++) { + struct ptp_pin_desc *ptp_pin = &ptp_data->pin_config[i]; + + snprintf(ptp_pin->name, + sizeof(ptp_pin->name), "ksz_ptp_pin_%02d", i); + ptp_pin->index = i; + ptp_pin->func = PTP_PF_NONE; + } + + ptp_data->caps.pin_config = ptp_data->pin_config; + + /* Currently only P2P mode is supported. When 802_1AS bit is set, it + * forwards all PTP packets to host port and none to other ports. 
+ */ + ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_TC_P2P | PTP_802_1AS, + PTP_TC_P2P | PTP_802_1AS); + if (ret) + return ret; + + ptp_data->clock = ptp_clock_register(&ptp_data->caps, dev->dev); + if (IS_ERR_OR_NULL(ptp_data->clock)) + return PTR_ERR(ptp_data->clock); + + return 0; +} + +void ksz_ptp_clock_unregister(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + struct ksz_ptp_data *ptp_data; + + ptp_data = &dev->ptp_data; + + if (ptp_data->clock) + ptp_clock_unregister(ptp_data->clock); +} + +static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id) +{ + struct ksz_ptp_irq *ptpmsg_irq = dev_id; + struct ksz_device *dev; + struct ksz_port *port; + u32 tstamp_raw; + ktime_t tstamp; + int ret; + + port = ptpmsg_irq->port; + dev = port->ksz_dev; + + if (ptpmsg_irq->ts_en) { + ret = ksz_read32(dev, ptpmsg_irq->ts_reg, &tstamp_raw); + if (ret) + return IRQ_NONE; + + tstamp = ksz_decode_tstamp(tstamp_raw); + + port->tstamp_msg = ksz_tstamp_reconstruct(dev, tstamp); + + complete(&port->tstamp_msg_comp); + } + + return IRQ_HANDLED; +} + +static irqreturn_t ksz_ptp_irq_thread_fn(int irq, void *dev_id) +{ + struct ksz_irq *ptpirq = dev_id; + unsigned int nhandled = 0; + struct ksz_device *dev; + unsigned int sub_irq; + u16 data; + int ret; + u8 n; + + dev = ptpirq->dev; + + ret = ksz_read16(dev, ptpirq->reg_status, &data); + if (ret) + goto out; + + /* Clear the interrupts W1C */ + ret = ksz_write16(dev, ptpirq->reg_status, data); + if (ret) + return IRQ_NONE; + + for (n = 0; n < ptpirq->nirqs; ++n) { + if (data & BIT(n + KSZ_PTP_INT_START)) { + sub_irq = irq_find_mapping(ptpirq->domain, n); + handle_nested_irq(sub_irq); + ++nhandled; + } + } + +out: + return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); +} + +static void ksz_ptp_irq_mask(struct irq_data *d) +{ + struct ksz_irq *kirq = irq_data_get_irq_chip_data(d); + + kirq->masked &= ~BIT(d->hwirq + KSZ_PTP_INT_START); +} + +static void ksz_ptp_irq_unmask(struct irq_data *d) +{ + struct ksz_irq *kirq = irq_data_get_irq_chip_data(d); + + kirq->masked |= BIT(d->hwirq + KSZ_PTP_INT_START); +} + +static void ksz_ptp_irq_bus_lock(struct irq_data *d) +{ + struct ksz_irq *kirq = irq_data_get_irq_chip_data(d); + + mutex_lock(&kirq->dev->lock_irq); +} + +static void ksz_ptp_irq_bus_sync_unlock(struct irq_data *d) +{ + struct ksz_irq *kirq = irq_data_get_irq_chip_data(d); + struct ksz_device *dev = kirq->dev; + int ret; + + ret = ksz_write16(dev, kirq->reg_mask, kirq->masked); + if (ret) + dev_err(dev->dev, "failed to change IRQ mask\n"); + + mutex_unlock(&dev->lock_irq); +} + +static const struct irq_chip ksz_ptp_irq_chip = { + .name = "ksz-irq", + .irq_mask = ksz_ptp_irq_mask, + .irq_unmask = ksz_ptp_irq_unmask, + .irq_bus_lock = ksz_ptp_irq_bus_lock, + .irq_bus_sync_unlock = ksz_ptp_irq_bus_sync_unlock, +}; + +static int ksz_ptp_irq_domain_map(struct irq_domain *d, + unsigned int irq, irq_hw_number_t hwirq) +{ + irq_set_chip_data(irq, d->host_data); + irq_set_chip_and_handler(irq, &ksz_ptp_irq_chip, handle_level_irq); + irq_set_noprobe(irq); + + return 0; +} + +static const struct irq_domain_ops ksz_ptp_irq_domain_ops = { + .map = ksz_ptp_irq_domain_map, + .xlate = irq_domain_xlate_twocell, +}; + +static void ksz_ptp_msg_irq_free(struct ksz_port *port, u8 n) +{ + struct ksz_ptp_irq *ptpmsg_irq; + + ptpmsg_irq = &port->ptpmsg_irq[n]; + + free_irq(ptpmsg_irq->num, ptpmsg_irq); + irq_dispose_mapping(ptpmsg_irq->num); +} + +static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n) +{ + u16 ts_reg[] = {REG_PTP_PORT_PDRESP_TS, 
REG_PTP_PORT_XDELAY_TS, + REG_PTP_PORT_SYNC_TS}; + static const char * const name[] = {"pdresp-msg", "xdreq-msg", + "sync-msg"}; + const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops; + struct ksz_ptp_irq *ptpmsg_irq; + + ptpmsg_irq = &port->ptpmsg_irq[n]; + + ptpmsg_irq->port = port; + ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]); + + snprintf(ptpmsg_irq->name, sizeof(ptpmsg_irq->name), name[n]); + + ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n); + if (ptpmsg_irq->num < 0) + return ptpmsg_irq->num; + + return request_threaded_irq(ptpmsg_irq->num, NULL, + ksz_ptp_msg_thread_fn, IRQF_ONESHOT, + ptpmsg_irq->name, ptpmsg_irq); +} + +int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p) +{ + struct ksz_device *dev = ds->priv; + const struct ksz_dev_ops *ops = dev->dev_ops; + struct ksz_port *port = &dev->ports[p]; + struct ksz_irq *ptpirq = &port->ptpirq; + int irq; + int ret; + + ptpirq->dev = dev; + ptpirq->masked = 0; + ptpirq->nirqs = 3; + ptpirq->reg_mask = ops->get_port_addr(p, REG_PTP_PORT_TX_INT_ENABLE__2); + ptpirq->reg_status = ops->get_port_addr(p, + REG_PTP_PORT_TX_INT_STATUS__2); + snprintf(ptpirq->name, sizeof(ptpirq->name), "ptp-irq-%d", p); + + init_completion(&port->tstamp_msg_comp); + + ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs, + &ksz_ptp_irq_domain_ops, ptpirq); + if (!ptpirq->domain) + return -ENOMEM; + + for (irq = 0; irq < ptpirq->nirqs; irq++) + irq_create_mapping(ptpirq->domain, irq); + + ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT); + if (ptpirq->irq_num < 0) { + ret = ptpirq->irq_num; + goto out; + } + + ret = request_threaded_irq(ptpirq->irq_num, NULL, ksz_ptp_irq_thread_fn, + IRQF_ONESHOT, ptpirq->name, ptpirq); + if (ret) + goto out; + + for (irq = 0; irq < ptpirq->nirqs; irq++) { + ret = ksz_ptp_msg_irq_setup(port, irq); + if (ret) + goto out_ptp_msg; + } + + return 0; + +out_ptp_msg: + free_irq(ptpirq->irq_num, ptpirq); + while (irq--) + free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]); +out: + for (irq = 0; irq < ptpirq->nirqs; irq++) + irq_dispose_mapping(port->ptpmsg_irq[irq].num); + + irq_domain_remove(ptpirq->domain); + + return ret; +} + +void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p) +{ + struct ksz_device *dev = ds->priv; + struct ksz_port *port = &dev->ports[p]; + struct ksz_irq *ptpirq = &port->ptpirq; + u8 n; + + for (n = 0; n < ptpirq->nirqs; n++) + ksz_ptp_msg_irq_free(port, n); + + free_irq(ptpirq->irq_num, ptpirq); + irq_dispose_mapping(ptpirq->irq_num); + + irq_domain_remove(ptpirq->domain); +} + +MODULE_AUTHOR("Christian Eggers <ceggers@arri.de>"); +MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>"); +MODULE_DESCRIPTION("PTP support for KSZ switch"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/microchip/ksz_ptp.h b/drivers/net/dsa/microchip/ksz_ptp.h new file mode 100644 index 000000000000..0ca8ca4f804e --- /dev/null +++ b/drivers/net/dsa/microchip/ksz_ptp.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Microchip KSZ PTP Implementation + * + * Copyright (C) 2020 ARRI Lighting + * Copyright (C) 2022 Microchip Technology Inc. 
+ */ + +#ifndef _NET_DSA_DRIVERS_KSZ_PTP_H +#define _NET_DSA_DRIVERS_KSZ_PTP_H + +#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP) + +#include <linux/ptp_clock_kernel.h> + +#define KSZ_PTP_N_GPIO 2 + +enum ksz_ptp_tou_mode { + KSZ_PTP_TOU_IDLE, + KSZ_PTP_TOU_PEROUT, +}; + +struct ksz_ptp_data { + struct ptp_clock_info caps; + struct ptp_clock *clock; + struct ptp_pin_desc pin_config[KSZ_PTP_N_GPIO]; + /* Serializes all operations on the PTP hardware clock */ + struct mutex lock; + /* lock for accessing the clock_time */ + spinlock_t clock_lock; + struct timespec64 clock_time; + enum ksz_ptp_tou_mode tou_mode; + struct timespec64 perout_target_time_first; /* start of first pulse */ + struct timespec64 perout_period; +}; + +int ksz_ptp_clock_register(struct dsa_switch *ds); + +void ksz_ptp_clock_unregister(struct dsa_switch *ds); + +int ksz_get_ts_info(struct dsa_switch *ds, int port, + struct ethtool_ts_info *ts); +int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr); +int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr); +void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb); +void ksz_port_deferred_xmit(struct kthread_work *work); +bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb, + unsigned int type); +int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p); +void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p); + +#else + +struct ksz_ptp_data { + /* Serializes all operations on the PTP hardware clock */ + struct mutex lock; +}; + +static inline int ksz_ptp_clock_register(struct dsa_switch *ds) +{ + return 0; +} + +static inline void ksz_ptp_clock_unregister(struct dsa_switch *ds) { } + +static inline int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p) +{ + return 0; +} + +static inline void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p) {} + +#define ksz_get_ts_info NULL + +#define ksz_hwtstamp_get NULL + +#define ksz_hwtstamp_set NULL + +#define ksz_port_rxtstamp NULL + +#define ksz_port_txtstamp NULL + +#define ksz_port_deferred_xmit NULL + +#endif /* End of CONFIG_NET_DSA_MICROCHIP_KSZ_PTP */ + +#endif diff --git a/drivers/net/dsa/microchip/ksz_ptp_reg.h b/drivers/net/dsa/microchip/ksz_ptp_reg.h new file mode 100644 index 000000000000..d71e85510cda --- /dev/null +++ b/drivers/net/dsa/microchip/ksz_ptp_reg.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Microchip KSZ PTP register definitions + * Copyright (C) 2022 Microchip Technology Inc. 
+ */ + +#ifndef __KSZ_PTP_REGS_H +#define __KSZ_PTP_REGS_H + +#define REG_SW_GLOBAL_LED_OVR__4 0x0120 +#define LED_OVR_2 BIT(1) +#define LED_OVR_1 BIT(0) + +#define REG_SW_GLOBAL_LED_SRC__4 0x0128 +#define LED_SRC_PTP_GPIO_1 BIT(3) +#define LED_SRC_PTP_GPIO_2 BIT(2) + +/* 5 - PTP Clock */ +#define REG_PTP_CLK_CTRL 0x0500 + +#define PTP_STEP_ADJ BIT(6) +#define PTP_STEP_DIR BIT(5) +#define PTP_READ_TIME BIT(4) +#define PTP_LOAD_TIME BIT(3) +#define PTP_CLK_ADJ_ENABLE BIT(2) +#define PTP_CLK_ENABLE BIT(1) +#define PTP_CLK_RESET BIT(0) + +#define REG_PTP_RTC_SUB_NANOSEC__2 0x0502 + +#define PTP_RTC_SUB_NANOSEC_M 0x0007 +#define PTP_RTC_0NS 0x00 + +#define REG_PTP_RTC_NANOSEC 0x0504 + +#define REG_PTP_RTC_SEC 0x0508 + +#define REG_PTP_SUBNANOSEC_RATE 0x050C + +#define PTP_SUBNANOSEC_M 0x3FFFFFFF +#define PTP_RATE_DIR BIT(31) +#define PTP_TMP_RATE_ENABLE BIT(30) + +#define REG_PTP_SUBNANOSEC_RATE_L 0x050E + +#define REG_PTP_RATE_DURATION 0x0510 +#define REG_PTP_RATE_DURATION_H 0x0510 +#define REG_PTP_RATE_DURATION_L 0x0512 + +#define REG_PTP_MSG_CONF1 0x0514 + +#define PTP_802_1AS BIT(7) +#define PTP_ENABLE BIT(6) +#define PTP_ETH_ENABLE BIT(5) +#define PTP_IPV4_UDP_ENABLE BIT(4) +#define PTP_IPV6_UDP_ENABLE BIT(3) +#define PTP_TC_P2P BIT(2) +#define PTP_MASTER BIT(1) +#define PTP_1STEP BIT(0) + +#define REG_PTP_UNIT_INDEX__4 0x0520 + +#define PTP_GPIO_INDEX GENMASK(19, 16) +#define PTP_TSI_INDEX BIT(8) +#define PTP_TOU_INDEX GENMASK(1, 0) + +#define REG_PTP_TRIG_STATUS__4 0x0524 + +#define TRIG_ERROR_M GENMASK(18, 16) +#define TRIG_DONE_M GENMASK(2, 0) + +#define REG_PTP_INT_STATUS__4 0x0528 + +#define TRIG_INT_M GENMASK(18, 16) +#define TS_INT_M GENMASK(1, 0) + +#define REG_PTP_CTRL_STAT__4 0x052C + +#define GPIO_IN BIT(7) +#define GPIO_OUT BIT(6) +#define TS_INT_ENABLE BIT(5) +#define TRIG_ACTIVE BIT(4) +#define TRIG_ENABLE BIT(3) +#define TRIG_RESET BIT(2) +#define TS_ENABLE BIT(1) +#define TS_RESET BIT(0) + +#define REG_TRIG_TARGET_NANOSEC 0x0530 +#define REG_TRIG_TARGET_SEC 0x0534 + +#define REG_TRIG_CTRL__4 0x0538 + +#define TRIG_CASCADE_ENABLE BIT(31) +#define TRIG_CASCADE_TAIL BIT(30) +#define TRIG_CASCADE_UPS_M GENMASK(29, 26) +#define TRIG_NOW BIT(25) +#define TRIG_NOTIFY BIT(24) +#define TRIG_EDGE BIT(23) +#define TRIG_PATTERN_M GENMASK(22, 20) +#define TRIG_NEG_EDGE 0 +#define TRIG_POS_EDGE 1 +#define TRIG_NEG_PULSE 2 +#define TRIG_POS_PULSE 3 +#define TRIG_NEG_PERIOD 4 +#define TRIG_POS_PERIOD 5 +#define TRIG_REG_OUTPUT 6 +#define TRIG_GPO_M GENMASK(19, 16) +#define TRIG_CASCADE_ITERATE_CNT_M GENMASK(15, 0) + +#define REG_TRIG_CYCLE_WIDTH 0x053C +#define TRIG_CYCLE_WIDTH_M GENMASK(31, 0) + +#define REG_TRIG_CYCLE_CNT 0x0540 + +#define TRIG_CYCLE_CNT_M GENMASK(31, 16) +#define TRIG_BIT_PATTERN_M GENMASK(15, 0) + +#define REG_TRIG_ITERATE_TIME 0x0544 + +#define REG_TRIG_PULSE_WIDTH__4 0x0548 + +#define TRIG_PULSE_WIDTH_M GENMASK(23, 0) + +/* Port PTP Register */ +#define REG_PTP_PORT_RX_DELAY__2 0x0C00 +#define REG_PTP_PORT_TX_DELAY__2 0x0C02 +#define REG_PTP_PORT_ASYM_DELAY__2 0x0C04 + +#define REG_PTP_PORT_XDELAY_TS 0x0C08 +#define REG_PTP_PORT_SYNC_TS 0x0C0C +#define REG_PTP_PORT_PDRESP_TS 0x0C10 + +#define REG_PTP_PORT_TX_INT_STATUS__2 0x0C14 +#define REG_PTP_PORT_TX_INT_ENABLE__2 0x0C16 + +#define PTP_PORT_SYNC_INT BIT(15) +#define PTP_PORT_XDELAY_REQ_INT BIT(14) +#define PTP_PORT_PDELAY_RESP_INT BIT(13) +#define KSZ_SYNC_MSG 2 +#define KSZ_XDREQ_MSG 1 +#define KSZ_PDRES_MSG 0 + +#endif diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 
908fa89444c9..616b21c90d05 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -608,17 +608,29 @@ mt7530_mib_reset(struct dsa_switch *ds) mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE); } -static int mt7530_phy_read(struct mt7530_priv *priv, int port, int regnum) +static int mt7530_phy_read_c22(struct mt7530_priv *priv, int port, int regnum) { return mdiobus_read_nested(priv->bus, port, regnum); } -static int mt7530_phy_write(struct mt7530_priv *priv, int port, int regnum, - u16 val) +static int mt7530_phy_write_c22(struct mt7530_priv *priv, int port, int regnum, + u16 val) { return mdiobus_write_nested(priv->bus, port, regnum, val); } +static int mt7530_phy_read_c45(struct mt7530_priv *priv, int port, + int devad, int regnum) +{ + return mdiobus_c45_read_nested(priv->bus, port, devad, regnum); +} + +static int mt7530_phy_write_c45(struct mt7530_priv *priv, int port, int devad, + int regnum, u16 val) +{ + return mdiobus_c45_write_nested(priv->bus, port, devad, regnum, val); +} + static int mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad, int regnum) @@ -670,7 +682,7 @@ out: static int mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad, - int regnum, u32 data) + int regnum, u16 data) { struct mii_bus *bus = priv->bus; struct mt7530_dummy_poll p; @@ -793,55 +805,36 @@ out: } static int -mt7531_ind_phy_read(struct mt7530_priv *priv, int port, int regnum) +mt753x_phy_read_c22(struct mii_bus *bus, int port, int regnum) { - int devad; - int ret; - - if (regnum & MII_ADDR_C45) { - devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f; - ret = mt7531_ind_c45_phy_read(priv, port, devad, - regnum & MII_REGADDR_C45_MASK); - } else { - ret = mt7531_ind_c22_phy_read(priv, port, regnum); - } + struct mt7530_priv *priv = bus->priv; - return ret; + return priv->info->phy_read_c22(priv, port, regnum); } static int -mt7531_ind_phy_write(struct mt7530_priv *priv, int port, int regnum, - u16 data) +mt753x_phy_read_c45(struct mii_bus *bus, int port, int devad, int regnum) { - int devad; - int ret; - - if (regnum & MII_ADDR_C45) { - devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f; - ret = mt7531_ind_c45_phy_write(priv, port, devad, - regnum & MII_REGADDR_C45_MASK, - data); - } else { - ret = mt7531_ind_c22_phy_write(priv, port, regnum, data); - } + struct mt7530_priv *priv = bus->priv; - return ret; + return priv->info->phy_read_c45(priv, port, devad, regnum); } static int -mt753x_phy_read(struct mii_bus *bus, int port, int regnum) +mt753x_phy_write_c22(struct mii_bus *bus, int port, int regnum, u16 val) { struct mt7530_priv *priv = bus->priv; - return priv->info->phy_read(priv, port, regnum); + return priv->info->phy_write_c22(priv, port, regnum, val); } static int -mt753x_phy_write(struct mii_bus *bus, int port, int regnum, u16 val) +mt753x_phy_write_c45(struct mii_bus *bus, int port, int devad, int regnum, + u16 val) { struct mt7530_priv *priv = bus->priv; - return priv->info->phy_write(priv, port, regnum, val); + return priv->info->phy_write_c45(priv, port, devad, regnum, val); } static void @@ -2086,8 +2079,10 @@ mt7530_setup_mdio(struct mt7530_priv *priv) bus->priv = priv; bus->name = KBUILD_MODNAME "-mii"; snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++); - bus->read = mt753x_phy_read; - bus->write = mt753x_phy_write; + bus->read = mt753x_phy_read_c22; + bus->write = mt753x_phy_write_c22; + bus->read_c45 = mt753x_phy_read_c45; + bus->write_c45 = mt753x_phy_write_c45; bus->parent = dev; bus->phy_mask = ~ds->phys_mii_mask; @@ 
-3182,8 +3177,10 @@ static const struct mt753x_info mt753x_table[] = { .id = ID_MT7621, .pcs_ops = &mt7530_pcs_ops, .sw_setup = mt7530_setup, - .phy_read = mt7530_phy_read, - .phy_write = mt7530_phy_write, + .phy_read_c22 = mt7530_phy_read_c22, + .phy_write_c22 = mt7530_phy_write_c22, + .phy_read_c45 = mt7530_phy_read_c45, + .phy_write_c45 = mt7530_phy_write_c45, .pad_setup = mt7530_pad_clk_setup, .mac_port_get_caps = mt7530_mac_port_get_caps, .mac_port_config = mt7530_mac_config, @@ -3192,8 +3189,10 @@ static const struct mt753x_info mt753x_table[] = { .id = ID_MT7530, .pcs_ops = &mt7530_pcs_ops, .sw_setup = mt7530_setup, - .phy_read = mt7530_phy_read, - .phy_write = mt7530_phy_write, + .phy_read_c22 = mt7530_phy_read_c22, + .phy_write_c22 = mt7530_phy_write_c22, + .phy_read_c45 = mt7530_phy_read_c45, + .phy_write_c45 = mt7530_phy_write_c45, .pad_setup = mt7530_pad_clk_setup, .mac_port_get_caps = mt7530_mac_port_get_caps, .mac_port_config = mt7530_mac_config, @@ -3202,8 +3201,10 @@ static const struct mt753x_info mt753x_table[] = { .id = ID_MT7531, .pcs_ops = &mt7531_pcs_ops, .sw_setup = mt7531_setup, - .phy_read = mt7531_ind_phy_read, - .phy_write = mt7531_ind_phy_write, + .phy_read_c22 = mt7531_ind_c22_phy_read, + .phy_write_c22 = mt7531_ind_c22_phy_write, + .phy_read_c45 = mt7531_ind_c45_phy_read, + .phy_write_c45 = mt7531_ind_c45_phy_write, .pad_setup = mt7531_pad_setup, .cpu_port_config = mt7531_cpu_port_config, .mac_port_get_caps = mt7531_mac_port_get_caps, @@ -3263,7 +3264,7 @@ mt7530_probe(struct mdio_device *mdiodev) * properly. */ if (!priv->info->sw_setup || !priv->info->pad_setup || - !priv->info->phy_read || !priv->info->phy_write || + !priv->info->phy_read_c22 || !priv->info->phy_write_c22 || !priv->info->mac_port_get_caps || !priv->info->mac_port_config) return -EINVAL; diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index e8d966435350..6b2fc6290ea8 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -750,8 +750,10 @@ struct mt753x_pcs { /* struct mt753x_info - This is the main data structure for holding the specific * part for each supported device * @sw_setup: Holding the handler to a device initialization - * @phy_read: Holding the way reading PHY port - * @phy_write: Holding the way writing PHY port + * @phy_read_c22: Holding the way reading PHY port using C22 + * @phy_write_c22: Holding the way writing PHY port using C22 + * @phy_read_c45: Holding the way reading PHY port using C45 + * @phy_write_c45: Holding the way writing PHY port using C45 * @pad_setup: Holding the way setting up the bus pad for a certain * MAC port * @phy_mode_supported: Check if the PHY type is being supported on a certain @@ -767,8 +769,13 @@ struct mt753x_info { const struct phylink_pcs_ops *pcs_ops; int (*sw_setup)(struct dsa_switch *ds); - int (*phy_read)(struct mt7530_priv *priv, int port, int regnum); - int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val); + int (*phy_read_c22)(struct mt7530_priv *priv, int port, int regnum); + int (*phy_write_c22)(struct mt7530_priv *priv, int port, int regnum, + u16 val); + int (*phy_read_c45)(struct mt7530_priv *priv, int port, int devad, + int regnum); + int (*phy_write_c45)(struct mt7530_priv *priv, int port, int devad, + int regnum, u16 val); int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface); int (*cpu_port_config)(struct dsa_switch *ds, int port); void (*mac_port_get_caps)(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/mv88e6xxx/Makefile 
b/drivers/net/dsa/mv88e6xxx/Makefile index 49bf358b9c4f..1409e691ab77 100644 --- a/drivers/net/dsa/mv88e6xxx/Makefile +++ b/drivers/net/dsa/mv88e6xxx/Makefile @@ -15,6 +15,7 @@ mv88e6xxx-objs += port_hidden.o mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o mv88e6xxx-objs += serdes.o mv88e6xxx-objs += smi.o +mv88e6xxx-objs += switchdev.o mv88e6xxx-objs += trace.o # for tracing framework to find trace.h diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 242b8b325504..0a5d6c7bb128 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1728,11 +1728,11 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, return err; } -static int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip, - int (*cb)(struct mv88e6xxx_chip *chip, - const struct mv88e6xxx_vtu_entry *entry, - void *priv), - void *priv) +int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip, + int (*cb)(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *entry, + void *priv), + void *priv) { struct mv88e6xxx_vtu_entry entry = { .vid = mv88e6xxx_max_vid(chip), @@ -3884,6 +3884,24 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) return err ? err : val; } +static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad, + int reg) +{ + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; + struct mv88e6xxx_chip *chip = mdio_bus->chip; + u16 val; + int err; + + if (!chip->info->ops->phy_read_c45) + return -EOPNOTSUPP; + + mv88e6xxx_reg_lock(chip); + err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val); + mv88e6xxx_reg_unlock(chip); + + return err ? err : val; +} + static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) { struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; @@ -3900,6 +3918,23 @@ static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) return err; } +static int mv88e6xxx_mdio_write_c45(struct mii_bus *bus, int phy, int devad, + int reg, u16 val) +{ + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; + struct mv88e6xxx_chip *chip = mdio_bus->chip; + int err; + + if (!chip->info->ops->phy_write_c45) + return -EOPNOTSUPP; + + mv88e6xxx_reg_lock(chip); + err = chip->info->ops->phy_write_c45(chip, bus, phy, devad, reg, val); + mv88e6xxx_reg_unlock(chip); + + return err; +} + static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, struct device_node *np, bool external) @@ -3938,6 +3973,8 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, bus->read = mv88e6xxx_mdio_read; bus->write = mv88e6xxx_mdio_write; + bus->read_c45 = mv88e6xxx_mdio_read_c45; + bus->write_c45 = mv88e6xxx_mdio_write_c45; bus->parent = chip->dev; if (!external) { @@ -4149,8 +4186,10 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6185_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, @@ -4198,8 +4237,10 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = 
mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, @@ -4279,8 +4320,10 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -4343,8 +4386,10 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, @@ -4426,8 +4471,10 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -4472,8 +4519,10 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -4527,8 +4576,10 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = 
mv88e6352_port_set_rgmii_delay, @@ -4573,8 +4624,10 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -4673,8 +4726,10 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -4736,8 +4791,10 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -4799,8 +4856,10 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -4862,8 +4921,10 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -4925,8 +4986,10 @@ static const struct mv88e6xxx_ops mv88e6250_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = 
mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -4964,8 +5027,10 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -5017,7 +5082,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .serdes_get_regs = mv88e6390_serdes_get_regs, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, - .ptp_ops = &mv88e6352_ptp_ops, + .ptp_ops = &mv88e6390_ptp_ops, .phylink_get_caps = mv88e6390_phylink_get_caps, }; @@ -5029,8 +5094,10 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay, @@ -5074,8 +5141,10 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay, @@ -5117,8 +5186,10 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -5183,8 +5254,10 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = 
mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -5227,8 +5300,10 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -5275,8 +5350,10 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -5340,8 +5417,10 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -5391,7 +5470,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, - .ptp_ops = &mv88e6352_ptp_ops, + .ptp_ops = &mv88e6390_ptp_ops, .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, @@ -5407,8 +5486,10 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -5462,7 +5543,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .serdes_get_regs = mv88e6390_serdes_get_regs, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, - .ptp_ops = &mv88e6352_ptp_ops, + .ptp_ops = &mv88e6390_ptp_ops, .phylink_get_caps = mv88e6390x_phylink_get_caps, }; @@ -5473,8 +5554,10 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = { .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, - .phy_read = mv88e6xxx_g2_smi_phy_read, - .phy_write = 
mv88e6xxx_g2_smi_phy_write, + .phy_read = mv88e6xxx_g2_smi_phy_read_c22, + .phy_write = mv88e6xxx_g2_smi_phy_write_c22, + .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45, + .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45, .port_set_link = mv88e6xxx_port_set_link, .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -6526,7 +6609,7 @@ static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port, const struct mv88e6xxx_ops *ops; if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | - BR_BCAST_FLOOD | BR_PORT_LOCKED)) + BR_BCAST_FLOOD | BR_PORT_LOCKED | BR_PORT_MAB)) return -EINVAL; ops = chip->info->ops; @@ -6545,7 +6628,7 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port, struct netlink_ext_ack *extack) { struct mv88e6xxx_chip *chip = ds->priv; - int err = -EOPNOTSUPP; + int err = 0; mv88e6xxx_reg_lock(chip); @@ -6584,6 +6667,12 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port, goto out; } + if (flags.mask & BR_PORT_MAB) { + bool mab = !!(flags.val & BR_PORT_MAB); + + mv88e6xxx_port_set_mab(chip, port, mab); + } + if (flags.mask & BR_PORT_LOCKED) { bool locked = !!(flags.val & BR_PORT_LOCKED); diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index e693154cf803..da6e1339f809 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -280,6 +280,9 @@ struct mv88e6xxx_port { unsigned int serdes_irq; char serdes_irq_name[64]; struct devlink_region *region; + + /* MacAuth Bypass control flag */ + bool mab; }; enum mv88e6xxx_region_id { @@ -451,6 +454,13 @@ struct mv88e6xxx_ops { struct mii_bus *bus, int addr, int reg, u16 val); + int (*phy_read_c45)(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int devad, int reg, u16 *val); + int (*phy_write_c45)(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int devad, int reg, u16 val); + /* Priority Override Table operations */ int (*pot_clear)(struct mv88e6xxx_chip *chip); @@ -705,6 +715,7 @@ struct mv88e6xxx_ptp_ops { int (*port_disable)(struct mv88e6xxx_chip *chip, int port); int (*global_enable)(struct mv88e6xxx_chip *chip); int (*global_disable)(struct mv88e6xxx_chip *chip); + int (*set_ptp_cpu_port)(struct mv88e6xxx_chip *chip, int port); int n_ext_ts; int arr0_sts_reg; int arr1_sts_reg; @@ -784,6 +795,12 @@ static inline bool mv88e6xxx_is_invalid_port(struct mv88e6xxx_chip *chip, int po return (chip->info->invalid_port_mask & BIT(port)) != 0; } +static inline void mv88e6xxx_port_set_mab(struct mv88e6xxx_chip *chip, + int port, bool mab) +{ + chip->ports[port].mab = mab; +} + int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val); int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg, @@ -802,6 +819,12 @@ static inline void mv88e6xxx_reg_unlock(struct mv88e6xxx_chip *chip) mutex_unlock(&chip->reg_lock); } +int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip, + int (*cb)(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *entry, + void *priv), + void *priv); + int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *bitmap); #endif /* _MV88E6XXX_CHIP_H */ diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 5848112036b0..2fa55a643591 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -403,6 +403,18 @@ int 
mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port) return mv88e6390_g1_monitor_write(chip, ptr, port); } +int mv88e6390_g1_set_ptp_cpu_port(struct mv88e6xxx_chip *chip, int port) +{ + u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_PTP_CPU_DEST; + + /* Use the default high priority for PTP frames sent to + * the CPU. + */ + port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI; + + return mv88e6390_g1_monitor_write(chip, ptr, port); +} + int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) { u16 ptr; diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 65958b2a0d3a..c99ddd117fe6 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -214,6 +214,7 @@ #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000 +#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_PTP_CPU_DEST 0x3200 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0 #define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff @@ -303,6 +304,7 @@ int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port); int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); +int mv88e6390_g1_set_ptp_cpu_port(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 61ae2d61e25c..ce3b3690c3c0 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -12,6 +12,7 @@ #include "chip.h" #include "global1.h" +#include "switchdev.h" #include "trace.h" /* Offset 0x01: ATU FID Register */ @@ -409,23 +410,25 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) err = mv88e6xxx_g1_read_atu_violation(chip); if (err) - goto out; + goto out_unlock; err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &val); if (err) - goto out; + goto out_unlock; err = mv88e6xxx_g1_atu_fid_read(chip, &fid); if (err) - goto out; + goto out_unlock; err = mv88e6xxx_g1_atu_data_read(chip, &entry); if (err) - goto out; + goto out_unlock; err = mv88e6xxx_g1_atu_mac_read(chip, &entry); if (err) - goto out; + goto out_unlock; + + mv88e6xxx_reg_unlock(chip); spid = entry.state; @@ -441,6 +444,13 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) entry.portvec, entry.mac, fid); chip->ports[spid].atu_miss_violation++; + + if (fid != MV88E6XXX_FID_STANDALONE && chip->ports[spid].mab) { + err = mv88e6xxx_handle_miss_violation(chip, spid, + &entry, fid); + if (err) + goto out; + } } if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { @@ -449,13 +459,13 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) fid); chip->ports[spid].atu_full_violation++; } - mv88e6xxx_reg_unlock(chip); return IRQ_HANDLED; -out: +out_unlock: mv88e6xxx_reg_unlock(chip); +out: dev_err(chip->dev, "ATU problem: error %d while handling interrupt\n", err); return IRQ_HANDLED; diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index fa65ecd9cb85..ed3b2f88e783 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -739,20 +739,18 @@ static int mv88e6xxx_g2_smi_phy_read_data_c45(struct mv88e6xxx_chip *chip, 
return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SMI_PHY_DATA, data); } -static int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, - bool external, int port, int reg, - u16 *data) +static int _mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, + bool external, int port, int devad, + int reg, u16 *data) { - int dev = (reg >> 16) & 0x1f; - int addr = reg & 0xffff; int err; - err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev, - addr); + err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, devad, + reg); if (err) return err; - return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, dev, + return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, devad, data); } @@ -771,51 +769,65 @@ static int mv88e6xxx_g2_smi_phy_write_data_c45(struct mv88e6xxx_chip *chip, return mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev); } -static int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, - bool external, int port, int reg, - u16 data) +static int _mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, + bool external, int port, int devad, + int reg, u16 data) { - int dev = (reg >> 16) & 0x1f; - int addr = reg & 0xffff; int err; - err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev, - addr); + err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, devad, + reg); if (err) return err; - return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, dev, + return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, devad, data); } -int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus, - int addr, int reg, u16 *val) +int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val) { struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; bool external = mdio_bus->external; - if (reg & MII_ADDR_C45) - return mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, reg, - val); - return mv88e6xxx_g2_smi_phy_read_data_c22(chip, external, addr, reg, val); } -int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus, - int addr, int reg, u16 val) +int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, int addr, int devad, + int reg, u16 *val) { struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; bool external = mdio_bus->external; - if (reg & MII_ADDR_C45) - return mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, reg, - val); + return _mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, devad, reg, + val); +} + +int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, int addr, int reg, + u16 val) +{ + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; + bool external = mdio_bus->external; return mv88e6xxx_g2_smi_phy_write_data_c22(chip, external, addr, reg, val); } +int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, int addr, int devad, + int reg, u16 val) +{ + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv; + bool external = mdio_bus->external; + + return _mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, devad, reg, + val); +} + /* Offset 0x1B: Watchdog Control */ static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq) { diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 7536b8b0ad01..e973114d6890 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -314,12 +314,18 @@ int mv88e6xxx_g2_wait_bit(struct 
mv88e6xxx_chip *chip, int reg, int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port); -int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, - struct mii_bus *bus, - int addr, int reg, u16 *val); -int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, - struct mii_bus *bus, - int addr, int reg, u16 val); +int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 *val); +int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int reg, u16 val); +int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int devad, int reg, u16 *val); +int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, + int addr, int devad, int reg, u16 val); int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr); int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip, diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c index 252b5b3a3efe..8bb88b3d900d 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ b/drivers/net/dsa/mv88e6xxx/phy.c @@ -55,6 +55,38 @@ int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val) return chip->info->ops->phy_write(chip, bus, addr, reg, val); } +int mv88e6xxx_phy_read_c45(struct mv88e6xxx_chip *chip, int phy, int devad, + int reg, u16 *val) +{ + int addr = phy; /* PHY devices addresses start at 0x0 */ + struct mii_bus *bus; + + bus = mv88e6xxx_default_mdio_bus(chip); + if (!bus) + return -EOPNOTSUPP; + + if (!chip->info->ops->phy_read_c45) + return -EOPNOTSUPP; + + return chip->info->ops->phy_read_c45(chip, bus, addr, devad, reg, val); +} + +int mv88e6xxx_phy_write_c45(struct mv88e6xxx_chip *chip, int phy, int devad, + int reg, u16 val) +{ + int addr = phy; /* PHY devices addresses start at 0x0 */ + struct mii_bus *bus; + + bus = mv88e6xxx_default_mdio_bus(chip); + if (!bus) + return -EOPNOTSUPP; + + if (!chip->info->ops->phy_write_c45) + return -EOPNOTSUPP; + + return chip->info->ops->phy_write_c45(chip, bus, addr, devad, reg, val); +} + static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page) { return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page); diff --git a/drivers/net/dsa/mv88e6xxx/phy.h b/drivers/net/dsa/mv88e6xxx/phy.h index 05ea0d546969..5f47722364cc 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.h +++ b/drivers/net/dsa/mv88e6xxx/phy.h @@ -28,6 +28,10 @@ int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy, int reg, u16 *val); int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val); +int mv88e6xxx_phy_read_c45(struct mv88e6xxx_chip *chip, int phy, int devad, + int reg, u16 *val); +int mv88e6xxx_phy_write_c45(struct mv88e6xxx_chip *chip, int phy, int devad, + int reg, u16 val); int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy, u8 page, int reg, u16 *val); int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy, diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c index d838c174dc0d..ea17231dc34e 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -11,6 +11,7 @@ */ #include "chip.h" +#include "global1.h" #include "global2.h" #include "hwtstamp.h" #include "ptp.h" @@ -419,6 +420,34 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { .cc_mult_dem = MV88E6XXX_CC_MULT_DEM, }; +const struct mv88e6xxx_ptp_ops 
mv88e6390_ptp_ops = { + .clock_read = mv88e6352_ptp_clock_read, + .ptp_enable = mv88e6352_ptp_enable, + .ptp_verify = mv88e6352_ptp_verify, + .event_work = mv88e6352_tai_event_work, + .port_enable = mv88e6352_hwtstamp_port_enable, + .port_disable = mv88e6352_hwtstamp_port_disable, + .set_ptp_cpu_port = mv88e6390_g1_set_ptp_cpu_port, + .n_ext_ts = 1, + .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS, + .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS, + .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS, + .rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), + .cc_shift = MV88E6XXX_CC_SHIFT, + .cc_mult = MV88E6XXX_CC_MULT, + .cc_mult_num = MV88E6XXX_CC_MULT_NUM, + .cc_mult_dem = MV88E6XXX_CC_MULT_DEM, +}; + static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) { struct mv88e6xxx_chip *chip = cc_to_chip(cc); @@ -491,6 +520,23 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) chip->ptp_clock_info.verify = ptp_ops->ptp_verify; chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work; + if (ptp_ops->set_ptp_cpu_port) { + struct dsa_port *dp; + int upstream = 0; + int err; + + dsa_switch_for_each_user_port(dp, chip->ds) { + upstream = dsa_upstream_port(chip->ds, dp->index); + break; + } + + err = ptp_ops->set_ptp_cpu_port(chip, upstream); + if (err) { + dev_err(chip->dev, "Failed to set PTP CPU destination port!\n"); + return err; + } + } + chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev); if (IS_ERR(chip->ptp_clock)) return PTR_ERR(chip->ptp_clock); diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h index 269d5d16a466..6c4d09adc93c 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.h +++ b/drivers/net/dsa/mv88e6xxx/ptp.h @@ -151,6 +151,7 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip); extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops; extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops; extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops; +extern const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops; #else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ @@ -171,6 +172,7 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip) static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {}; static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {}; static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {}; +static const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {}; #endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */ diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index d94150d8f3f4..72faec8f44dc 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -36,17 +36,13 @@ static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg, static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip, int lane, int device, int reg, u16 *val) { - int reg_c45 = MII_ADDR_C45 | device << 16 | reg; - - return mv88e6xxx_phy_read(chip, lane, reg_c45, val); + return mv88e6xxx_phy_read_c45(chip, lane, device, reg, val); } static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip, int lane, int device, int reg, u16 val) { - int reg_c45 = MII_ADDR_C45 | device << 16 | reg; - - return 
mv88e6xxx_phy_write(chip, lane, reg_c45, val); + return mv88e6xxx_phy_write_c45(chip, lane, device, reg, val); } static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, diff --git a/drivers/net/dsa/mv88e6xxx/switchdev.c b/drivers/net/dsa/mv88e6xxx/switchdev.c new file mode 100644 index 000000000000..4c346a884fb2 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/switchdev.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * switchdev.c + * + * Authors: + * Hans J. Schultz <netdev@kapio-technology.com> + * + */ + +#include <net/switchdev.h> +#include "chip.h" +#include "global1.h" +#include "switchdev.h" + +struct mv88e6xxx_fid_search_ctx { + u16 fid_search; + u16 vid_found; +}; + +static int __mv88e6xxx_find_vid(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *entry, + void *priv) +{ + struct mv88e6xxx_fid_search_ctx *ctx = priv; + + if (ctx->fid_search == entry->fid) { + ctx->vid_found = entry->vid; + return 1; + } + + return 0; +} + +static int mv88e6xxx_find_vid(struct mv88e6xxx_chip *chip, u16 fid, u16 *vid) +{ + struct mv88e6xxx_fid_search_ctx ctx; + int err; + + ctx.fid_search = fid; + mv88e6xxx_reg_lock(chip); + err = mv88e6xxx_vtu_walk(chip, __mv88e6xxx_find_vid, &ctx); + mv88e6xxx_reg_unlock(chip); + if (err < 0) + return err; + if (err == 1) + *vid = ctx.vid_found; + else + return -ENOENT; + + return 0; +} + +int mv88e6xxx_handle_miss_violation(struct mv88e6xxx_chip *chip, int port, + struct mv88e6xxx_atu_entry *entry, u16 fid) +{ + struct switchdev_notifier_fdb_info info = { + .addr = entry->mac, + .locked = true, + }; + struct net_device *brport; + struct dsa_port *dp; + u16 vid; + int err; + + err = mv88e6xxx_find_vid(chip, fid, &vid); + if (err) + return err; + + info.vid = vid; + dp = dsa_to_port(chip->ds, port); + + rtnl_lock(); + brport = dsa_port_to_bridge_port(dp); + if (!brport) { + rtnl_unlock(); + return -ENODEV; + } + err = call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, + brport, &info.info, NULL); + rtnl_unlock(); + + return err; +} diff --git a/drivers/net/dsa/mv88e6xxx/switchdev.h b/drivers/net/dsa/mv88e6xxx/switchdev.h new file mode 100644 index 000000000000..62214f9d62b0 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/switchdev.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * + * switchdev.h + * + * Authors: + * Hans J. 
Schultz <netdev@kapio-technology.com> + * + */ + +#ifndef _MV88E6XXX_SWITCHDEV_H_ +#define _MV88E6XXX_SWITCHDEV_H_ + +#include "chip.h" + +int mv88e6xxx_handle_miss_violation(struct mv88e6xxx_chip *chip, int port, + struct mv88e6xxx_atu_entry *entry, + u16 fid); + +#endif /* _MV88E6XXX_SWITCHDEV_H_ */ diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 01ac70fd7ddf..cbcc457499f3 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -954,8 +954,10 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) return -ENOMEM; bus->name = "VSC9959 internal MDIO bus"; - bus->read = enetc_mdio_read; - bus->write = enetc_mdio_write; + bus->read = enetc_mdio_read_c22; + bus->write = enetc_mdio_write_c22; + bus->read_c45 = enetc_mdio_read_c45; + bus->write_c45 = enetc_mdio_write_c45; bus->parent = dev; mdio_priv = bus->priv; mdio_priv->hw = hw; diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index 9ba2ec2b966d..fb1549a5fe32 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -149,8 +149,10 @@ struct sja1105_info { bool (*rxtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb); void (*txtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb); int (*clocking_setup)(struct sja1105_private *priv); - int (*pcs_mdio_read)(struct mii_bus *bus, int phy, int reg); - int (*pcs_mdio_write)(struct mii_bus *bus, int phy, int reg, u16 val); + int (*pcs_mdio_read_c45)(struct mii_bus *bus, int phy, int mmd, + int reg); + int (*pcs_mdio_write_c45)(struct mii_bus *bus, int phy, int mmd, + int reg, u16 val); int (*disable_microcontroller)(struct sja1105_private *priv); const char *name; bool supports_mii[SJA1105_MAX_NUM_PORTS]; @@ -303,10 +305,12 @@ void sja1105_frame_memory_partitioning(struct sja1105_private *priv); /* From sja1105_mdio.c */ int sja1105_mdiobus_register(struct dsa_switch *ds); void sja1105_mdiobus_unregister(struct dsa_switch *ds); -int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg); -int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val); -int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg); -int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val); +int sja1105_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg); +int sja1105_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg, + u16 val); +int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg); +int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg, + u16 val); /* From sja1105_devlink.c */ int sja1105_devlink_setup(struct dsa_switch *ds); diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c index 4059fcc8c832..2fcb601cb4eb 100644 --- a/drivers/net/dsa/sja1105/sja1105_mdio.c +++ b/drivers/net/dsa/sja1105/sja1105_mdio.c @@ -7,20 +7,15 @@ #define SJA1110_PCS_BANK_REG SJA1110_SPI_ADDR(0x3fc) -int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg) +int sja1105_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg) { struct sja1105_mdio_private *mdio_priv = bus->priv; struct sja1105_private *priv = mdio_priv->priv; u64 addr; u32 tmp; - u16 mmd; int rc; - if (!(reg & MII_ADDR_C45)) - return -EINVAL; - - mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f; - addr = (mmd << 16) | (reg & GENMASK(15, 0)); + addr = (mmd << 16) | reg; if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2) return 
0xffff; @@ -37,19 +32,15 @@ int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg) return tmp & 0xffff; } -int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) +int sja1105_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, + int reg, u16 val) { struct sja1105_mdio_private *mdio_priv = bus->priv; struct sja1105_private *priv = mdio_priv->priv; u64 addr; u32 tmp; - u16 mmd; - - if (!(reg & MII_ADDR_C45)) - return -EINVAL; - mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f; - addr = (mmd << 16) | (reg & GENMASK(15, 0)); + addr = (mmd << 16) | reg; tmp = val; if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2) @@ -58,7 +49,7 @@ int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL); } -int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg) +int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg) { struct sja1105_mdio_private *mdio_priv = bus->priv; struct sja1105_private *priv = mdio_priv->priv; @@ -66,17 +57,12 @@ int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg) int offset, bank; u64 addr; u32 tmp; - u16 mmd; int rc; - if (!(reg & MII_ADDR_C45)) - return -EINVAL; - if (regs->pcs_base[phy] == SJA1105_RSV_ADDR) return -ENODEV; - mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f; - addr = (mmd << 16) | (reg & GENMASK(15, 0)); + addr = (mmd << 16) | reg; if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1) return NXP_SJA1110_XPCS_ID >> 16; @@ -108,7 +94,8 @@ int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg) return tmp & 0xffff; } -int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) +int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int reg, int mmd, + u16 val) { struct sja1105_mdio_private *mdio_priv = bus->priv; struct sja1105_private *priv = mdio_priv->priv; @@ -116,17 +103,12 @@ int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) int offset, bank; u64 addr; u32 tmp; - u16 mmd; int rc; - if (!(reg & MII_ADDR_C45)) - return -EINVAL; - if (regs->pcs_base[phy] == SJA1105_RSV_ADDR) return -ENODEV; - mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f; - addr = (mmd << 16) | (reg & GENMASK(15, 0)); + addr = (mmd << 16) | reg; bank = addr >> 8; offset = addr & GENMASK(7, 0); @@ -167,7 +149,7 @@ static u64 sja1105_base_t1_encode_addr(struct sja1105_private *priv, return regs->mdio_100base_t1 | (phy << 7) | (op << 5) | (xad << 0); } -static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg) +static int sja1105_base_t1_mdio_read_c22(struct mii_bus *bus, int phy, int reg) { struct sja1105_mdio_private *mdio_priv = bus->priv; struct sja1105_private *priv = mdio_priv->priv; @@ -175,30 +157,31 @@ static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg) u32 tmp; int rc; - if (reg & MII_ADDR_C45) { - u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f; - - addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, - mmd); + addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f); - tmp = reg & MII_REGADDR_C45_MASK; + rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL); + if (rc < 0) + return rc; - rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL); - if (rc < 0) - return rc; + return tmp & 0xffff; +} - addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, - mmd); +static int sja1105_base_t1_mdio_read_c45(struct mii_bus *bus, int phy, + int mmd, int reg) +{ + struct sja1105_mdio_private *mdio_priv = 
bus->priv; + struct sja1105_private *priv = mdio_priv->priv; + u64 addr; + u32 tmp; + int rc; - rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL); - if (rc < 0) - return rc; + addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, mmd); - return tmp & 0xffff; - } + rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &reg, NULL); + if (rc < 0) + return rc; - /* Clause 22 read */ - addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f); + addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, mmd); rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL); if (rc < 0) @@ -207,41 +190,37 @@ static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg) return tmp & 0xffff; } -static int sja1105_base_t1_mdio_write(struct mii_bus *bus, int phy, int reg, - u16 val) +static int sja1105_base_t1_mdio_write_c22(struct mii_bus *bus, int phy, int reg, + u16 val) { struct sja1105_mdio_private *mdio_priv = bus->priv; struct sja1105_private *priv = mdio_priv->priv; u64 addr; u32 tmp; - int rc; - - if (reg & MII_ADDR_C45) { - u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f; - addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, - mmd); - - tmp = reg & MII_REGADDR_C45_MASK; + addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f); - rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL); - if (rc < 0) - return rc; + tmp = val & 0xffff; - addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, - mmd); + return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL); +} - tmp = val & 0xffff; +static int sja1105_base_t1_mdio_write_c45(struct mii_bus *bus, int phy, + int mmd, int reg, u16 val) +{ + struct sja1105_mdio_private *mdio_priv = bus->priv; + struct sja1105_private *priv = mdio_priv->priv; + u64 addr; + u32 tmp; + int rc; - rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL); - if (rc < 0) - return rc; + addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, mmd); - return 0; - } + rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &reg, NULL); + if (rc < 0) + return rc; - /* Clause 22 write */ - addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f); + addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, mmd); tmp = val & 0xffff; @@ -360,8 +339,10 @@ static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv, bus->name = "SJA1110 100base-T1 MDIO bus"; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-t1", dev_name(priv->ds->dev)); - bus->read = sja1105_base_t1_mdio_read; - bus->write = sja1105_base_t1_mdio_write; + bus->read = sja1105_base_t1_mdio_read_c22; + bus->write = sja1105_base_t1_mdio_write_c22; + bus->read_c45 = sja1105_base_t1_mdio_read_c45; + bus->write_c45 = sja1105_base_t1_mdio_write_c45; bus->parent = priv->ds->dev; mdio_priv = bus->priv; mdio_priv->priv = priv; @@ -398,7 +379,7 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv) int rc = 0; int port; - if (!priv->info->pcs_mdio_read || !priv->info->pcs_mdio_write) + if (!priv->info->pcs_mdio_read_c45 || !priv->info->pcs_mdio_write_c45) return 0; bus = mdiobus_alloc_size(sizeof(*mdio_priv)); @@ -408,8 +389,8 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv) bus->name = "SJA1105 PCS MDIO bus"; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs", dev_name(ds->dev)); - bus->read = priv->info->pcs_mdio_read; - bus->write = priv->info->pcs_mdio_write; + bus->read_c45 = priv->info->pcs_mdio_read_c45; + bus->write_c45 = priv->info->pcs_mdio_write_c45; bus->parent = ds->dev; /* There is no PHY on
this MDIO bus => mask out all PHY addresses * from auto probing. diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c index d3c9ad6d39d4..5ce29c8057a4 100644 --- a/drivers/net/dsa/sja1105/sja1105_spi.c +++ b/drivers/net/dsa/sja1105/sja1105_spi.c @@ -719,8 +719,8 @@ const struct sja1105_info sja1105r_info = { .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing, .rxtstamp = sja1105_rxtstamp, .clocking_setup = sja1105_clocking_setup, - .pcs_mdio_read = sja1105_pcs_mdio_read, - .pcs_mdio_write = sja1105_pcs_mdio_write, + .pcs_mdio_read_c45 = sja1105_pcs_mdio_read_c45, + .pcs_mdio_write_c45 = sja1105_pcs_mdio_write_c45, .regs = &sja1105pqrs_regs, .port_speed = { [SJA1105_SPEED_AUTO] = 0, @@ -756,8 +756,8 @@ const struct sja1105_info sja1105s_info = { .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing, .rxtstamp = sja1105_rxtstamp, .clocking_setup = sja1105_clocking_setup, - .pcs_mdio_read = sja1105_pcs_mdio_read, - .pcs_mdio_write = sja1105_pcs_mdio_write, + .pcs_mdio_read_c45 = sja1105_pcs_mdio_read_c45, + .pcs_mdio_write_c45 = sja1105_pcs_mdio_write_c45, .port_speed = { [SJA1105_SPEED_AUTO] = 0, [SJA1105_SPEED_10MBPS] = 3, @@ -794,8 +794,8 @@ const struct sja1105_info sja1110a_info = { .rxtstamp = sja1110_rxtstamp, .txtstamp = sja1110_txtstamp, .disable_microcontroller = sja1110_disable_microcontroller, - .pcs_mdio_read = sja1110_pcs_mdio_read, - .pcs_mdio_write = sja1110_pcs_mdio_write, + .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45, + .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45, .port_speed = { [SJA1105_SPEED_AUTO] = 0, [SJA1105_SPEED_10MBPS] = 4, @@ -844,8 +844,8 @@ const struct sja1105_info sja1110b_info = { .rxtstamp = sja1110_rxtstamp, .txtstamp = sja1110_txtstamp, .disable_microcontroller = sja1110_disable_microcontroller, - .pcs_mdio_read = sja1110_pcs_mdio_read, - .pcs_mdio_write = sja1110_pcs_mdio_write, + .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45, + .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45, .port_speed = { [SJA1105_SPEED_AUTO] = 0, [SJA1105_SPEED_10MBPS] = 4, @@ -894,8 +894,8 @@ const struct sja1105_info sja1110c_info = { .rxtstamp = sja1110_rxtstamp, .txtstamp = sja1110_txtstamp, .disable_microcontroller = sja1110_disable_microcontroller, - .pcs_mdio_read = sja1110_pcs_mdio_read, - .pcs_mdio_write = sja1110_pcs_mdio_write, + .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45, + .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45, .port_speed = { [SJA1105_SPEED_AUTO] = 0, [SJA1105_SPEED_10MBPS] = 4, @@ -944,8 +944,8 @@ const struct sja1105_info sja1110d_info = { .rxtstamp = sja1110_rxtstamp, .txtstamp = sja1110_txtstamp, .disable_microcontroller = sja1110_disable_microcontroller, - .pcs_mdio_read = sja1110_pcs_mdio_read, - .pcs_mdio_write = sja1110_pcs_mdio_write, + .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45, + .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45, .port_speed = { [SJA1105_SPEED_AUTO] = 0, [SJA1105_SPEED_10MBPS] = 4, diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index 0805f249fff2..25f55756681d 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -523,7 +523,6 @@ static int adin1110_register_mdiobus(struct adin1110_priv *priv, mii_bus->priv = priv; mii_bus->parent = dev; mii_bus->phy_mask = ~((u32)GENMASK(2, 0)); - mii_bus->probe_capabilities = MDIOBUS_C22; snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); ret = devm_mdiobus_register(dev, mii_bus); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h 
b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 466273b22f0a..3fd9728f817f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1699,20 +1699,21 @@ do { \ } while (0) /* Macros for building, reading or writing register values or bits - * using MDIO. Different from above because of the use of standardized - * Linux include values. No shifting is performed with the bit - * operations, everything works on mask values. + * using MDIO. */ + +#define XGBE_ADDR_C45 BIT(30) + #define XMDIO_READ(_pdata, _mmd, _reg) \ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \ - MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff))) + XGBE_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff))) #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \ (XMDIO_READ((_pdata), _mmd, _reg) & _mask) #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \ - MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val))) + XGBE_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val))) #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \ do { \ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 4030d619e84f..f393228d41c7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -814,6 +814,9 @@ static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed) unsigned int ss; switch (speed) { + case SPEED_10: + ss = 0x07; + break; case SPEED_1000: ss = 0x03; break; @@ -1154,8 +1157,8 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, unsigned int mmd_address, index, offset; int mmd_data; - if (mmd_reg & MII_ADDR_C45) - mmd_address = mmd_reg & ~MII_ADDR_C45; + if (mmd_reg & XGBE_ADDR_C45) + mmd_address = mmd_reg & ~XGBE_ADDR_C45; else mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); @@ -1186,8 +1189,8 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, unsigned long flags; unsigned int mmd_address, index, offset; - if (mmd_reg & MII_ADDR_C45) - mmd_address = mmd_reg & ~MII_ADDR_C45; + if (mmd_reg & XGBE_ADDR_C45) + mmd_address = mmd_reg & ~XGBE_ADDR_C45; else mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); @@ -1217,8 +1220,8 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, unsigned int mmd_address; int mmd_data; - if (mmd_reg & MII_ADDR_C45) - mmd_address = mmd_reg & ~MII_ADDR_C45; + if (mmd_reg & XGBE_ADDR_C45) + mmd_address = mmd_reg & ~XGBE_ADDR_C45; else mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); @@ -1245,8 +1248,8 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, unsigned int mmd_address; unsigned long flags; - if (mmd_reg & MII_ADDR_C45) - mmd_address = mmd_reg & ~MII_ADDR_C45; + if (mmd_reg & XGBE_ADDR_C45) + mmd_address = mmd_reg & ~XGBE_ADDR_C45; else mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); @@ -1291,11 +1294,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, } } -static unsigned int xgbe_create_mdio_sca(int port, int reg) +static unsigned int xgbe_create_mdio_sca_c22(int port, int reg) { - unsigned int mdio_sca, da; + unsigned int mdio_sca; + + mdio_sca = 0; + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); - da = (reg & MII_ADDR_C45) ? 
reg >> 16 : 0; + return mdio_sca; +} + +static unsigned int xgbe_create_mdio_sca_c45(int port, unsigned int da, int reg) +{ + unsigned int mdio_sca; mdio_sca = 0; XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); @@ -1305,14 +1317,13 @@ static unsigned int xgbe_create_mdio_sca(int port, int reg) return mdio_sca; } -static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, - int reg, u16 val) +static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, + unsigned int mdio_sca, u16 val) { - unsigned int mdio_sca, mdio_sccd; + unsigned int mdio_sccd; reinit_completion(&pdata->mdio_complete); - mdio_sca = xgbe_create_mdio_sca(addr, reg); XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); mdio_sccd = 0; @@ -1329,14 +1340,33 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, return 0; } -static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, - int reg) +static int xgbe_write_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr, + int reg, u16 val) { - unsigned int mdio_sca, mdio_sccd; + unsigned int mdio_sca; + + mdio_sca = xgbe_create_mdio_sca_c22(addr, reg); + + return xgbe_write_ext_mii_regs(pdata, mdio_sca, val); +} + +static int xgbe_write_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr, + int devad, int reg, u16 val) +{ + unsigned int mdio_sca; + + mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg); + + return xgbe_write_ext_mii_regs(pdata, mdio_sca, val); +} + +static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, + unsigned int mdio_sca) +{ + unsigned int mdio_sccd; reinit_completion(&pdata->mdio_complete); - mdio_sca = xgbe_create_mdio_sca(addr, reg); XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); mdio_sccd = 0; @@ -1352,6 +1382,26 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA); } +static int xgbe_read_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr, + int reg) +{ + unsigned int mdio_sca; + + mdio_sca = xgbe_create_mdio_sca_c22(addr, reg); + + return xgbe_read_ext_mii_regs(pdata, mdio_sca); +} + +static int xgbe_read_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr, + int devad, int reg) +{ + unsigned int mdio_sca; + + mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg); + + return xgbe_read_ext_mii_regs(pdata, mdio_sca); +} + static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, enum xgbe_mdio_mode mode) { @@ -3565,8 +3615,10 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->set_speed = xgbe_set_speed; hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode; - hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs; - hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs; + hw_if->read_ext_mii_regs_c22 = xgbe_read_ext_mii_regs_c22; + hw_if->write_ext_mii_regs_c22 = xgbe_write_ext_mii_regs_c22; + hw_if->read_ext_mii_regs_c45 = xgbe_read_ext_mii_regs_c45; + hw_if->write_ext_mii_regs_c45 = xgbe_write_ext_mii_regs_c45; hw_if->set_gpio = xgbe_set_gpio; hw_if->clr_gpio = xgbe_clr_gpio; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 43fdd111235a..33a9574e9e04 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -274,6 +274,15 @@ static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata) pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_1000); } +static void xgbe_sgmii_10_mode(struct xgbe_prv_data *pdata) +{ + /* Set MAC to 10M speed */ + pdata->hw_if.set_speed(pdata, 
SPEED_10); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_10); +} + static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata) { /* Set MAC to 1G speed */ @@ -306,6 +315,9 @@ static void xgbe_change_mode(struct xgbe_prv_data *pdata, case XGBE_MODE_KR: xgbe_kr_mode(pdata); break; + case XGBE_MODE_SGMII_10: + xgbe_sgmii_10_mode(pdata); + break; case XGBE_MODE_SGMII_100: xgbe_sgmii_100_mode(pdata); break; @@ -1077,6 +1089,8 @@ static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata) static const char *xgbe_phy_speed_string(int speed) { switch (speed) { + case SPEED_10: + return "10Mbps"; case SPEED_100: return "100Mbps"; case SPEED_1000: @@ -1164,6 +1178,7 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata) case XGBE_MODE_KX_1000: case XGBE_MODE_KX_2500: case XGBE_MODE_KR: + case XGBE_MODE_SGMII_10: case XGBE_MODE_SGMII_100: case XGBE_MODE_SGMII_1000: case XGBE_MODE_X: @@ -1225,6 +1240,8 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode) xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000); } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { xgbe_set_mode(pdata, XGBE_MODE_SGMII_100); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_10)) { + xgbe_set_mode(pdata, XGBE_MODE_SGMII_10); } else { enable_irq(pdata->an_irq); ret = -EINVAL; @@ -1325,6 +1342,9 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata) mode = xgbe_phy_status_aneg(pdata); switch (mode) { + case XGBE_MODE_SGMII_10: + pdata->phy.speed = SPEED_10; + break; case XGBE_MODE_SGMII_100: pdata->phy.speed = SPEED_100; break; @@ -1467,6 +1487,8 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata) xgbe_sgmii_1000_mode(pdata); } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { xgbe_sgmii_100_mode(pdata); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_10)) { + xgbe_sgmii_10_mode(pdata); } else { ret = -EINVAL; goto err_irq; @@ -1564,6 +1586,8 @@ static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata) return SPEED_1000; else if (XGBE_ADV(lks, 100baseT_Full)) return SPEED_100; + else if (XGBE_ADV(lks, 10baseT_Full)) + return SPEED_10; return SPEED_UNKNOWN; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index c731a04731f8..f4683d53e58c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -124,6 +124,7 @@ #include "xgbe.h" #include "xgbe-common.h" +#define XGBE_PHY_PORT_SPEED_10 BIT(0) #define XGBE_PHY_PORT_SPEED_100 BIT(1) #define XGBE_PHY_PORT_SPEED_1000 BIT(2) #define XGBE_PHY_PORT_SPEED_2500 BIT(3) @@ -599,20 +600,27 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata) return -ETIMEDOUT; } -static int xgbe_phy_mdio_mii_write(struct xgbe_prv_data *pdata, int addr, - int reg, u16 val) +static int xgbe_phy_mdio_mii_write_c22(struct xgbe_prv_data *pdata, int addr, + int reg, u16 val) { struct xgbe_phy_data *phy_data = pdata->phy_data; - if (reg & MII_ADDR_C45) { - if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45) - return -ENOTSUPP; - } else { - if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22) - return -ENOTSUPP; - } + if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22) + return -EOPNOTSUPP; - return pdata->hw_if.write_ext_mii_regs(pdata, addr, reg, val); + return pdata->hw_if.write_ext_mii_regs_c22(pdata, addr, reg, val); +} + +static int xgbe_phy_mdio_mii_write_c45(struct xgbe_prv_data *pdata, int addr, + int devad, int reg, u16 val) +{ + struct 
xgbe_phy_data *phy_data = pdata->phy_data; + + if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45) + return -EOPNOTSUPP; + + return pdata->hw_if.write_ext_mii_regs_c45(pdata, addr, devad, + reg, val); } static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, u16 val) @@ -637,7 +645,8 @@ static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, u16 val) return ret; } -static int xgbe_phy_mii_write(struct mii_bus *mii, int addr, int reg, u16 val) +static int xgbe_phy_mii_write_c22(struct mii_bus *mii, int addr, int reg, + u16 val) { struct xgbe_prv_data *pdata = mii->priv; struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -650,29 +659,58 @@ static int xgbe_phy_mii_write(struct mii_bus *mii, int addr, int reg, u16 val) if (phy_data->conn_type == XGBE_CONN_TYPE_SFP) ret = xgbe_phy_i2c_mii_write(pdata, reg, val); else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO) - ret = xgbe_phy_mdio_mii_write(pdata, addr, reg, val); + ret = xgbe_phy_mdio_mii_write_c22(pdata, addr, reg, val); else - ret = -ENOTSUPP; + ret = -EOPNOTSUPP; xgbe_phy_put_comm_ownership(pdata); return ret; } -static int xgbe_phy_mdio_mii_read(struct xgbe_prv_data *pdata, int addr, - int reg) +static int xgbe_phy_mii_write_c45(struct mii_bus *mii, int addr, int devad, + int reg, u16 val) { + struct xgbe_prv_data *pdata = mii->priv; struct xgbe_phy_data *phy_data = pdata->phy_data; + int ret; - if (reg & MII_ADDR_C45) { - if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45) - return -ENOTSUPP; - } else { - if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22) - return -ENOTSUPP; - } + ret = xgbe_phy_get_comm_ownership(pdata); + if (ret) + return ret; - return pdata->hw_if.read_ext_mii_regs(pdata, addr, reg); + if (phy_data->conn_type == XGBE_CONN_TYPE_SFP) + ret = -EOPNOTSUPP; + else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO) + ret = xgbe_phy_mdio_mii_write_c45(pdata, addr, devad, reg, val); + else + ret = -EOPNOTSUPP; + + xgbe_phy_put_comm_ownership(pdata); + + return ret; +} + +static int xgbe_phy_mdio_mii_read_c22(struct xgbe_prv_data *pdata, int addr, + int reg) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22) + return -EOPNOTSUPP; + + return pdata->hw_if.read_ext_mii_regs_c22(pdata, addr, reg); +} + +static int xgbe_phy_mdio_mii_read_c45(struct xgbe_prv_data *pdata, int addr, + int devad, int reg) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45) + return -EOPNOTSUPP; + + return pdata->hw_if.read_ext_mii_regs_c45(pdata, addr, devad, reg); } static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg) @@ -697,7 +735,7 @@ static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg) return ret; } -static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg) +static int xgbe_phy_mii_read_c22(struct mii_bus *mii, int addr, int reg) { struct xgbe_prv_data *pdata = mii->priv; struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -710,7 +748,30 @@ static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg) if (phy_data->conn_type == XGBE_CONN_TYPE_SFP) ret = xgbe_phy_i2c_mii_read(pdata, reg); else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO) - ret = xgbe_phy_mdio_mii_read(pdata, addr, reg); + ret = xgbe_phy_mdio_mii_read_c22(pdata, addr, reg); + else + ret = -EOPNOTSUPP; + + xgbe_phy_put_comm_ownership(pdata); + + return ret; +} + +static int xgbe_phy_mii_read_c45(struct mii_bus *mii, int addr, int devad, + int reg) +{ + struct xgbe_prv_data *pdata = 
mii->priv; + struct xgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + ret = xgbe_phy_get_comm_ownership(pdata); + if (ret) + return ret; + + if (phy_data->conn_type == XGBE_CONN_TYPE_SFP) + ret = -EOPNOTSUPP; + else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO) + ret = xgbe_phy_mdio_mii_read_c45(pdata, addr, devad, reg); else ret = -ENOTSUPP; @@ -759,6 +820,8 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) XGBE_SET_SUP(lks, Pause); XGBE_SET_SUP(lks, Asym_Pause); if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) { + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) + XGBE_SET_SUP(lks, 10baseT_Full); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) XGBE_SET_SUP(lks, 100baseT_Full); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) @@ -1542,6 +1605,16 @@ static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata) xgbe_phy_phydev_flowctrl(pdata); switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) { + case XGBE_SGMII_AN_LINK_SPEED_10: + if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) { + XGBE_SET_LP_ADV(lks, 10baseT_Full); + mode = XGBE_MODE_SGMII_10; + } else { + /* Half-duplex not supported */ + XGBE_SET_LP_ADV(lks, 10baseT_Half); + mode = XGBE_MODE_UNKNOWN; + } + break; case XGBE_SGMII_AN_LINK_SPEED_100: if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) { XGBE_SET_LP_ADV(lks, 100baseT_Full); @@ -1658,7 +1731,10 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata) switch (phy_data->sfp_base) { case XGBE_SFP_BASE_1000_T: if (phy_data->phydev && - (phy_data->phydev->speed == SPEED_100)) + (phy_data->phydev->speed == SPEED_10)) + mode = XGBE_MODE_SGMII_10; + else if (phy_data->phydev && + (phy_data->phydev->speed == SPEED_100)) mode = XGBE_MODE_SGMII_100; else mode = XGBE_MODE_SGMII_1000; @@ -1673,7 +1749,10 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata) break; default: if (phy_data->phydev && - (phy_data->phydev->speed == SPEED_100)) + (phy_data->phydev->speed == SPEED_10)) + mode = XGBE_MODE_SGMII_10; + else if (phy_data->phydev && + (phy_data->phydev->speed == SPEED_100)) mode = XGBE_MODE_SGMII_100; else mode = XGBE_MODE_SGMII_1000; @@ -1910,8 +1989,8 @@ static int xgbe_phy_set_redrv_mode_mdio(struct xgbe_prv_data *pdata, redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000); redrv_val = (u16)mode; - return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr, - redrv_reg, redrv_val); + return pdata->hw_if.write_ext_mii_regs_c22(pdata, phy_data->redrv_addr, + redrv_reg, redrv_val); } static int xgbe_phy_set_redrv_mode_i2c(struct xgbe_prv_data *pdata, @@ -2127,6 +2206,20 @@ static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata) netif_dbg(pdata, link, pdata->netdev, "100MbE SGMII mode set\n"); } +static void xgbe_phy_sgmii_10_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + xgbe_phy_set_redrv_mode(pdata); + + /* 10M/SGMII */ + xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_10MBITS); + + phy_data->cur_mode = XGBE_MODE_SGMII_10; + + netif_dbg(pdata, link, pdata->netdev, "10MbE SGMII mode set\n"); +} + static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -2185,6 +2278,7 @@ static enum xgbe_mode xgbe_phy_switch_baset_mode(struct xgbe_prv_data *pdata) return xgbe_phy_cur_mode(pdata); switch (xgbe_phy_cur_mode(pdata)) { + case XGBE_MODE_SGMII_10: case XGBE_MODE_SGMII_100: case XGBE_MODE_SGMII_1000: return XGBE_MODE_KR; 
@@ -2252,6 +2346,8 @@ static enum xgbe_mode xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { + case SPEED_10: + return XGBE_MODE_SGMII_10; case SPEED_100: return XGBE_MODE_SGMII_100; case SPEED_1000: @@ -2269,6 +2365,8 @@ static enum xgbe_mode xgbe_phy_get_sfp_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { + case SPEED_10: + return XGBE_MODE_SGMII_10; case SPEED_100: return XGBE_MODE_SGMII_100; case SPEED_1000: @@ -2343,6 +2441,9 @@ static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) case XGBE_MODE_KR: xgbe_phy_kr_mode(pdata); break; + case XGBE_MODE_SGMII_10: + xgbe_phy_sgmii_10_mode(pdata); + break; case XGBE_MODE_SGMII_100: xgbe_phy_sgmii_100_mode(pdata); break; @@ -2399,6 +2500,9 @@ static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata, struct ethtool_link_ksettings *lks = &pdata->phy.lks; switch (mode) { + case XGBE_MODE_SGMII_10: + return xgbe_phy_check_mode(pdata, mode, + XGBE_ADV(lks, 10baseT_Full)); case XGBE_MODE_SGMII_100: return xgbe_phy_check_mode(pdata, mode, XGBE_ADV(lks, 100baseT_Full)); @@ -2428,6 +2532,11 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata, return false; return xgbe_phy_check_mode(pdata, mode, XGBE_ADV(lks, 1000baseX_Full)); + case XGBE_MODE_SGMII_10: + if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) + return false; + return xgbe_phy_check_mode(pdata, mode, + XGBE_ADV(lks, 10baseT_Full)); case XGBE_MODE_SGMII_100: if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) return false; @@ -2520,10 +2629,17 @@ static bool xgbe_phy_valid_speed_basex_mode(struct xgbe_phy_data *phy_data, } } -static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data, +static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_prv_data *pdata, int speed) { + struct xgbe_phy_data *phy_data = pdata->phy_data; + unsigned int ver; + switch (speed) { + case SPEED_10: + /* Supported in ver >= 30H */ + ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER); + return (ver >= 0x30) ? 
true : false; case SPEED_100: case SPEED_1000: return true; @@ -2536,10 +2652,17 @@ static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data, } } -static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_phy_data *phy_data, +static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_prv_data *pdata, int speed) { + struct xgbe_phy_data *phy_data = pdata->phy_data; + unsigned int ver; + switch (speed) { + case SPEED_10: + /* Supported in ver >= 30H */ + ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER); + return (ver >= 0x30) && (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000); case SPEED_100: return (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000); case SPEED_1000: @@ -2586,12 +2709,12 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed) case XGBE_PORT_MODE_1000BASE_T: case XGBE_PORT_MODE_NBASE_T: case XGBE_PORT_MODE_10GBASE_T: - return xgbe_phy_valid_speed_baset_mode(phy_data, speed); + return xgbe_phy_valid_speed_baset_mode(pdata, speed); case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_10GBASE_R: return xgbe_phy_valid_speed_basex_mode(phy_data, speed); case XGBE_PORT_MODE_SFP: - return xgbe_phy_valid_speed_sfp_mode(phy_data, speed); + return xgbe_phy_valid_speed_sfp_mode(pdata, speed); default: return false; } @@ -2862,6 +2985,12 @@ static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata) static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; + unsigned int ver; + + /* 10 Mbps speed is not supported in ver < 30H */ + ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER); + if (ver < 0x30 && (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10)) + return true; switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: @@ -2875,7 +3004,8 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata) return false; break; case XGBE_PORT_MODE_1000BASE_T: - if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || + if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) || + (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)) return false; break; @@ -2884,13 +3014,15 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata) return false; break; case XGBE_PORT_MODE_NBASE_T: - if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || + if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) || + (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500)) return false; break; case XGBE_PORT_MODE_10GBASE_T: - if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || + if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) || + (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)) return false; @@ -2900,7 +3032,8 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata) return false; break; case XGBE_PORT_MODE_SFP: - if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || + if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) || + (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)) return false; @@ -3269,6 +3402,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) XGBE_SET_SUP(lks, Pause); XGBE_SET_SUP(lks, Asym_Pause); XGBE_SET_SUP(lks, TP); + if (phy_data->port_speeds & 
XGBE_PHY_PORT_SPEED_10) { + XGBE_SET_SUP(lks, 10baseT_Full); + phy_data->start_mode = XGBE_MODE_SGMII_10; + } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { XGBE_SET_SUP(lks, 100baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_100; @@ -3299,6 +3436,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) XGBE_SET_SUP(lks, Pause); XGBE_SET_SUP(lks, Asym_Pause); XGBE_SET_SUP(lks, TP); + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) { + XGBE_SET_SUP(lks, 10baseT_Full); + phy_data->start_mode = XGBE_MODE_SGMII_10; + } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { XGBE_SET_SUP(lks, 100baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_100; @@ -3321,6 +3462,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) XGBE_SET_SUP(lks, Pause); XGBE_SET_SUP(lks, Asym_Pause); XGBE_SET_SUP(lks, TP); + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) { + XGBE_SET_SUP(lks, 10baseT_Full); + phy_data->start_mode = XGBE_MODE_SGMII_10; + } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { XGBE_SET_SUP(lks, 100baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_100; @@ -3361,6 +3506,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) XGBE_SET_SUP(lks, Asym_Pause); XGBE_SET_SUP(lks, TP); XGBE_SET_SUP(lks, FIBRE); + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) + phy_data->start_mode = XGBE_MODE_SGMII_10; if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) phy_data->start_mode = XGBE_MODE_SGMII_100; if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) @@ -3415,8 +3562,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) mii->priv = pdata; mii->name = "amd-xgbe-mii"; - mii->read = xgbe_phy_mii_read; - mii->write = xgbe_phy_mii_write; + mii->read = xgbe_phy_mii_read_c22; + mii->write = xgbe_phy_mii_write_c22; + mii->read_c45 = xgbe_phy_mii_read_c45; + mii->write_c45 = xgbe_phy_mii_write_c45; mii->parent = pdata->dev; mii->phy_mask = ~0; snprintf(mii->id, sizeof(mii->id), "%s", dev_name(pdata->dev)); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 7a41367c437d..16e73df3e9b9 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -294,6 +294,7 @@ #define XGBE_SGMII_AN_LINK_STATUS BIT(1) #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) +#define XGBE_SGMII_AN_LINK_SPEED_10 0x00 #define XGBE_SGMII_AN_LINK_SPEED_100 0x04 #define XGBE_SGMII_AN_LINK_SPEED_1000 0x08 #define XGBE_SGMII_AN_LINK_DUPLEX BIT(4) @@ -595,6 +596,7 @@ enum xgbe_mode { XGBE_MODE_KX_2500, XGBE_MODE_KR, XGBE_MODE_X, + XGBE_MODE_SGMII_10, XGBE_MODE_SGMII_100, XGBE_MODE_SGMII_1000, XGBE_MODE_SFI, @@ -774,8 +776,11 @@ struct xgbe_hw_if { int (*set_ext_mii_mode)(struct xgbe_prv_data *, unsigned int, enum xgbe_mdio_mode); - int (*read_ext_mii_regs)(struct xgbe_prv_data *, int, int); - int (*write_ext_mii_regs)(struct xgbe_prv_data *, int, int, u16); + int (*read_ext_mii_regs_c22)(struct xgbe_prv_data *, int, int); + int (*write_ext_mii_regs_c22)(struct xgbe_prv_data *, int, int, u16); + int (*read_ext_mii_regs_c45)(struct xgbe_prv_data *, int, int, int); + int (*write_ext_mii_regs_c45)(struct xgbe_prv_data *, int, int, int, + u16); int (*set_gpio)(struct xgbe_prv_data *, unsigned int); int (*clr_gpio)(struct xgbe_prv_data *, unsigned int); diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index d30d11872719..306393f8eeca 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1905,7 +1905,6 @@ static void 
alx_remove(struct pci_dev *pdev) free_netdev(alx->dev); } -#ifdef CONFIG_PM_SLEEP static int alx_suspend(struct device *dev) { struct alx_priv *alx = dev_get_drvdata(dev); @@ -1951,12 +1950,7 @@ unlock: return err; } -static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); -#define ALX_PM_OPS (&alx_pm_ops) -#else -#define ALX_PM_OPS NULL -#endif - +static DEFINE_SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) @@ -2055,7 +2049,7 @@ static struct pci_driver alx_driver = { .probe = alx_probe, .remove = alx_remove, .err_handler = &alx_err_handlers, - .driver.pm = ALX_PM_OPS, + .driver.pm = pm_sleep_ptr(&alx_pm_ops), }; module_pci_driver(alx_driver); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 9c410f93a103..14dfec4db8f9 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -768,8 +768,6 @@ #define gem_readl_n(port, reg, idx) (port)->macb_reg_readl((port), GEM_##reg + idx * 4) #define gem_writel_n(port, reg, idx, value) (port)->macb_reg_writel((port), GEM_##reg + idx * 4, (value)) -#define PTP_TS_BUFFER_SIZE 128 /* must be power of 2 */ - /* Conditional GEM/MACB macros. These perform the operation to the correct * register dependent on whether the device is a GEM or a MACB. For registers * and bitfields that are common across both devices, use macb_{read,write}l @@ -819,11 +817,6 @@ struct macb_dma_desc_ptp { u32 ts_1; u32 ts_2; }; - -struct gem_tx_ts { - struct sk_buff *skb; - struct macb_dma_desc_ptp desc_ptp; -}; #endif /* DMA descriptor bitfields */ @@ -1224,12 +1217,6 @@ struct macb_queue { void *rx_buffers; struct napi_struct napi_rx; struct queue_stats stats; - -#ifdef CONFIG_MACB_USE_HWSTAMP - struct work_struct tx_ts_task; - unsigned int tx_ts_head, tx_ts_tail; - struct gem_tx_ts tx_timestamps[PTP_TS_BUFFER_SIZE]; -#endif }; struct ethtool_rx_fs_item { @@ -1340,14 +1327,14 @@ enum macb_bd_control { void gem_ptp_init(struct net_device *ndev); void gem_ptp_remove(struct net_device *ndev); -int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *des); +void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc); void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc); -static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc) +static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { - if (queue->bp->tstamp_config.tx_type == TSTAMP_DISABLED) - return -ENOTSUPP; + if (bp->tstamp_config.tx_type == TSTAMP_DISABLED) + return; - return gem_ptp_txstamp(queue, skb, desc); + gem_ptp_txstamp(bp, skb, desc); } static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) @@ -1363,11 +1350,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd); static inline void gem_ptp_init(struct net_device *ndev) { } static inline void gem_ptp_remove(struct net_device *ndev) { } -static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc) -{ - return -1; -} - +static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { } static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { } #endif diff --git 
a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 72e42820713d..41964fd02452 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -334,7 +334,7 @@ static int macb_mdio_wait_for_idle(struct macb *bp) 1, MACB_MDIO_TIMEOUT); } -static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +static int macb_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) { struct macb *bp = bus->priv; int status; @@ -347,35 +347,62 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) if (status < 0) goto mdio_read_exit; - if (regnum & MII_ADDR_C45) { - macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) - | MACB_BF(RW, MACB_MAN_C45_ADDR) - | MACB_BF(PHYA, mii_id) - | MACB_BF(REGA, (regnum >> 16) & 0x1F) - | MACB_BF(DATA, regnum & 0xFFFF) - | MACB_BF(CODE, MACB_MAN_C45_CODE))); - - status = macb_mdio_wait_for_idle(bp); - if (status < 0) - goto mdio_read_exit; - - macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) - | MACB_BF(RW, MACB_MAN_C45_READ) - | MACB_BF(PHYA, mii_id) - | MACB_BF(REGA, (regnum >> 16) & 0x1F) - | MACB_BF(CODE, MACB_MAN_C45_CODE))); - } else { - macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) - | MACB_BF(RW, MACB_MAN_C22_READ) - | MACB_BF(PHYA, mii_id) - | MACB_BF(REGA, regnum) - | MACB_BF(CODE, MACB_MAN_C22_CODE))); + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) + | MACB_BF(RW, MACB_MAN_C22_READ) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, regnum) + | MACB_BF(CODE, MACB_MAN_C22_CODE))); + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) + goto mdio_read_exit; + + status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); + +mdio_read_exit: + pm_runtime_mark_last_busy(&bp->pdev->dev); + pm_runtime_put_autosuspend(&bp->pdev->dev); +mdio_pm_exit: + return status; +} + +static int macb_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad, + int regnum) +{ + struct macb *bp = bus->priv; + int status; + + status = pm_runtime_get_sync(&bp->pdev->dev); + if (status < 0) { + pm_runtime_put_noidle(&bp->pdev->dev); + goto mdio_pm_exit; } status = macb_mdio_wait_for_idle(bp); if (status < 0) goto mdio_read_exit; + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) + | MACB_BF(RW, MACB_MAN_C45_ADDR) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, devad & 0x1F) + | MACB_BF(DATA, regnum & 0xFFFF) + | MACB_BF(CODE, MACB_MAN_C45_CODE))); + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) + goto mdio_read_exit; + + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) + | MACB_BF(RW, MACB_MAN_C45_READ) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, devad & 0x1F) + | MACB_BF(CODE, MACB_MAN_C45_CODE))); + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) + goto mdio_read_exit; + status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); mdio_read_exit: @@ -385,8 +412,8 @@ mdio_pm_exit: return status; } -static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, - u16 value) +static int macb_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, + u16 value) { struct macb *bp = bus->priv; int status; @@ -399,37 +426,63 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, if (status < 0) goto mdio_write_exit; - if (regnum & MII_ADDR_C45) { - macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) - | MACB_BF(RW, MACB_MAN_C45_ADDR) - | MACB_BF(PHYA, mii_id) - | MACB_BF(REGA, (regnum >> 16) & 0x1F) - | MACB_BF(DATA, regnum & 0xFFFF) - | MACB_BF(CODE, MACB_MAN_C45_CODE))); - - status = macb_mdio_wait_for_idle(bp); - if (status < 
0) - goto mdio_write_exit; - - macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) - | MACB_BF(RW, MACB_MAN_C45_WRITE) - | MACB_BF(PHYA, mii_id) - | MACB_BF(REGA, (regnum >> 16) & 0x1F) - | MACB_BF(CODE, MACB_MAN_C45_CODE) - | MACB_BF(DATA, value))); - } else { - macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) - | MACB_BF(RW, MACB_MAN_C22_WRITE) - | MACB_BF(PHYA, mii_id) - | MACB_BF(REGA, regnum) - | MACB_BF(CODE, MACB_MAN_C22_CODE) - | MACB_BF(DATA, value))); + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) + | MACB_BF(RW, MACB_MAN_C22_WRITE) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, regnum) + | MACB_BF(CODE, MACB_MAN_C22_CODE) + | MACB_BF(DATA, value))); + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) + goto mdio_write_exit; + +mdio_write_exit: + pm_runtime_mark_last_busy(&bp->pdev->dev); + pm_runtime_put_autosuspend(&bp->pdev->dev); +mdio_pm_exit: + return status; +} + +static int macb_mdio_write_c45(struct mii_bus *bus, int mii_id, + int devad, int regnum, + u16 value) +{ + struct macb *bp = bus->priv; + int status; + + status = pm_runtime_get_sync(&bp->pdev->dev); + if (status < 0) { + pm_runtime_put_noidle(&bp->pdev->dev); + goto mdio_pm_exit; } status = macb_mdio_wait_for_idle(bp); if (status < 0) goto mdio_write_exit; + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) + | MACB_BF(RW, MACB_MAN_C45_ADDR) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, devad & 0x1F) + | MACB_BF(DATA, regnum & 0xFFFF) + | MACB_BF(CODE, MACB_MAN_C45_CODE))); + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) + goto mdio_write_exit; + + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) + | MACB_BF(RW, MACB_MAN_C45_WRITE) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, devad & 0x1F) + | MACB_BF(CODE, MACB_MAN_C45_CODE) + | MACB_BF(DATA, value))); + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) + goto mdio_write_exit; + mdio_write_exit: pm_runtime_mark_last_busy(&bp->pdev->dev); pm_runtime_put_autosuspend(&bp->pdev->dev); @@ -902,8 +955,10 @@ static int macb_mii_init(struct macb *bp) } bp->mii_bus->name = "MACB_mii_bus"; - bp->mii_bus->read = &macb_mdio_read; - bp->mii_bus->write = &macb_mdio_write; + bp->mii_bus->read = &macb_mdio_read_c22; + bp->mii_bus->write = &macb_mdio_write_c22; + bp->mii_bus->read_c45 = &macb_mdio_read_c45; + bp->mii_bus->write_c45 = &macb_mdio_write_c45; snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", bp->pdev->name, bp->pdev->id); bp->mii_bus->priv = bp; @@ -1191,13 +1246,9 @@ static int macb_tx_complete(struct macb_queue *queue, int budget) /* First, update TX stats if needed */ if (skb) { if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - !ptp_one_step_sync(skb) && - gem_ptp_do_txstamp(queue, skb, desc) == 0) { - /* skb now belongs to timestamp buffer - * and will be removed later - */ - tx_skb->skb = NULL; - } + !ptp_one_step_sync(skb)) + gem_ptp_do_txstamp(bp, skb, desc); + netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", macb_tx_ring_wrap(bp, tail), skb->data); @@ -2253,6 +2304,12 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } +#ifdef CONFIG_MACB_USE_HWSTAMP + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + (bp->hw_dma_cap & HW_DMA_CAP_PTP)) + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +#endif + is_lso = (skb_shinfo(skb)->gso_size != 0); if (is_lso) { diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index e6cb20aaa76a..f962a95068a0 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ 
b/drivers/net/ethernet/cadence/macb_ptp.c @@ -292,79 +292,39 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, } } -static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb, - struct macb_dma_desc_ptp *desc_ptp) +void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb, + struct macb_dma_desc *desc) { struct skb_shared_hwtstamps shhwtstamps; - struct timespec64 ts; - - gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts); - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); - skb_tstamp_tx(skb, &shhwtstamps); -} - -int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, - struct macb_dma_desc *desc) -{ - unsigned long tail = READ_ONCE(queue->tx_ts_tail); - unsigned long head = queue->tx_ts_head; struct macb_dma_desc_ptp *desc_ptp; - struct gem_tx_ts *tx_timestamp; - - if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl)) - return -EINVAL; + struct timespec64 ts; - if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0) - return -ENOMEM; + if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl)) { + dev_warn_ratelimited(&bp->pdev->dev, + "Timestamp not set in TX BD as expected\n"); + return; + } - desc_ptp = macb_ptp_desc(queue->bp, desc); + desc_ptp = macb_ptp_desc(bp, desc); /* Unlikely but check */ - if (!desc_ptp) - return -EINVAL; - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - tx_timestamp = &queue->tx_timestamps[head]; - tx_timestamp->skb = skb; + if (!desc_ptp) { + dev_warn_ratelimited(&bp->pdev->dev, + "Timestamp not supported in BD\n"); + return; + } + /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */ dma_rmb(); - tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1; - tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2; - /* move head */ - smp_store_release(&queue->tx_ts_head, - (head + 1) & (PTP_TS_BUFFER_SIZE - 1)); - - schedule_work(&queue->tx_ts_task); - return 0; -} + gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts); -static void gem_tx_timestamp_flush(struct work_struct *work) -{ - struct macb_queue *queue = - container_of(work, struct macb_queue, tx_ts_task); - unsigned long head, tail; - struct gem_tx_ts *tx_ts; - - /* take current head */ - head = smp_load_acquire(&queue->tx_ts_head); - tail = queue->tx_ts_tail; - - while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) { - tx_ts = &queue->tx_timestamps[tail]; - gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp); - /* cleanup */ - dev_kfree_skb_any(tx_ts->skb); - /* remove old tail */ - smp_store_release(&queue->tx_ts_tail, - (tail + 1) & (PTP_TS_BUFFER_SIZE - 1)); - tail = queue->tx_ts_tail; - } + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + skb_tstamp_tx(skb, &shhwtstamps); } void gem_ptp_init(struct net_device *dev) { struct macb *bp = netdev_priv(dev); - struct macb_queue *queue; - unsigned int q; bp->ptp_clock_info = gem_ptp_caps_template; @@ -384,11 +344,6 @@ void gem_ptp_init(struct net_device *dev) } spin_lock_init(&bp->tsu_clk_lock); - for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { - queue->tx_ts_head = 0; - queue->tx_ts_tail = 0; - INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush); - } gem_ptp_init_tsu(bp); diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile index b6e3b16623de..b98135f65eb7 100644 --- a/drivers/net/ethernet/engleder/Makefile +++ b/drivers/net/ethernet/engleder/Makefile @@ -6,5 +6,5 @@ obj-$(CONFIG_TSNEP) += tsnep.o tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \ - tsnep_rxnfc.o 
$(tsnep-y) + tsnep_rxnfc.o tsnep_xdp.o tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h index f93ba48bac3f..058c2bcf31a7 100644 --- a/drivers/net/ethernet/engleder/tsnep.h +++ b/drivers/net/ethernet/engleder/tsnep.h @@ -65,7 +65,11 @@ struct tsnep_tx_entry { u32 properties; - struct sk_buff *skb; + u32 type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; size_t len; DEFINE_DMA_UNMAP_ADDR(dma); }; @@ -78,8 +82,6 @@ struct tsnep_tx { void *page[TSNEP_RING_PAGE_COUNT]; dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT]; - /* TX ring lock */ - spinlock_t lock; struct tsnep_tx_entry entry[TSNEP_RING_SIZE]; int write; int read; @@ -107,6 +109,7 @@ struct tsnep_rx { struct tsnep_adapter *adapter; void __iomem *addr; int queue_index; + int tx_queue_index; void *page[TSNEP_RING_PAGE_COUNT]; dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT]; @@ -123,6 +126,8 @@ struct tsnep_rx { u32 dropped; u32 multicast; u32 alloc_failed; + + struct xdp_rxq_info xdp_rxq; }; struct tsnep_queue { @@ -172,6 +177,8 @@ struct tsnep_adapter { int rxnfc_count; int rxnfc_max; + struct bpf_prog *xdp_prog; + int num_tx_queues; struct tsnep_tx tx[TSNEP_MAX_QUEUES]; int num_rx_queues; @@ -204,6 +211,9 @@ int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter, int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter, struct ethtool_rxnfc *cmd); +int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack); + #if IS_ENABLED(CONFIG_TSNEP_SELFTESTS) int tsnep_ethtool_get_test_count(void); void tsnep_ethtool_get_test_strings(u8 *data); diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index bf0190e1d2ea..5a909c1c11bc 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -26,9 +26,11 @@ #include <linux/etherdevice.h> #include <linux/phy.h> #include <linux/iopoll.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> -#define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) -#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4) +#define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN) +#define TSNEP_HEADROOM ALIGN(TSNEP_RX_OFFSET, 4) #define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) @@ -43,6 +45,14 @@ #define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \ ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1) +#define TSNEP_TX_TYPE_SKB BIT(0) +#define TSNEP_TX_TYPE_SKB_FRAG BIT(1) +#define TSNEP_TX_TYPE_XDP_TX BIT(2) +#define TSNEP_TX_TYPE_XDP_NDO BIT(3) + +#define TSNEP_XDP_TX BIT(0) +#define TSNEP_XDP_REDIRECT BIT(1) + static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask) { iowrite32(mask, adapter->addr + ECM_INT_ENABLE); @@ -306,10 +316,12 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, struct tsnep_tx_entry *entry = &tx->entry[index]; entry->properties = 0; + /* xdpf is union with skb */ if (entry->skb) { entry->properties = length & TSNEP_DESC_LENGTH_MASK; entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; - if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) + if ((entry->type & TSNEP_TX_TYPE_SKB) && + (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)) entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG; /* toggle user flag to prevent false acknowledge @@ -378,15 +390,19 @@ static int tsnep_tx_map(struct sk_buff *skb, struct 
tsnep_tx *tx, int count) for (i = 0; i < count; i++) { entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE]; - if (i == 0) { + if (!i) { len = skb_headlen(skb); dma = dma_map_single(dmadev, skb->data, len, DMA_TO_DEVICE); + + entry->type = TSNEP_TX_TYPE_SKB; } else { len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]); dma = skb_frag_dma_map(dmadev, &skb_shinfo(skb)->frags[i - 1], 0, len, DMA_TO_DEVICE); + + entry->type = TSNEP_TX_TYPE_SKB_FRAG; } if (dma_mapping_error(dmadev, dma)) return -ENOMEM; @@ -413,12 +429,13 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) entry = &tx->entry[(index + i) % TSNEP_RING_SIZE]; if (entry->len) { - if (i == 0) + if (entry->type & TSNEP_TX_TYPE_SKB) dma_unmap_single(dmadev, dma_unmap_addr(entry, dma), dma_unmap_len(entry, len), DMA_TO_DEVICE); - else + else if (entry->type & + (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_XDP_NDO)) dma_unmap_page(dmadev, dma_unmap_addr(entry, dma), dma_unmap_len(entry, len), @@ -434,7 +451,6 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, struct tsnep_tx *tx) { - unsigned long flags; int count = 1; struct tsnep_tx_entry *entry; int length; @@ -444,16 +460,12 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, if (skb_shinfo(skb)->nr_frags > 0) count += skb_shinfo(skb)->nr_frags; - spin_lock_irqsave(&tx->lock, flags); - if (tsnep_tx_desc_available(tx) < count) { /* ring full, shall not happen because queue is stopped if full * below */ netif_stop_queue(tx->adapter->netdev); - spin_unlock_irqrestore(&tx->lock, flags); - return NETDEV_TX_BUSY; } @@ -468,10 +480,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, tx->dropped++; - spin_unlock_irqrestore(&tx->lock, flags); - - netdev_err(tx->adapter->netdev, "TX DMA map failed\n"); - return NETDEV_TX_OK; } length = retval; @@ -481,7 +489,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, for (i = 0; i < count; i++) tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length, - i == (count - 1)); + i == count - 1); tx->write = (tx->write + count) % TSNEP_RING_SIZE; skb_tx_timestamp(skb); @@ -496,20 +504,146 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, netif_stop_queue(tx->adapter->netdev); } - spin_unlock_irqrestore(&tx->lock, flags); - return NETDEV_TX_OK; } +static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, + struct skb_shared_info *shinfo, int count, u32 type) +{ + struct device *dmadev = tx->adapter->dmadev; + struct tsnep_tx_entry *entry; + struct page *page; + skb_frag_t *frag; + unsigned int len; + int map_len = 0; + dma_addr_t dma; + void *data; + int i; + + frag = NULL; + len = xdpf->len; + for (i = 0; i < count; i++) { + entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE]; + if (type & TSNEP_TX_TYPE_XDP_NDO) { + data = unlikely(frag) ? skb_frag_address(frag) : + xdpf->data; + dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(dmadev, dma)) + return -ENOMEM; + + entry->type = TSNEP_TX_TYPE_XDP_NDO; + } else { + page = unlikely(frag) ? 
skb_frag_page(frag) : + virt_to_page(xdpf->data); + dma = page_pool_get_dma_addr(page); + if (unlikely(frag)) + dma += skb_frag_off(frag); + else + dma += sizeof(*xdpf) + xdpf->headroom; + dma_sync_single_for_device(dmadev, dma, len, + DMA_BIDIRECTIONAL); + + entry->type = TSNEP_TX_TYPE_XDP_TX; + } + + entry->len = len; + dma_unmap_addr_set(entry, dma, dma); + + entry->desc->tx = __cpu_to_le64(dma); + + map_len += len; + + if (i + 1 < count) { + frag = &shinfo->frags[i]; + len = skb_frag_size(frag); + } + } + + return map_len; +} + +/* This function requires __netif_tx_lock is held by the caller. */ +static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf, + struct tsnep_tx *tx, u32 type) +{ + struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf); + struct tsnep_tx_entry *entry; + int count, length, retval, i; + + count = 1; + if (unlikely(xdp_frame_has_frags(xdpf))) + count += shinfo->nr_frags; + + /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS + * will be available for normal TX path and queue is stopped there if + * necessary + */ + if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count)) + return false; + + entry = &tx->entry[tx->write]; + entry->xdpf = xdpf; + + retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type); + if (retval < 0) { + tsnep_tx_unmap(tx, tx->write, count); + entry->xdpf = NULL; + + tx->dropped++; + + return false; + } + length = retval; + + for (i = 0; i < count; i++) + tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length, + i == count - 1); + tx->write = (tx->write + count) % TSNEP_RING_SIZE; + + /* descriptor properties shall be valid before hardware is notified */ + dma_wmb(); + + return true; +} + +static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) +{ + iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); +} + +static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter, + struct xdp_buff *xdp, + struct netdev_queue *tx_nq, struct tsnep_tx *tx) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + bool xmit; + + if (unlikely(!xdpf)) + return false; + + __netif_tx_lock(tx_nq, smp_processor_id()); + + xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX); + + /* Avoid transmit queue timeout since we share it with the slow path */ + if (xmit) + txq_trans_cond_update(tx_nq); + + __netif_tx_unlock(tx_nq); + + return xmit; +} + static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) { - unsigned long flags; - int budget = 128; struct tsnep_tx_entry *entry; - int count; + struct netdev_queue *nq; + int budget = 128; int length; + int count; - spin_lock_irqsave(&tx->lock, flags); + nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); + __netif_tx_lock(nq, smp_processor_id()); do { if (tx->read == tx->write) @@ -527,12 +661,17 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) dma_rmb(); count = 1; - if (skb_shinfo(entry->skb)->nr_frags > 0) + if ((entry->type & TSNEP_TX_TYPE_SKB) && + skb_shinfo(entry->skb)->nr_frags > 0) count += skb_shinfo(entry->skb)->nr_frags; + else if (!(entry->type & TSNEP_TX_TYPE_SKB) && + xdp_frame_has_frags(entry->xdpf)) + count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags; length = tsnep_tx_unmap(tx, tx->read, count); - if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) && + if ((entry->type & TSNEP_TX_TYPE_SKB) && + (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) && (__le32_to_cpu(entry->desc_wb->properties) & TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) { struct skb_shared_hwtstamps 
hwtstamps; @@ -552,7 +691,11 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) skb_tstamp_tx(entry->skb, &hwtstamps); } - napi_consume_skb(entry->skb, budget); + if (entry->type & TSNEP_TX_TYPE_SKB) + napi_consume_skb(entry->skb, napi_budget); + else + xdp_return_frame_rx_napi(entry->xdpf); + /* xdpf is union with skb */ entry->skb = NULL; tx->read = (tx->read + count) % TSNEP_RING_SIZE; @@ -568,18 +711,19 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) netif_wake_queue(tx->adapter->netdev); } - spin_unlock_irqrestore(&tx->lock, flags); + __netif_tx_unlock(nq); - return (budget != 0); + return budget != 0; } static bool tsnep_tx_pending(struct tsnep_tx *tx) { - unsigned long flags; struct tsnep_tx_entry *entry; + struct netdev_queue *nq; bool pending = false; - spin_lock_irqsave(&tx->lock, flags); + nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); + __netif_tx_lock(nq, smp_processor_id()); if (tx->read != tx->write) { entry = &tx->entry[tx->read]; @@ -589,7 +733,7 @@ static bool tsnep_tx_pending(struct tsnep_tx *tx) pending = true; } - spin_unlock_irqrestore(&tx->lock, flags); + __netif_tx_unlock(nq); return pending; } @@ -615,8 +759,6 @@ static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr, tx->owner_counter = 1; tx->increment_owner_counter = TSNEP_RING_SIZE - 1; - spin_lock_init(&tx->lock); - return 0; } @@ -692,9 +834,9 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx) pp_params.pool_size = TSNEP_RING_SIZE; pp_params.nid = dev_to_node(dmadev); pp_params.dev = dmadev; - pp_params.dma_dir = DMA_FROM_DEVICE; + pp_params.dma_dir = DMA_BIDIRECTIONAL; pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE; - pp_params.offset = TSNEP_SKB_PAD; + pp_params.offset = TSNEP_RX_OFFSET; rx->page_pool = page_pool_create(&pp_params); if (IS_ERR(rx->page_pool)) { retval = PTR_ERR(rx->page_pool); @@ -729,7 +871,7 @@ static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, entry->page = page; entry->len = TSNEP_MAX_RX_BUF_SIZE; entry->dma = page_pool_get_dma_addr(entry->page); - entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD); + entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET); } static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index) @@ -823,6 +965,62 @@ static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse) return i; } +static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, + struct xdp_buff *xdp, int *status, + struct netdev_queue *tx_nq, struct tsnep_tx *tx) +{ + unsigned int length; + unsigned int sync; + u32 act; + + length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM; + + act = bpf_prog_run_xdp(prog, xdp); + + /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ + sync = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM; + sync = max(sync, length); + + switch (act) { + case XDP_PASS: + return false; + case XDP_TX: + if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) + goto out_failure; + *status |= TSNEP_XDP_TX; + return true; + case XDP_REDIRECT: + if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) + goto out_failure; + *status |= TSNEP_XDP_REDIRECT; + return true; + default: + bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(rx->adapter->netdev, prog, act); + fallthrough; + case XDP_DROP: + page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data), + sync, true); + return true; + } +} + +static void 
tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status, + struct netdev_queue *tx_nq, struct tsnep_tx *tx) +{ + if (status & TSNEP_XDP_TX) { + __netif_tx_lock(tx_nq, smp_processor_id()); + tsnep_xdp_xmit_flush(tx); + __netif_tx_unlock(tx_nq); + } + + if (status & TSNEP_XDP_REDIRECT) + xdp_do_flush(); +} + static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, int length) { @@ -833,14 +1031,14 @@ static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE); - __skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN); + skb_reserve(skb, TSNEP_RX_OFFSET + TSNEP_RX_INLINE_METADATA_SIZE); + __skb_put(skb, length - ETH_FCS_LEN); if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) { struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); struct tsnep_rx_inline *rx_inline = (struct tsnep_rx_inline *)(page_address(page) + - TSNEP_SKB_PAD); + TSNEP_RX_OFFSET); skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; @@ -858,15 +1056,28 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, int budget) { struct device *dmadev = rx->adapter->dmadev; - int desc_available; - int done = 0; enum dma_data_direction dma_dir; struct tsnep_rx_entry *entry; + struct netdev_queue *tx_nq; + struct bpf_prog *prog; + struct xdp_buff xdp; struct sk_buff *skb; + struct tsnep_tx *tx; + int desc_available; + int xdp_status = 0; + int done = 0; int length; desc_available = tsnep_rx_desc_available(rx); dma_dir = page_pool_get_dma_dir(rx->page_pool); + prog = READ_ONCE(rx->adapter->xdp_prog); + if (prog) { + tx_nq = netdev_get_tx_queue(rx->adapter->netdev, + rx->tx_queue_index); + tx = &rx->adapter->tx[rx->tx_queue_index]; + + xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq); + } while (likely(done < budget) && (rx->read != rx->write)) { entry = &rx->entry[rx->read]; @@ -900,21 +1111,47 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, */ dma_rmb(); - prefetch(page_address(entry->page) + TSNEP_SKB_PAD); + prefetch(page_address(entry->page) + TSNEP_RX_OFFSET); length = __le32_to_cpu(entry->desc_wb->properties) & TSNEP_DESC_LENGTH_MASK; - dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD, - length, dma_dir); + dma_sync_single_range_for_cpu(dmadev, entry->dma, + TSNEP_RX_OFFSET, length, dma_dir); + + /* RX metadata with timestamps is in front of actual data, + * subtract metadata size to get length of actual data and + * consider metadata size as offset of actual data during RX + * processing + */ + length -= TSNEP_RX_INLINE_METADATA_SIZE; rx->read = (rx->read + 1) % TSNEP_RING_SIZE; desc_available++; + if (prog) { + bool consume; + + xdp_prepare_buff(&xdp, page_address(entry->page), + XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE, + length, false); + + consume = tsnep_xdp_run_prog(rx, prog, &xdp, + &xdp_status, tx_nq, tx); + if (consume) { + rx->packets++; + rx->bytes += length; + + entry->page = NULL; + + continue; + } + } + skb = tsnep_build_skb(rx, entry->page, length); if (skb) { page_pool_release_page(rx->page_pool, entry->page); rx->packets++; - rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE; + rx->bytes += length; if (skb->pkt_type == PACKET_MULTICAST) rx->multicast++; @@ -927,6 +1164,9 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, entry->page = NULL; } + if (xdp_status) + tsnep_finalize_xdp(rx->adapter, xdp_status, 
tx_nq, tx); + if (desc_available) tsnep_rx_refill(rx, desc_available, false); @@ -1083,17 +1323,73 @@ static void tsnep_free_irq(struct tsnep_queue *queue, bool first) memset(queue->name, 0, sizeof(queue->name)); } +static void tsnep_queue_close(struct tsnep_queue *queue, bool first) +{ + struct tsnep_rx *rx = queue->rx; + + tsnep_free_irq(queue, first); + + if (rx && xdp_rxq_info_is_reg(&rx->xdp_rxq)) + xdp_rxq_info_unreg(&rx->xdp_rxq); + + netif_napi_del(&queue->napi); +} + +static int tsnep_queue_open(struct tsnep_adapter *adapter, + struct tsnep_queue *queue, bool first) +{ + struct tsnep_rx *rx = queue->rx; + struct tsnep_tx *tx = queue->tx; + int retval; + + queue->adapter = adapter; + + netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll); + + if (rx) { + /* choose TX queue for XDP_TX */ + if (tx) + rx->tx_queue_index = tx->queue_index; + else if (rx->queue_index < adapter->num_tx_queues) + rx->tx_queue_index = rx->queue_index; + else + rx->tx_queue_index = 0; + + retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev, + rx->queue_index, queue->napi.napi_id); + if (retval) + goto failed; + retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, + MEM_TYPE_PAGE_POOL, + rx->page_pool); + if (retval) + goto failed; + } + + retval = tsnep_request_irq(queue, first); + if (retval) { + netif_err(adapter, drv, adapter->netdev, + "can't get assigned irq %d.\n", queue->irq); + goto failed; + } + + return 0; + +failed: + tsnep_queue_close(queue, first); + + return retval; +} + static int tsnep_netdev_open(struct net_device *netdev) { struct tsnep_adapter *adapter = netdev_priv(netdev); - int i; - void __iomem *addr; int tx_queue_index = 0; int rx_queue_index = 0; - int retval; + void __iomem *addr; + int i, retval; for (i = 0; i < adapter->num_queues; i++) { - adapter->queue[i].adapter = adapter; if (adapter->queue[i].tx) { addr = adapter->addr + TSNEP_QUEUE(tx_queue_index); retval = tsnep_tx_open(adapter, addr, tx_queue_index, @@ -1104,21 +1400,16 @@ static int tsnep_netdev_open(struct net_device *netdev) } if (adapter->queue[i].rx) { addr = adapter->addr + TSNEP_QUEUE(rx_queue_index); - retval = tsnep_rx_open(adapter, addr, - rx_queue_index, + retval = tsnep_rx_open(adapter, addr, rx_queue_index, adapter->queue[i].rx); if (retval) goto failed; rx_queue_index++; } - retval = tsnep_request_irq(&adapter->queue[i], i == 0); - if (retval) { - netif_err(adapter, drv, adapter->netdev, - "can't get assigned irq %d.\n", - adapter->queue[i].irq); + retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0); + if (retval) goto failed; - } } retval = netif_set_real_num_tx_queues(adapter->netdev, @@ -1136,8 +1427,6 @@ static int tsnep_netdev_open(struct net_device *netdev) goto phy_failed; for (i = 0; i < adapter->num_queues; i++) { - netif_napi_add(adapter->netdev, &adapter->queue[i].napi, - tsnep_poll); napi_enable(&adapter->queue[i].napi); tsnep_enable_irq(adapter, adapter->queue[i].irq_mask); @@ -1147,10 +1436,9 @@ static int tsnep_netdev_open(struct net_device *netdev) phy_failed: tsnep_disable_irq(adapter, ECM_INT_LINK); - tsnep_phy_close(adapter); failed: for (i = 0; i < adapter->num_queues; i++) { - tsnep_free_irq(&adapter->queue[i], i == 0); + tsnep_queue_close(&adapter->queue[i], i == 0); if (adapter->queue[i].rx) tsnep_rx_close(adapter->queue[i].rx); @@ -1172,9 +1460,8 @@ static int tsnep_netdev_close(struct net_device *netdev) tsnep_disable_irq(adapter, adapter->queue[i].irq_mask); napi_disable(&adapter->queue[i].napi); - netif_napi_del(&adapter->queue[i].napi); - 
tsnep_free_irq(&adapter->queue[i], i == 0); + tsnep_queue_close(&adapter->queue[i], i == 0); if (adapter->queue[i].rx) tsnep_rx_close(adapter->queue[i].rx); @@ -1327,6 +1614,67 @@ static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev, return ns_to_ktime(timestamp); } +static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct tsnep_adapter *adapter = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack); + default: + return -EOPNOTSUPP; + } +} + +static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu) +{ + if (cpu >= TSNEP_MAX_QUEUES) + cpu &= TSNEP_MAX_QUEUES - 1; + + while (cpu >= adapter->num_tx_queues) + cpu -= adapter->num_tx_queues; + + return &adapter->tx[cpu]; +} + +static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **xdp, u32 flags) +{ + struct tsnep_adapter *adapter = netdev_priv(dev); + u32 cpu = smp_processor_id(); + struct netdev_queue *nq; + struct tsnep_tx *tx; + int nxmit; + bool xmit; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + tx = tsnep_xdp_get_tx(adapter, cpu); + nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index); + + __netif_tx_lock(nq, cpu); + + for (nxmit = 0; nxmit < n; nxmit++) { + xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx, + TSNEP_TX_TYPE_XDP_NDO); + if (!xmit) + break; + + /* avoid transmit queue timeout since we share it with the slow + * path + */ + txq_trans_cond_update(nq); + } + + if (flags & XDP_XMIT_FLUSH) + tsnep_xdp_xmit_flush(tx); + + __netif_tx_unlock(nq); + + return nxmit; +} + static const struct net_device_ops tsnep_netdev_ops = { .ndo_open = tsnep_netdev_open, .ndo_stop = tsnep_netdev_close, @@ -1338,6 +1686,8 @@ static const struct net_device_ops tsnep_netdev_ops = { .ndo_set_features = tsnep_netdev_set_features, .ndo_get_tstamp = tsnep_netdev_get_tstamp, .ndo_setup_tc = tsnep_tc_setup, + .ndo_bpf = tsnep_netdev_bpf, + .ndo_xdp_xmit = tsnep_netdev_xdp_xmit, }; static int tsnep_mac_init(struct tsnep_adapter *adapter) diff --git a/drivers/net/ethernet/engleder/tsnep_xdp.c b/drivers/net/ethernet/engleder/tsnep_xdp.c new file mode 100644 index 000000000000..4d14cb1fd772 --- /dev/null +++ b/drivers/net/ethernet/engleder/tsnep_xdp.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2022 Gerhard Engleder <gerhard@engleder-embedded.com> */ + +#include <linux/if_vlan.h> +#include <net/xdp_sock_drv.h> + +#include "tsnep.h" + +int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct bpf_prog *old_prog; + + old_prog = xchg(&adapter->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 6c8c78018ce6..139fe66f8bcd 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -182,6 +182,12 @@ static int ftmac100_start_hw(struct ftmac100 *priv) if (netdev->mtu > ETH_DATA_LEN) maccr |= FTMAC100_MACCR_RX_FTL; + /* Add other bits as needed */ + if (netdev->flags & IFF_PROMISC) + maccr |= FTMAC100_MACCR_RCV_ALL; + if (netdev->flags & IFF_ALLMULTI) + maccr |= FTMAC100_MACCR_RX_MULTIPKT; + iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR); return 0; } diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig index cdc0ff89388a..6f6d07324d3b 100644 --- 
a/drivers/net/ethernet/freescale/enetc/Kconfig +++ b/drivers/net/ethernet/freescale/enetc/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config FSL_ENETC tristate "ENETC PF driver" - depends on PCI && PCI_MSI + depends on PCI_MSI select FSL_ENETC_IERB select FSL_ENETC_MDIO select PHYLINK @@ -16,7 +16,7 @@ config FSL_ENETC config FSL_ENETC_VF tristate "ENETC VF driver" - depends on PCI && PCI_MSI + depends on PCI_MSI select FSL_ENETC_MDIO select PHYLINK select DIMLIB diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index e96449eedfb5..b227e17e7b02 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1305,6 +1305,10 @@ static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring, xdp_tx_swbd->xdp_frame = NULL; n++; + + if (!xdp_frame_has_frags(xdp_frame)) + goto out; + xdp_tx_swbd = &xdp_tx_arr[n]; shinfo = xdp_get_shared_info_from_frame(xdp_frame); @@ -1334,7 +1338,7 @@ static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring, n++; xdp_tx_swbd = &xdp_tx_arr[n]; } - +out: xdp_tx_arr[n - 1].is_eof = true; xdp_tx_arr[n - 1].xdp_frame = xdp_frame; @@ -1390,16 +1394,12 @@ static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, { struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset; - struct skb_shared_info *shinfo; /* To be used for XDP_TX */ rx_swbd->len = size; xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset, rx_ring->buffer_offset, size, false); - - shinfo = xdp_get_shared_info_from_buff(xdp_buff); - shinfo->nr_frags = 0; } static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, @@ -1407,11 +1407,23 @@ static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, { struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff); struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); - skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags]; + skb_frag_t *frag; /* To be used for XDP_TX */ rx_swbd->len = size; + if (!xdp_buff_has_frags(xdp_buff)) { + xdp_buff_set_frags_flag(xdp_buff); + shinfo->xdp_frags_size = size; + shinfo->nr_frags = 0; + } else { + shinfo->xdp_frags_size += size; + } + + if (page_is_pfmemalloc(rx_swbd->page)) + xdp_buff_set_frag_pfmemalloc(xdp_buff); + + frag = &shinfo->frags[shinfo->nr_frags]; skb_frag_off_set(frag, rx_swbd->page_offset); skb_frag_size_set(frag, size); __skb_frag_set_page(frag, rx_swbd->page); @@ -1584,20 +1596,6 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, } break; case XDP_REDIRECT: - /* xdp_return_frame does not support S/G in the sense - * that it leaks the fragments (__xdp_return should not - * call page_frag_free only for the initial buffer). - * Until XDP_REDIRECT gains support for S/G let's keep - * the code structure in place, but dead. We drop the - * S/G frames ourselves to avoid memory leaks which - * would otherwise leave the kernel OOM. 
- */ - if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) { - enetc_xdp_drop(rx_ring, orig_i, i); - rx_ring->stats.xdp_redirect_sg++; - break; - } - err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); if (unlikely(err)) { enetc_xdp_drop(rx_ring, orig_i, i); @@ -1717,200 +1715,255 @@ void enetc_get_si_caps(struct enetc_si *si) si->hw_features |= ENETC_SI_F_PSFP; } -static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size) +static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res) { - r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size, - &r->bd_dma_base, GFP_KERNEL); - if (!r->bd_base) + size_t bd_base_size = res->bd_count * res->bd_size; + + res->bd_base = dma_alloc_coherent(res->dev, bd_base_size, + &res->bd_dma_base, GFP_KERNEL); + if (!res->bd_base) return -ENOMEM; /* h/w requires 128B alignment */ - if (!IS_ALIGNED(r->bd_dma_base, 128)) { - dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base, - r->bd_dma_base); + if (!IS_ALIGNED(res->bd_dma_base, 128)) { + dma_free_coherent(res->dev, bd_base_size, res->bd_base, + res->bd_dma_base); return -EINVAL; } return 0; } -static int enetc_alloc_txbdr(struct enetc_bdr *txr) +static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res) +{ + size_t bd_base_size = res->bd_count * res->bd_size; + + dma_free_coherent(res->dev, bd_base_size, res->bd_base, + res->bd_dma_base); +} + +static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res, + struct device *dev, size_t bd_count) { int err; - txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd)); - if (!txr->tx_swbd) + res->dev = dev; + res->bd_count = bd_count; + res->bd_size = sizeof(union enetc_tx_bd); + + res->tx_swbd = vzalloc(bd_count * sizeof(*res->tx_swbd)); + if (!res->tx_swbd) return -ENOMEM; - err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd)); + err = enetc_dma_alloc_bdr(res); if (err) goto err_alloc_bdr; - txr->tso_headers = dma_alloc_coherent(txr->dev, - txr->bd_count * TSO_HEADER_SIZE, - &txr->tso_headers_dma, + res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE, + &res->tso_headers_dma, GFP_KERNEL); - if (!txr->tso_headers) { + if (!res->tso_headers) { err = -ENOMEM; goto err_alloc_tso; } - txr->next_to_clean = 0; - txr->next_to_use = 0; - return 0; err_alloc_tso: - dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd), - txr->bd_base, txr->bd_dma_base); - txr->bd_base = NULL; + enetc_dma_free_bdr(res); err_alloc_bdr: - vfree(txr->tx_swbd); - txr->tx_swbd = NULL; + vfree(res->tx_swbd); + res->tx_swbd = NULL; return err; } -static void enetc_free_txbdr(struct enetc_bdr *txr) +static void enetc_free_tx_resource(const struct enetc_bdr_resource *res) { - int size, i; - - for (i = 0; i < txr->bd_count; i++) - enetc_free_tx_frame(txr, &txr->tx_swbd[i]); - - size = txr->bd_count * sizeof(union enetc_tx_bd); - - dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE, - txr->tso_headers, txr->tso_headers_dma); - txr->tso_headers = NULL; - - dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base); - txr->bd_base = NULL; - - vfree(txr->tx_swbd); - txr->tx_swbd = NULL; + dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE, + res->tso_headers, res->tso_headers_dma); + enetc_dma_free_bdr(res); + vfree(res->tx_swbd); } -static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv) +static struct enetc_bdr_resource * +enetc_alloc_tx_resources(struct enetc_ndev_priv *priv) { + struct enetc_bdr_resource *tx_res; int i, err; + tx_res = kcalloc(priv->num_tx_rings, 
sizeof(*tx_res), GFP_KERNEL); + if (!tx_res) + return ERR_PTR(-ENOMEM); + for (i = 0; i < priv->num_tx_rings; i++) { - err = enetc_alloc_txbdr(priv->tx_ring[i]); + struct enetc_bdr *tx_ring = priv->tx_ring[i]; + err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev, + tx_ring->bd_count); if (err) goto fail; } - return 0; + return tx_res; fail: while (i-- > 0) - enetc_free_txbdr(priv->tx_ring[i]); + enetc_free_tx_resource(&tx_res[i]); - return err; + kfree(tx_res); + + return ERR_PTR(err); } -static void enetc_free_tx_resources(struct enetc_ndev_priv *priv) +static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res, + size_t num_resources) { - int i; + size_t i; - for (i = 0; i < priv->num_tx_rings; i++) - enetc_free_txbdr(priv->tx_ring[i]); + for (i = 0; i < num_resources; i++) + enetc_free_tx_resource(&tx_res[i]); + + kfree(tx_res); } -static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended) +static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res, + struct device *dev, size_t bd_count, + bool extended) { - size_t size = sizeof(union enetc_rx_bd); int err; - rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd)); - if (!rxr->rx_swbd) - return -ENOMEM; - + res->dev = dev; + res->bd_count = bd_count; + res->bd_size = sizeof(union enetc_rx_bd); if (extended) - size *= 2; + res->bd_size *= 2; + + res->rx_swbd = vzalloc(bd_count * sizeof(struct enetc_rx_swbd)); + if (!res->rx_swbd) + return -ENOMEM; - err = enetc_dma_alloc_bdr(rxr, size); + err = enetc_dma_alloc_bdr(res); if (err) { - vfree(rxr->rx_swbd); + vfree(res->rx_swbd); return err; } - rxr->next_to_clean = 0; - rxr->next_to_use = 0; - rxr->next_to_alloc = 0; - rxr->ext_en = extended; - return 0; } -static void enetc_free_rxbdr(struct enetc_bdr *rxr) +static void enetc_free_rx_resource(const struct enetc_bdr_resource *res) { - int size; - - size = rxr->bd_count * sizeof(union enetc_rx_bd); - - dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base); - rxr->bd_base = NULL; - - vfree(rxr->rx_swbd); - rxr->rx_swbd = NULL; + enetc_dma_free_bdr(res); + vfree(res->rx_swbd); } -static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv) +static struct enetc_bdr_resource * +enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended) { - bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); + struct enetc_bdr_resource *rx_res; int i, err; + rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL); + if (!rx_res) + return ERR_PTR(-ENOMEM); + for (i = 0; i < priv->num_rx_rings; i++) { - err = enetc_alloc_rxbdr(priv->rx_ring[i], extended); + struct enetc_bdr *rx_ring = priv->rx_ring[i]; + err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev, + rx_ring->bd_count, extended); if (err) goto fail; } - return 0; + return rx_res; fail: while (i-- > 0) - enetc_free_rxbdr(priv->rx_ring[i]); + enetc_free_rx_resource(&rx_res[i]); - return err; + kfree(rx_res); + + return ERR_PTR(err); } -static void enetc_free_rx_resources(struct enetc_ndev_priv *priv) +static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res, + size_t num_resources) +{ + size_t i; + + for (i = 0; i < num_resources; i++) + enetc_free_rx_resource(&rx_res[i]); + + kfree(rx_res); +} + +static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring, + const struct enetc_bdr_resource *res) +{ + tx_ring->bd_base = res ? res->bd_base : NULL; + tx_ring->bd_dma_base = res ? res->bd_dma_base : 0; + tx_ring->tx_swbd = res ? res->tx_swbd : NULL; + tx_ring->tso_headers = res ? 
res->tso_headers : NULL; + tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0; +} + +static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring, + const struct enetc_bdr_resource *res) +{ + rx_ring->bd_base = res ? res->bd_base : NULL; + rx_ring->bd_dma_base = res ? res->bd_dma_base : 0; + rx_ring->rx_swbd = res ? res->rx_swbd : NULL; +} + +static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv, + const struct enetc_bdr_resource *res) { int i; - for (i = 0; i < priv->num_rx_rings; i++) - enetc_free_rxbdr(priv->rx_ring[i]); + if (priv->tx_res) + enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings); + + for (i = 0; i < priv->num_tx_rings; i++) { + enetc_assign_tx_resource(priv->tx_ring[i], + res ? &res[i] : NULL); + } + + priv->tx_res = res; } -static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) +static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv, + const struct enetc_bdr_resource *res) { int i; - if (!tx_ring->tx_swbd) - return; + if (priv->rx_res) + enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings); + + for (i = 0; i < priv->num_rx_rings; i++) { + enetc_assign_rx_resource(priv->rx_ring[i], + res ? &res[i] : NULL); + } + + priv->rx_res = res; +} + +static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) +{ + int i; for (i = 0; i < tx_ring->bd_count; i++) { struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; enetc_free_tx_frame(tx_ring, tx_swbd); } - - tx_ring->next_to_clean = 0; - tx_ring->next_to_use = 0; } static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) { int i; - if (!rx_ring->rx_swbd) - return; - for (i = 0; i < rx_ring->bd_count; i++) { struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; @@ -1922,10 +1975,6 @@ static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) __free_page(rx_swbd->page); rx_swbd->page = NULL; } - - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - rx_ring->next_to_alloc = 0; } static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) @@ -2039,7 +2088,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) /* enable Tx ints by setting pkt thr to 1 */ enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1); - tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio); + tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio); if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) tbmr |= ENETC_TBMR_VIH; @@ -2051,10 +2100,11 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) tx_ring->idr = hw->reg + ENETC_SITXIDR; } -static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) +static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring, + bool extended) { int idx = rx_ring->index; - u32 rbmr; + u32 rbmr = 0; enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0, lower_32_bits(rx_ring->bd_dma_base)); @@ -2081,8 +2131,7 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) /* enable Rx ints by setting pkt thr to 1 */ enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1); - rbmr = ENETC_RBMR_EN; - + rx_ring->ext_en = extended; if (rx_ring->ext_en) rbmr |= ENETC_RBMR_BDS; @@ -2092,15 +2141,18 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); rx_ring->idr = hw->reg + ENETC_SIRXIDR; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->next_to_alloc = 0; + enetc_lock_mdio(); enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); enetc_unlock_mdio(); - /* enable ring */ 
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); } -static void enetc_setup_bdrs(struct enetc_ndev_priv *priv) +static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended) { struct enetc_hw *hw = &priv->si->hw; int i; @@ -2109,10 +2161,42 @@ static void enetc_setup_bdrs(struct enetc_ndev_priv *priv) enetc_setup_txbdr(hw, priv->tx_ring[i]); for (i = 0; i < priv->num_rx_rings; i++) - enetc_setup_rxbdr(hw, priv->rx_ring[i]); + enetc_setup_rxbdr(hw, priv->rx_ring[i], extended); } -static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) +static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) +{ + int idx = tx_ring->index; + u32 tbmr; + + tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR); + tbmr |= ENETC_TBMR_EN; + enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); +} + +static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) +{ + int idx = rx_ring->index; + u32 rbmr; + + rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR); + rbmr |= ENETC_RBMR_EN; + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); +} + +static void enetc_enable_bdrs(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_enable_txbdr(hw, priv->tx_ring[i]); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_enable_rxbdr(hw, priv->rx_ring[i]); +} + +static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) { int idx = rx_ring->index; @@ -2120,13 +2204,30 @@ static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0); } -static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) +static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) { - int delay = 8, timeout = 100; - int idx = tx_ring->index; + int idx = rx_ring->index; /* disable EN bit on ring */ enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0); +} + +static void enetc_disable_bdrs(struct enetc_ndev_priv *priv) +{ + struct enetc_hw *hw = &priv->si->hw; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_disable_txbdr(hw, priv->tx_ring[i]); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_disable_rxbdr(hw, priv->rx_ring[i]); +} + +static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) +{ + int delay = 8, timeout = 100; + int idx = tx_ring->index; /* wait for busy to clear */ while (delay < timeout && @@ -2140,18 +2241,13 @@ static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) idx); } -static void enetc_clear_bdrs(struct enetc_ndev_priv *priv) +static void enetc_wait_bdrs(struct enetc_ndev_priv *priv) { struct enetc_hw *hw = &priv->si->hw; int i; for (i = 0; i < priv->num_tx_rings; i++) - enetc_clear_txbdr(hw, priv->tx_ring[i]); - - for (i = 0; i < priv->num_rx_rings; i++) - enetc_clear_rxbdr(hw, priv->rx_ring[i]); - - udelay(1); + enetc_wait_txbdr(hw, priv->tx_ring[i]); } static int enetc_setup_irqs(struct enetc_ndev_priv *priv) @@ -2267,8 +2363,11 @@ static int enetc_phylink_connect(struct net_device *ndev) struct ethtool_eee edata; int err; - if (!priv->phylink) - return 0; /* phy-less mode */ + if (!priv->phylink) { + /* phy-less mode */ + netif_carrier_on(ndev); + return 0; + } err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0); if (err) { @@ -2280,6 +2379,8 @@ static int enetc_phylink_connect(struct net_device *ndev) memset(&edata, 0, sizeof(struct ethtool_eee)); phylink_ethtool_set_eee(priv->phylink, &edata); + phylink_start(priv->phylink); + return 0; } @@ 
-2321,10 +2422,7 @@ void enetc_start(struct net_device *ndev) enable_irq(irq); } - if (priv->phylink) - phylink_start(priv->phylink); - else - netif_carrier_on(ndev); + enetc_enable_bdrs(priv); netif_tx_start_all_queues(ndev); } @@ -2332,9 +2430,13 @@ void enetc_start(struct net_device *ndev) int enetc_open(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_bdr_resource *tx_res, *rx_res; int num_stack_tx_queues; + bool extended; int err; + extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); + err = enetc_setup_irqs(priv); if (err) return err; @@ -2343,13 +2445,17 @@ int enetc_open(struct net_device *ndev) if (err) goto err_phy_connect; - err = enetc_alloc_tx_resources(priv); - if (err) + tx_res = enetc_alloc_tx_resources(priv); + if (IS_ERR(tx_res)) { + err = PTR_ERR(tx_res); goto err_alloc_tx; + } - err = enetc_alloc_rx_resources(priv); - if (err) + rx_res = enetc_alloc_rx_resources(priv, extended); + if (IS_ERR(rx_res)) { + err = PTR_ERR(rx_res); goto err_alloc_rx; + } num_stack_tx_queues = enetc_num_stack_tx_queues(priv); @@ -2362,15 +2468,17 @@ int enetc_open(struct net_device *ndev) goto err_set_queues; enetc_tx_onestep_tstamp_init(priv); - enetc_setup_bdrs(priv); + enetc_assign_tx_resources(priv, tx_res); + enetc_assign_rx_resources(priv, rx_res); + enetc_setup_bdrs(priv, extended); enetc_start(ndev); return 0; err_set_queues: - enetc_free_rx_resources(priv); + enetc_free_rx_resources(rx_res, priv->num_rx_rings); err_alloc_rx: - enetc_free_tx_resources(priv); + enetc_free_tx_resources(tx_res, priv->num_tx_rings); err_alloc_tx: if (priv->phylink) phylink_disconnect_phy(priv->phylink); @@ -2387,6 +2495,8 @@ void enetc_stop(struct net_device *ndev) netif_tx_stop_all_queues(ndev); + enetc_disable_bdrs(priv); + for (i = 0; i < priv->bdr_int_num; i++) { int irq = pci_irq_vector(priv->si->pdev, ENETC_BDR_INT_BASE_IDX + i); @@ -2396,10 +2506,7 @@ void enetc_stop(struct net_device *ndev) napi_disable(&priv->int_vector[i]->napi); } - if (priv->phylink) - phylink_stop(priv->phylink); - else - netif_carrier_off(ndev); + enetc_wait_bdrs(priv); enetc_clear_interrupts(priv); } @@ -2409,18 +2516,76 @@ int enetc_close(struct net_device *ndev) struct enetc_ndev_priv *priv = netdev_priv(ndev); enetc_stop(ndev); - enetc_clear_bdrs(priv); - if (priv->phylink) + if (priv->phylink) { + phylink_stop(priv->phylink); phylink_disconnect_phy(priv->phylink); + } else { + netif_carrier_off(ndev); + } + enetc_free_rxtx_rings(priv); - enetc_free_rx_resources(priv); - enetc_free_tx_resources(priv); + + /* Avoids dangling pointers and also frees old resources */ + enetc_assign_rx_resources(priv, NULL); + enetc_assign_tx_resources(priv, NULL); + enetc_free_irqs(priv); return 0; } +static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended, + int (*cb)(struct enetc_ndev_priv *priv, void *ctx), + void *ctx) +{ + struct enetc_bdr_resource *tx_res, *rx_res; + int err; + + ASSERT_RTNL(); + + /* If the interface is down, run the callback right away, + * without reconfiguration. 
+ */ + if (!netif_running(priv->ndev)) { + if (cb) + cb(priv, ctx); + + return 0; + } + + tx_res = enetc_alloc_tx_resources(priv); + if (IS_ERR(tx_res)) { + err = PTR_ERR(tx_res); + goto out; + } + + rx_res = enetc_alloc_rx_resources(priv, extended); + if (IS_ERR(rx_res)) { + err = PTR_ERR(rx_res); + goto out_free_tx_res; + } + + enetc_stop(priv->ndev); + enetc_free_rxtx_rings(priv); + + /* Interface is down, run optional callback now */ + if (cb) + cb(priv, ctx); + + enetc_assign_tx_resources(priv, tx_res); + enetc_assign_rx_resources(priv, rx_res); + enetc_setup_bdrs(priv, extended); + enetc_start(priv->ndev); + + return 0; + +out_free_tx_res: + enetc_free_tx_resources(tx_res, priv->num_tx_rings); +out: + return err; +} + int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -2478,21 +2643,11 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) return 0; } -static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog, - struct netlink_ext_ack *extack) +static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx) { - struct enetc_ndev_priv *priv = netdev_priv(dev); - struct bpf_prog *old_prog; - bool is_up; + struct bpf_prog *old_prog, *prog = ctx; int i; - /* The buffer layout is changing, so we need to drain the old - * RX buffers and seed new ones. - */ - is_up = netif_running(dev); - if (is_up) - dev_close(dev); - old_prog = xchg(&priv->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); @@ -2508,17 +2663,28 @@ static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog, rx_ring->buffer_offset = ENETC_RXB_PAD; } - if (is_up) - return dev_open(dev, extack); - return 0; } -int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp) +static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) { - switch (xdp->command) { + struct enetc_ndev_priv *priv = netdev_priv(ndev); + bool extended; + + extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); + + /* The buffer layout is changing, so we need to drain the old + * RX buffers and seed new ones. 
+ */ + return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog); +} + +int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf) +{ + switch (bpf->command) { case XDP_SETUP_PROG: - return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack); + return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack); default: return -EINVAL; } @@ -2613,43 +2779,47 @@ void enetc_set_features(struct net_device *ndev, netdev_features_t features) static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err, new_offloads = priv->active_offloads; struct hwtstamp_config config; - int ao; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; switch (config.tx_type) { case HWTSTAMP_TX_OFF: - priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK; + new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; break; case HWTSTAMP_TX_ON: - priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK; - priv->active_offloads |= ENETC_F_TX_TSTAMP; + new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; + new_offloads |= ENETC_F_TX_TSTAMP; break; case HWTSTAMP_TX_ONESTEP_SYNC: - priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK; - priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP; + new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; + new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP; break; default: return -ERANGE; } - ao = priv->active_offloads; switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: - priv->active_offloads &= ~ENETC_F_RX_TSTAMP; + new_offloads &= ~ENETC_F_RX_TSTAMP; break; default: - priv->active_offloads |= ENETC_F_RX_TSTAMP; + new_offloads |= ENETC_F_RX_TSTAMP; config.rx_filter = HWTSTAMP_FILTER_ALL; } - if (netif_running(ndev) && ao != priv->active_offloads) { - enetc_close(ndev); - enetc_open(ndev); + if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) { + bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP); + + err = enetc_reconfigure(priv, extended, NULL, NULL); + if (err) + return err; } + priv->active_offloads = new_offloads; + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; } diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index c6d8cc15c270..6a87aa972e1e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -70,7 +70,6 @@ struct enetc_ring_stats { unsigned int xdp_tx_drops; unsigned int xdp_redirect; unsigned int xdp_redirect_failures; - unsigned int xdp_redirect_sg; unsigned int recycles; unsigned int recycle_failures; unsigned int win_drop; @@ -86,6 +85,23 @@ struct enetc_xdp_data { #define ENETC_TX_RING_DEFAULT_SIZE 2048 #define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2) +struct enetc_bdr_resource { + /* Input arguments saved for teardown */ + struct device *dev; /* for DMA mapping */ + size_t bd_count; + size_t bd_size; + + /* Resource proper */ + void *bd_base; /* points to Rx or Tx BD ring */ + dma_addr_t bd_dma_base; + union { + struct enetc_tx_swbd *tx_swbd; + struct enetc_rx_swbd *rx_swbd; + }; + char *tso_headers; + dma_addr_t tso_headers_dma; +}; + struct enetc_bdr { struct device *dev; /* for DMA mapping */ struct net_device *ndev; @@ -345,6 +361,8 @@ struct enetc_ndev_priv { struct enetc_bdr **xdp_tx_ring; struct enetc_bdr *tx_ring[16]; struct enetc_bdr *rx_ring[16]; + const struct enetc_bdr_resource *tx_res; + const struct enetc_bdr_resource *rx_res; struct enetc_cls_rule *cls_rules; @@ -397,7 +415,7 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev); void enetc_set_features(struct net_device *ndev, netdev_features_t features); int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd); int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data); -int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp); +int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf); int enetc_xdp_xmit(struct net_device *ndev, int num_frames, struct xdp_frame **frames, u32 flags); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index c8369e3752b0..d45f305eb03c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -197,7 +197,6 @@ static const char rx_ring_stats[][ETH_GSTRING_LEN] = { "Rx ring %2d recycle failures", "Rx ring %2d redirects", "Rx ring %2d redirect failures", - "Rx ring %2d redirect S/G", }; static const char tx_ring_stats[][ETH_GSTRING_LEN] = { @@ -291,7 +290,6 @@ static void enetc_get_ethtool_stats(struct net_device *ndev, data[o++] = priv->rx_ring[i]->stats.recycle_failures; data[o++] = priv->rx_ring[i]->stats.xdp_redirect; data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures; - data[o++] = priv->rx_ring[i]->stats.xdp_redirect_sg; } if (!enetc_si_is_pf(priv->si)) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c index 1c8f5cc6dec4..998aaa394e9c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c @@ -55,7 +55,8 @@ static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv) is_busy, !is_busy, 10, 10 * 1000); } -int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) +int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum, + u16 value) { struct enetc_mdio_priv *mdio_priv = bus->priv; u32 mdio_ctl, mdio_cfg; @@ -63,14 +64,39 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) int ret; mdio_cfg = 
ENETC_EMDIO_CFG; - if (regnum & MII_ADDR_C45) { - dev_addr = (regnum >> 16) & 0x1f; - mdio_cfg |= MDIO_CFG_ENC45; - } else { - /* clause 22 (ie 1G) */ - dev_addr = regnum & 0x1f; - mdio_cfg &= ~MDIO_CFG_ENC45; - } + dev_addr = regnum & 0x1f; + mdio_cfg &= ~MDIO_CFG_ENC45; + + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* set port and dev addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl); + + /* write the value */ + enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_mdio_write_c22); + +int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id, int dev_addr, + int regnum, u16 value) +{ + struct enetc_mdio_priv *mdio_priv = bus->priv; + u32 mdio_ctl, mdio_cfg; + int ret; + + mdio_cfg = ENETC_EMDIO_CFG; + mdio_cfg |= MDIO_CFG_ENC45; enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg); @@ -83,13 +109,11 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl); /* set the register address */ - if (regnum & MII_ADDR_C45) { - enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff); + enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff); - ret = enetc_mdio_wait_complete(mdio_priv); - if (ret) - return ret; - } + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; /* write the value */ enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value); @@ -100,9 +124,9 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) return 0; } -EXPORT_SYMBOL_GPL(enetc_mdio_write); +EXPORT_SYMBOL_GPL(enetc_mdio_write_c45); -int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) +int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum) { struct enetc_mdio_priv *mdio_priv = bus->priv; u32 mdio_ctl, mdio_cfg; @@ -110,14 +134,51 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) int ret; mdio_cfg = ENETC_EMDIO_CFG; - if (regnum & MII_ADDR_C45) { - dev_addr = (regnum >> 16) & 0x1f; - mdio_cfg |= MDIO_CFG_ENC45; - } else { - dev_addr = regnum & 0x1f; - mdio_cfg &= ~MDIO_CFG_ENC45; + dev_addr = regnum & 0x1f; + mdio_cfg &= ~MDIO_CFG_ENC45; + + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* set port and device addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl); + + /* initiate the read */ + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ); + + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; + + /* return all Fs if nothing was there */ + if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) { + dev_dbg(&bus->dev, + "Error while reading PHY%d reg at %d.%d\n", + phy_id, dev_addr, regnum); + return 0xffff; } + value = enetc_mdio_rd(mdio_priv, ENETC_MDIO_DATA) & 0xffff; + + return value; +} +EXPORT_SYMBOL_GPL(enetc_mdio_read_c22); + +int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id, int dev_addr, + int regnum) +{ + struct enetc_mdio_priv *mdio_priv = bus->priv; + u32 mdio_ctl, mdio_cfg; + u16 value; + int ret; + + mdio_cfg = ENETC_EMDIO_CFG; + mdio_cfg |= MDIO_CFG_ENC45; + enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg); ret = enetc_mdio_wait_complete(mdio_priv); @@ -129,13 +190,11 @@ int 
enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl); /* set the register address */ - if (regnum & MII_ADDR_C45) { - enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff); + enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff); - ret = enetc_mdio_wait_complete(mdio_priv); - if (ret) - return ret; - } + ret = enetc_mdio_wait_complete(mdio_priv); + if (ret) + return ret; /* initiate the read */ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ); @@ -156,7 +215,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) return value; } -EXPORT_SYMBOL_GPL(enetc_mdio_read); +EXPORT_SYMBOL_GPL(enetc_mdio_read_c45); struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs) { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c index dafb26f81f95..a1b595bd7993 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c @@ -39,8 +39,10 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev, } bus->name = ENETC_MDIO_BUS_NAME; - bus->read = enetc_mdio_read; - bus->write = enetc_mdio_write; + bus->read = enetc_mdio_read_c22; + bus->write = enetc_mdio_write_c22; + bus->read_c45 = enetc_mdio_read_c45; + bus->write_c45 = enetc_mdio_write_c45; bus->parent = dev; mdio_priv = bus->priv; mdio_priv->hw = hw; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 9f6c4f5c0a6c..bc012deedab4 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -848,8 +848,10 @@ static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np) return -ENOMEM; bus->name = "Freescale ENETC MDIO Bus"; - bus->read = enetc_mdio_read; - bus->write = enetc_mdio_write; + bus->read = enetc_mdio_read_c22; + bus->write = enetc_mdio_write_c22; + bus->read_c45 = enetc_mdio_read_c45; + bus->write_c45 = enetc_mdio_write_c45; bus->parent = dev; mdio_priv = bus->priv; mdio_priv->hw = &pf->si->hw; @@ -885,8 +887,10 @@ static int enetc_imdio_create(struct enetc_pf *pf) return -ENOMEM; bus->name = "Freescale ENETC internal MDIO Bus"; - bus->read = enetc_mdio_read; - bus->write = enetc_mdio_write; + bus->read = enetc_mdio_read_c22; + bus->write = enetc_mdio_write_c22; + bus->read_c45 = enetc_mdio_read_c45; + bus->write_c45 = enetc_mdio_write_c45; bus->parent = dev; bus->phy_mask = ~0; mdio_priv = bus->priv; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 644f3c963730..e6238e53940d 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1987,47 +1987,74 @@ static int fec_enet_mdio_wait(struct fec_enet_private *fep) return ret; } -static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) { struct fec_enet_private *fep = bus->priv; struct device *dev = &fep->pdev->dev; int ret = 0, frame_start, frame_addr, frame_op; - bool is_c45 = !!(regnum & MII_ADDR_C45); ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; - if (is_c45) { - frame_start = FEC_MMFR_ST_C45; + /* C22 read */ + frame_op = FEC_MMFR_OP_READ; + frame_start = FEC_MMFR_ST; + frame_addr = regnum; - /* write address */ - frame_addr = (regnum >> 16); - writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | - 
FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | - FEC_MMFR_TA | (regnum & 0xFFFF), - fep->hwp + FEC_MII_DATA); + /* start a read op */ + writel(frame_start | frame_op | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | + FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); - /* wait for end of transfer */ - ret = fec_enet_mdio_wait(fep); - if (ret) { - netdev_err(fep->netdev, "MDIO address write timeout\n"); - goto out; - } + /* wait for end of transfer */ + ret = fec_enet_mdio_wait(fep); + if (ret) { + netdev_err(fep->netdev, "MDIO read timeout\n"); + goto out; + } - frame_op = FEC_MMFR_OP_READ_C45; + ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); - } else { - /* C22 read */ - frame_op = FEC_MMFR_OP_READ; - frame_start = FEC_MMFR_ST; - frame_addr = regnum; +out: + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; +} + +static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id, + int devad, int regnum) +{ + struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; + int ret = 0, frame_start, frame_op; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + + frame_start = FEC_MMFR_ST_C45; + + /* write address */ + writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | + FEC_MMFR_TA | (regnum & 0xFFFF), + fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + ret = fec_enet_mdio_wait(fep); + if (ret) { + netdev_err(fep->netdev, "MDIO address write timeout\n"); + goto out; } + frame_op = FEC_MMFR_OP_READ_C45; + /* start a read op */ writel(frame_start | frame_op | - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | - FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | + FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); /* wait for end of transfer */ ret = fec_enet_mdio_wait(fep); @@ -2045,45 +2072,69 @@ out: return ret; } -static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, - u16 value) +static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, + u16 value) { struct fec_enet_private *fep = bus->priv; struct device *dev = &fep->pdev->dev; int ret, frame_start, frame_addr; - bool is_c45 = !!(regnum & MII_ADDR_C45); ret = pm_runtime_resume_and_get(dev); if (ret < 0) return ret; - if (is_c45) { - frame_start = FEC_MMFR_ST_C45; + /* C22 write */ + frame_start = FEC_MMFR_ST; + frame_addr = regnum; - /* write address */ - frame_addr = (regnum >> 16); - writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | - FEC_MMFR_TA | (regnum & 0xFFFF), - fep->hwp + FEC_MII_DATA); + /* start a write op */ + writel(frame_start | FEC_MMFR_OP_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | + FEC_MMFR_TA | FEC_MMFR_DATA(value), + fep->hwp + FEC_MII_DATA); - /* wait for end of transfer */ - ret = fec_enet_mdio_wait(fep); - if (ret) { - netdev_err(fep->netdev, "MDIO address write timeout\n"); - goto out; - } - } else { - /* C22 write */ - frame_start = FEC_MMFR_ST; - frame_addr = regnum; + /* wait for end of transfer */ + ret = fec_enet_mdio_wait(fep); + if (ret) + netdev_err(fep->netdev, "MDIO write timeout\n"); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; +} + +static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id, + int devad, int regnum, u16 value) +{ + struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; + int ret, frame_start; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + 
+ frame_start = FEC_MMFR_ST_C45; + + /* write address */ + writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | + FEC_MMFR_TA | (regnum & 0xFFFF), + fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + ret = fec_enet_mdio_wait(fep); + if (ret) { + netdev_err(fep->netdev, "MDIO address write timeout\n"); + goto out; } /* start a write op */ writel(frame_start | FEC_MMFR_OP_WRITE | - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | - FEC_MMFR_TA | FEC_MMFR_DATA(value), - fep->hwp + FEC_MII_DATA); + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | + FEC_MMFR_TA | FEC_MMFR_DATA(value), + fep->hwp + FEC_MII_DATA); /* wait for end of transfer */ ret = fec_enet_mdio_wait(fep); @@ -2381,8 +2432,10 @@ static int fec_enet_mii_init(struct platform_device *pdev) } fep->mii_bus->name = "fec_enet_mii_bus"; - fep->mii_bus->read = fec_enet_mdio_read; - fep->mii_bus->write = fec_enet_mdio_write; + fep->mii_bus->read = fec_enet_mdio_read_c22; + fep->mii_bus->write = fec_enet_mdio_write_c22; + fep->mii_bus->read_c45 = fec_enet_mdio_read_c45; + fep->mii_bus->write_c45 = fec_enet_mdio_write_c45; snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, fep->dev_id + 1); fep->mii_bus->priv = fep; diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index d7d39a58cd80..a13b4ba4d6e1 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -128,30 +128,49 @@ static int xgmac_wait_until_done(struct device *dev, return 0; } -/* - * Write value to the PHY for this device to the register at regnum,waiting - * until the write is done before it returns. All PHY configuration has to be - * done through the TSEC1 MIIM regs. - */ -static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) +static int xgmac_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum, + u16 value) { struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; struct tgec_mdio_controller __iomem *regs = priv->mdio_base; - uint16_t dev_addr; + bool endian = priv->is_little_endian; + u16 dev_addr = regnum & 0x1f; u32 mdio_ctl, mdio_stat; int ret; + + mdio_stat = xgmac_read32(®s->mdio_stat, endian); + mdio_stat &= ~MDIO_STAT_ENC; + xgmac_write32(mdio_stat, ®s->mdio_stat, endian); + + ret = xgmac_wait_until_free(&bus->dev, regs, endian); + if (ret) + return ret; + + /* Set the port and dev addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); + + /* Write the value to the register */ + xgmac_write32(MDIO_DATA(value), ®s->mdio_data, endian); + + ret = xgmac_wait_until_done(&bus->dev, regs, endian); + if (ret) + return ret; + + return 0; +} + +static int xgmac_mdio_write_c45(struct mii_bus *bus, int phy_id, int dev_addr, + int regnum, u16 value) +{ + struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; + struct tgec_mdio_controller __iomem *regs = priv->mdio_base; bool endian = priv->is_little_endian; + u32 mdio_ctl, mdio_stat; + int ret; mdio_stat = xgmac_read32(®s->mdio_stat, endian); - if (regnum & MII_ADDR_C45) { - /* Clause 45 (ie 10G) */ - dev_addr = (regnum >> 16) & 0x1f; - mdio_stat |= MDIO_STAT_ENC; - } else { - /* Clause 22 (ie 1G) */ - dev_addr = regnum & 0x1f; - mdio_stat &= ~MDIO_STAT_ENC; - } + mdio_stat |= MDIO_STAT_ENC; xgmac_write32(mdio_stat, ®s->mdio_stat, endian); @@ -164,13 +183,11 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val 
xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); /* Set the register address */ - if (regnum & MII_ADDR_C45) { - xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); + xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); - ret = xgmac_wait_until_free(&bus->dev, regs, endian); - if (ret) - return ret; - } + ret = xgmac_wait_until_free(&bus->dev, regs, endian); + if (ret) + return ret; /* Write the value to the register */ xgmac_write32(MDIO_DATA(value), ®s->mdio_data, endian); @@ -182,31 +199,82 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val return 0; } -/* - * Reads from register regnum in the PHY for device dev, returning the value. +/* Reads from register regnum in the PHY for device dev, returning the value. * Clears miimcom first. All PHY configuration has to be done through the * TSEC1 MIIM regs. */ -static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) +static int xgmac_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum) { struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; struct tgec_mdio_controller __iomem *regs = priv->mdio_base; + bool endian = priv->is_little_endian; + u16 dev_addr = regnum & 0x1f; unsigned long flags; - uint16_t dev_addr; uint32_t mdio_stat; uint32_t mdio_ctl; int ret; - bool endian = priv->is_little_endian; mdio_stat = xgmac_read32(®s->mdio_stat, endian); - if (regnum & MII_ADDR_C45) { - dev_addr = (regnum >> 16) & 0x1f; - mdio_stat |= MDIO_STAT_ENC; + mdio_stat &= ~MDIO_STAT_ENC; + xgmac_write32(mdio_stat, ®s->mdio_stat, endian); + + ret = xgmac_wait_until_free(&bus->dev, regs, endian); + if (ret) + return ret; + + /* Set the Port and Device Addrs */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); + + if (priv->has_a009885) + /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we + * must read back the data register within 16 MDC cycles. + */ + local_irq_save(flags); + + /* Initiate the read */ + xgmac_write32(mdio_ctl | MDIO_CTL_READ, ®s->mdio_ctl, endian); + + ret = xgmac_wait_until_done(&bus->dev, regs, endian); + if (ret) + goto irq_restore; + + /* Return all Fs if nothing was there */ + if ((xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) && + !priv->has_a011043) { + dev_dbg(&bus->dev, + "Error while reading PHY%d reg at %d.%d\n", + phy_id, dev_addr, regnum); + ret = 0xffff; } else { - dev_addr = regnum & 0x1f; - mdio_stat &= ~MDIO_STAT_ENC; + ret = xgmac_read32(®s->mdio_data, endian) & 0xffff; + dev_dbg(&bus->dev, "read %04x\n", ret); } +irq_restore: + if (priv->has_a009885) + local_irq_restore(flags); + + return ret; +} + +/* Reads from register regnum in the PHY for device dev, returning the value. + * Clears miimcom first. All PHY configuration has to be done through the + * TSEC1 MIIM regs. 
+ */ +static int xgmac_mdio_read_c45(struct mii_bus *bus, int phy_id, int dev_addr, + int regnum) +{ + struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; + struct tgec_mdio_controller __iomem *regs = priv->mdio_base; + bool endian = priv->is_little_endian; + u32 mdio_stat, mdio_ctl; + unsigned long flags; + int ret; + + mdio_stat = xgmac_read32(®s->mdio_stat, endian); + mdio_stat |= MDIO_STAT_ENC; + xgmac_write32(mdio_stat, ®s->mdio_stat, endian); ret = xgmac_wait_until_free(&bus->dev, regs, endian); @@ -218,13 +286,11 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); /* Set the register address */ - if (regnum & MII_ADDR_C45) { - xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); + xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); - ret = xgmac_wait_until_free(&bus->dev, regs, endian); - if (ret) - return ret; - } + ret = xgmac_wait_until_free(&bus->dev, regs, endian); + if (ret) + return ret; if (priv->has_a009885) /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we @@ -326,10 +392,11 @@ static int xgmac_mdio_probe(struct platform_device *pdev) return -ENOMEM; bus->name = "Freescale XGMAC MDIO Bus"; - bus->read = xgmac_mdio_read; - bus->write = xgmac_mdio_write; + bus->read = xgmac_mdio_read_c22; + bus->write = xgmac_mdio_write_c22; + bus->read_c45 = xgmac_mdio_read_c45; + bus->write_c45 = xgmac_mdio_write_c45; bus->parent = &pdev->dev; - bus->probe_capabilities = MDIOBUS_C22_C45; snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res->start); priv = bus->priv; diff --git a/drivers/net/ethernet/fungible/funeth/Kconfig b/drivers/net/ethernet/fungible/funeth/Kconfig index c72ad9386400..e742e7663449 100644 --- a/drivers/net/ethernet/fungible/funeth/Kconfig +++ b/drivers/net/ethernet/fungible/funeth/Kconfig @@ -5,7 +5,7 @@ config FUN_ETH tristate "Fungible Ethernet device driver" - depends on PCI && PCI_MSI + depends on PCI_MSI depends on TLS && TLS_DEVICE || TLS_DEVICE=n select NET_DEVLINK select FUN_CORE diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 740850b64aff..5df19c604d09 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -554,11 +554,11 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb) argv4.package.count = 1; argv4.package.elements = &obj_args; - obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), - &hns_dsaf_acpi_dsm_guid, 0, - HNS_OP_GET_PORT_TYPE_FUNC, &argv4); - - if (!obj || obj->type != ACPI_TYPE_INTEGER) + obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(mac_cb->dev), + &hns_dsaf_acpi_dsm_guid, 0, + HNS_OP_GET_PORT_TYPE_FUNC, &argv4, + ACPI_TYPE_INTEGER); + if (!obj) return phy_if; phy_if = obj->integer.value ? 
@@ -601,11 +601,11 @@ static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt) argv4.package.count = 1; argv4.package.elements = &obj_args; - obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), - &hns_dsaf_acpi_dsm_guid, 0, - HNS_OP_GET_SFP_STAT_FUNC, &argv4); - - if (!obj || obj->type != ACPI_TYPE_INTEGER) + obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(mac_cb->dev), + &hns_dsaf_acpi_dsm_guid, 0, + HNS_OP_GET_SFP_STAT_FUNC, &argv4, + ACPI_TYPE_INTEGER); + if (!obj) return -ENODEV; *sfp_prsnt = obj->integer.value; diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index c2ae1b4f9a5f..9232caaf0bdc 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -206,7 +206,7 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev, } /** - * hns_mdio_write - access phy register + * hns_mdio_write_c22 - access phy register * @bus: mdio bus * @phy_id: phy id * @regnum: register num @@ -214,21 +214,19 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev, * * Return 0 on success, negative on failure */ -static int hns_mdio_write(struct mii_bus *bus, - int phy_id, int regnum, u16 data) +static int hns_mdio_write_c22(struct mii_bus *bus, + int phy_id, int regnum, u16 data) { - int ret; struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; - u8 devad = ((regnum >> 16) & 0x1f); - u8 is_c45 = !!(regnum & MII_ADDR_C45); u16 reg = (u16)(regnum & 0xffff); - u8 op; u16 cmd_reg_cfg; + int ret; + u8 op; dev_dbg(&bus->dev, "mdio write %s,base is %p\n", bus->id, mdio_dev->vbase); - dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x, write data=%d\n", - phy_id, is_c45, devad, reg, data); + dev_dbg(&bus->dev, "phy id=%d, reg=%#x, write data=%d\n", + phy_id, reg, data); /* wait for ready */ ret = hns_mdio_wait_ready(bus); @@ -237,58 +235,91 @@ static int hns_mdio_write(struct mii_bus *bus, return ret; } - if (!is_c45) { - cmd_reg_cfg = reg; - op = MDIO_C22_WRITE; - } else { - /* config the cmd-reg to write addr*/ - MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M, - MDIO_ADDR_DATA_S, reg); + cmd_reg_cfg = reg; + op = MDIO_C22_WRITE; - hns_mdio_cmd_write(mdio_dev, is_c45, - MDIO_C45_WRITE_ADDR, phy_id, devad); + MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M, + MDIO_WDATA_DATA_S, data); - /* check for read or write opt is finished */ - ret = hns_mdio_wait_ready(bus); - if (ret) { - dev_err(&bus->dev, "MDIO bus is busy\n"); - return ret; - } + hns_mdio_cmd_write(mdio_dev, false, op, phy_id, cmd_reg_cfg); + + return 0; +} + +/** + * hns_mdio_write_c45 - access phy register + * @bus: mdio bus + * @phy_id: phy id + * @devad: device address to read + * @regnum: register num + * @data: register value + * + * Return 0 on success, negative on failure + */ +static int hns_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad, + int regnum, u16 data) +{ + struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; + u16 reg = (u16)(regnum & 0xffff); + u16 cmd_reg_cfg; + int ret; + u8 op; + + dev_dbg(&bus->dev, "mdio write %s,base is %p\n", + bus->id, mdio_dev->vbase); + dev_dbg(&bus->dev, "phy id=%d, devad=%d, reg=%#x, write data=%d\n", + phy_id, devad, reg, data); + + /* wait for ready */ + ret = hns_mdio_wait_ready(bus); + if (ret) { + dev_err(&bus->dev, "MDIO bus is busy\n"); + return ret; + } + + /* config the cmd-reg to write addr*/ + MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M, + MDIO_ADDR_DATA_S, 
reg); - /* config the data needed writing */ - cmd_reg_cfg = devad; - op = MDIO_C45_WRITE_DATA; + hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_WRITE_ADDR, phy_id, devad); + + /* check for read or write opt is finished */ + ret = hns_mdio_wait_ready(bus); + if (ret) { + dev_err(&bus->dev, "MDIO bus is busy\n"); + return ret; } + /* config the data needed writing */ + cmd_reg_cfg = devad; + op = MDIO_C45_WRITE_DATA; + MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M, MDIO_WDATA_DATA_S, data); - hns_mdio_cmd_write(mdio_dev, is_c45, op, phy_id, cmd_reg_cfg); + hns_mdio_cmd_write(mdio_dev, true, op, phy_id, cmd_reg_cfg); return 0; } /** - * hns_mdio_read - access phy register + * hns_mdio_read_c22 - access phy register * @bus: mdio bus * @phy_id: phy id * @regnum: register num * * Return phy register value */ -static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum) +static int hns_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum) { - int ret; - u16 reg_val; - u8 devad = ((regnum >> 16) & 0x1f); - u8 is_c45 = !!(regnum & MII_ADDR_C45); - u16 reg = (u16)(regnum & 0xffff); struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; + u16 reg = (u16)(regnum & 0xffff); + u16 reg_val; + int ret; dev_dbg(&bus->dev, "mdio read %s,base is %p\n", bus->id, mdio_dev->vbase); - dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x!\n", - phy_id, is_c45, devad, reg); + dev_dbg(&bus->dev, "phy id=%d, reg=%#x!\n", phy_id, reg); /* Step 1: wait for ready */ ret = hns_mdio_wait_ready(bus); @@ -297,29 +328,74 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum) return ret; } - if (!is_c45) { - hns_mdio_cmd_write(mdio_dev, is_c45, - MDIO_C22_READ, phy_id, reg); - } else { - MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M, - MDIO_ADDR_DATA_S, reg); + hns_mdio_cmd_write(mdio_dev, false, MDIO_C22_READ, phy_id, reg); - /* Step 2; config the cmd-reg to write addr*/ - hns_mdio_cmd_write(mdio_dev, is_c45, - MDIO_C45_WRITE_ADDR, phy_id, devad); + /* Step 2: waiting for MDIO_COMMAND_REG 's mdio_start==0,*/ + /* check for read or write opt is finished */ + ret = hns_mdio_wait_ready(bus); + if (ret) { + dev_err(&bus->dev, "MDIO bus is busy\n"); + return ret; + } - /* Step 3: check for read or write opt is finished */ - ret = hns_mdio_wait_ready(bus); - if (ret) { - dev_err(&bus->dev, "MDIO bus is busy\n"); - return ret; - } + reg_val = MDIO_GET_REG_BIT(mdio_dev, MDIO_STA_REG, MDIO_STATE_STA_B); + if (reg_val) { + dev_err(&bus->dev, " ERROR! 
MDIO Read failed!\n"); + return -EBUSY; + } - hns_mdio_cmd_write(mdio_dev, is_c45, - MDIO_C45_READ, phy_id, devad); + /* Step 3; get out data*/ + reg_val = (u16)MDIO_GET_REG_FIELD(mdio_dev, MDIO_RDATA_REG, + MDIO_RDATA_DATA_M, MDIO_RDATA_DATA_S); + + return reg_val; +} + +/** + * hns_mdio_read_c45 - access phy register + * @bus: mdio bus + * @phy_id: phy id + * @devad: device address to read + * @regnum: register num + * + * Return phy register value + */ +static int hns_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad, + int regnum) +{ + struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; + u16 reg = (u16)(regnum & 0xffff); + u16 reg_val; + int ret; + + dev_dbg(&bus->dev, "mdio read %s,base is %p\n", + bus->id, mdio_dev->vbase); + dev_dbg(&bus->dev, "phy id=%d, devad=%d, reg=%#x!\n", + phy_id, devad, reg); + + /* Step 1: wait for ready */ + ret = hns_mdio_wait_ready(bus); + if (ret) { + dev_err(&bus->dev, "MDIO bus is busy\n"); + return ret; + } + + MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M, + MDIO_ADDR_DATA_S, reg); + + /* Step 2; config the cmd-reg to write addr*/ + hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_WRITE_ADDR, phy_id, devad); + + /* Step 3: check for read or write opt is finished */ + ret = hns_mdio_wait_ready(bus); + if (ret) { + dev_err(&bus->dev, "MDIO bus is busy\n"); + return ret; } - /* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0,*/ + hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_READ, phy_id, devad); + + /* Step 5: waiting for MDIO_COMMAND_REG 's mdio_start==0,*/ /* check for read or write opt is finished */ ret = hns_mdio_wait_ready(bus); if (ret) { @@ -438,8 +514,10 @@ static int hns_mdio_probe(struct platform_device *pdev) } new_bus->name = MDIO_BUS_NAME; - new_bus->read = hns_mdio_read; - new_bus->write = hns_mdio_write; + new_bus->read = hns_mdio_read_c22; + new_bus->write = hns_mdio_write_c22; + new_bus->read_c45 = hns_mdio_read_c45; + new_bus->write_c45 = hns_mdio_write_c45; new_bus->reset = hns_mdio_reset; new_bus->priv = mdio_dev; new_bus->parent = &pdev->dev; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 3facb55b7161..a3c84bf05e44 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -337,6 +337,9 @@ config ICE_HWTS the PTP clock driver precise cross-timestamp ioctl (PTP_SYS_OFFSET_PRECISE). 
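The hunk that follows adds a hidden ICE_GNSS helper symbol: def_bool GNSS = y || GNSS = ICE is effectively true when the GNSS subsystem is built in, or when GNSS and ice are both modular, so the GNSS code in ice is compiled out whenever a built-in ice would otherwise have to call into a modular gnss core. A minimal sketch of how such a helper symbol is typically consumed from C is shown below; the function names are hypothetical and not part of this patch, only IS_ENABLED() from <linux/kconfig.h> is real:

#include <linux/kconfig.h>	/* IS_ENABLED() */

struct ice_pf;			/* opaque here; the real layout lives in ice.h */

/* Hypothetical helpers -- not the driver's actual symbol names. */
#if IS_ENABLED(CONFIG_ICE_GNSS)
int example_gnss_init(struct ice_pf *pf);	/* would be provided by ice_gnss.o */
#else
static inline int example_gnss_init(struct ice_pf *pf) { return 0; }
#endif

int example_probe_tail(struct ice_pf *pf)
{
	/* IS_ENABLED() keeps both branches visible to the compiler, so the
	 * disabled branch is still type-checked before being discarded.
	 */
	if (!IS_ENABLED(CONFIG_ICE_GNSS))
		return 0;

	return example_gnss_init(pf);
}

The same symbol is what the ice Makefile hunk further down keys on (ice-$(CONFIG_ICE_GNSS) += ice_gnss.o in place of ice-$(CONFIG_TTY) += ice_gnss.o), matching the switch in ice.h from a TTY port to a struct gnss_device.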
+config ICE_GNSS + def_bool GNSS = y || GNSS = ICE + config FM10K tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support" default n diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 59e82d131d88..721f86fd5802 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -110,9 +110,9 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { static int e1000_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { + u32 speed, supported, advertising, lp_advertising, lpa_t; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 speed, supported, advertising; if (hw->phy.media_type == e1000_media_type_copper) { supported = (SUPPORTED_10baseT_Half | @@ -120,7 +120,9 @@ static int e1000_get_link_ksettings(struct net_device *netdev, SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | + SUPPORTED_Asym_Pause | SUPPORTED_Autoneg | + SUPPORTED_Pause | SUPPORTED_TP); if (hw->phy.type == e1000_phy_ife) supported &= ~SUPPORTED_1000baseT_Full; @@ -192,10 +194,16 @@ static int e1000_get_link_ksettings(struct net_device *netdev, if (hw->phy.media_type != e1000_media_type_copper) cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; + lpa_t = mii_stat1000_to_ethtool_lpa_t(adapter->phy_regs.stat1000); + lp_advertising = lpa_t | + mii_lpa_to_ethtool_lpa_t(adapter->phy_regs.lpa); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); return 0; } diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 060b263348ce..08c3d477dd6f 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -2,6 +2,7 @@ /* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include "e1000.h" +#include <linux/ethtool.h> static s32 e1000_wait_autoneg(struct e1000_hw *hw); static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, @@ -1011,6 +1012,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) */ mii_autoneg_adv_reg &= ~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + phy->autoneg_advertised &= + ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); break; case e1000_fc_rx_pause: /* Rx Flow control is enabled, and Tx Flow control is @@ -1024,6 +1027,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) */ mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + phy->autoneg_advertised |= + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); break; case e1000_fc_tx_pause: /* Tx Flow control is enabled, and Rx Flow control is @@ -1031,6 +1036,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) */ mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM; mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP; + phy->autoneg_advertised |= ADVERTISED_Asym_Pause; + phy->autoneg_advertised &= ~ADVERTISED_Pause; break; case e1000_fc_full: /* Flow control (both Rx and Tx) is enabled by a software @@ -1038,6 +1045,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) */ mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + phy->autoneg_advertised |= + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); break; default: e_dbg("Flow control param set incorrectly\n"); diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 9183d480b70b..f269952d207d 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -28,6 +28,7 @@ ice-y := ice_main.o \ ice_flow.o \ ice_idc.o \ ice_devlink.o \ + ice_ddp.o \ ice_fw_update.o \ ice_lag.o \ ice_ethtool.o \ @@ -42,8 +43,8 @@ ice-$(CONFIG_PCI_IOV) += \ ice_vf_vsi_vlan_ops.o \ ice_vf_lib.o ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o -ice-$(CONFIG_TTY) += ice_gnss.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o +ice-$(CONFIG_ICE_GNSS) += ice_gnss.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 2f0b604abc5e..ae93ae488bc2 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -39,6 +39,7 @@ #include <linux/avf/virtchnl.h> #include <linux/cpu_rmap.h> #include <linux/dim.h> +#include <linux/gnss.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_gact.h> @@ -565,9 +566,8 @@ struct ice_pf { struct mutex adev_mutex; /* lock to protect aux device access */ u32 msg_enable; struct ice_ptp ptp; - struct tty_driver *ice_gnss_tty_driver; - struct tty_port *gnss_tty_port[ICE_GNSS_TTY_MINOR_DEVICES]; - struct gnss_serial *gnss_serial[ICE_GNSS_TTY_MINOR_DEVICES]; + struct gnss_serial *gnss_serial; + struct gnss_device *gnss_dev; u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */ u16 rdma_base_vector; diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 958c1e435232..838d9b274d68 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1659,14 +1659,24 @@ struct ice_aqc_lldp_get_mib { #define ICE_AQ_LLDP_TX_ACTIVE 0 #define ICE_AQ_LLDP_TX_SUSPENDED 1 #define ICE_AQ_LLDP_TX_FLUSHED 3 +/* DCBX mode */ +#define ICE_AQ_LLDP_DCBX_M GENMASK(7, 
6) +#define ICE_AQ_LLDP_DCBX_NA 0 +#define ICE_AQ_LLDP_DCBX_CEE 1 +#define ICE_AQ_LLDP_DCBX_IEEE 2 + + u8 state; +#define ICE_AQ_LLDP_MIB_CHANGE_STATE_M BIT(0) +#define ICE_AQ_LLDP_MIB_CHANGE_EXECUTED 0 +#define ICE_AQ_LLDP_MIB_CHANGE_PENDING 1 + /* The following bytes are reserved for the Get LLDP MIB command (0x0A00) * and in the LLDP MIB Change Event (0x0A01). They are valid for the * Get LLDP MIB (0x0A00) response only. */ - u8 reserved1; __le16 local_len; __le16 remote_len; - u8 reserved2[2]; + u8 reserved[2]; __le32 addr_high; __le32 addr_low; }; @@ -1677,6 +1687,9 @@ struct ice_aqc_lldp_set_mib_change { u8 command; #define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 #define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1 +#define ICE_AQ_LLDP_MIB_PENDING_M BIT(1) +#define ICE_AQ_LLDP_MIB_PENDING_DISABLE 0 +#define ICE_AQ_LLDP_MIB_PENDING_ENABLE 1 u8 reserved[15]; }; @@ -2329,6 +2342,7 @@ enum ice_adminq_opc { ice_aqc_opc_lldp_set_local_mib = 0x0A08, ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09, ice_aqc_opc_lldp_filter_ctrl = 0x0A0A, + ice_aqc_opc_lldp_execute_pending_mib = 0x0A0B, /* RSS commands */ ice_aqc_opc_set_rss_key = 0x0B02, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index d02b55b6aa9c..f5842ff42bc7 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -5504,6 +5504,19 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) } /** + * ice_lldp_execute_pending_mib - execute LLDP pending MIB request + * @hw: pointer to HW struct + */ +int ice_lldp_execute_pending_mib(struct ice_hw *hw) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** * ice_fw_supports_report_dflt_cfg * @hw: pointer to the hardware structure * diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 4c6a0b5c9304..98aa8d124730 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -122,7 +122,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update); int ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, - enum ice_fc_mode fc); + enum ice_fc_mode req_mode); bool ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg); @@ -221,6 +221,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); int ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); +int ice_lldp_execute_pending_mib(struct ice_hw *hw); int ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, u8 *data, diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 6be02f9b0b8c..776c1ff6e265 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -73,6 +73,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, if (!ena_update) cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS; + else + cmd->command |= FIELD_PREP(ICE_AQ_LLDP_MIB_PENDING_M, + ICE_AQ_LLDP_MIB_PENDING_ENABLE); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -964,6 +967,42 @@ int ice_get_dcb_cfg(struct ice_port_info *pi) } /** + * ice_get_dcb_cfg_from_mib_change + * @pi: port information structure + * 
@event: pointer to the admin queue receive event + * + * Set DCB configuration from received MIB Change event + */ +void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, + struct ice_rq_event_info *event) +{ + struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; + struct ice_aqc_lldp_get_mib *mib; + u8 change_type, dcbx_mode; + + mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; + + change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type); + if (change_type == ICE_AQ_LLDP_MIB_REMOTE) + dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg; + + dcbx_mode = FIELD_GET(ICE_AQ_LLDP_DCBX_M, mib->type); + + switch (dcbx_mode) { + case ICE_AQ_LLDP_DCBX_IEEE: + dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE; + ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg); + break; + + case ICE_AQ_LLDP_DCBX_CEE: + pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg; + ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *) + event->msg_buf, pi); + break; + } +} + +/** * ice_init_dcb * @hw: pointer to the HW struct * @enable_mib_change: enable MIB change event diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index 6abf28a14291..be34650a77d5 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -144,6 +144,8 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg); int ice_get_dcb_cfg(struct ice_port_info *pi); int ice_set_dcb_cfg(struct ice_port_info *pi); +void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, + struct ice_rq_event_info *event); int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); int ice_query_port_ets(struct ice_port_info *pi, diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 4f24d441c35e..a97b137e21c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -859,7 +859,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) if (err) goto dcb_init_err; - return err; + return 0; dcb_init_err: dev_err(dev, "DCB init failed\n"); @@ -944,6 +944,16 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, } /** + * ice_dcb_is_mib_change_pending - Check if MIB change is pending + * @state: MIB change state + */ +static bool ice_dcb_is_mib_change_pending(u8 state) +{ + return ICE_AQ_LLDP_MIB_CHANGE_PENDING == + FIELD_GET(ICE_AQ_LLDP_MIB_CHANGE_STATE_M, state); +} + +/** * ice_dcb_process_lldp_set_mib_change - Process MIB change * @pf: ptr to ice_pf * @event: pointer to the admin queue receive event @@ -956,6 +966,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct device *dev = ice_pf_to_dev(pf); struct ice_aqc_lldp_get_mib *mib; struct ice_dcbx_cfg tmp_dcbx_cfg; + bool pending_handled = true; bool need_reconfig = false; struct ice_port_info *pi; u8 mib_type; @@ -972,41 +983,58 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, pi = pf->hw.port_info; mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; + /* Ignore if event is not for Nearest Bridge */ - mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) & - ICE_AQ_LLDP_BRID_TYPE_M); + mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type); dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type); if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID) return; + /* A pending change event contains accurate config information, and + * the FW setting has not been updaed yet, so detect if change is + * pending to determine where to pull 
config information from + * (FW vs event) + */ + if (ice_dcb_is_mib_change_pending(mib->state)) + pending_handled = false; + /* Check MIB Type and return if event for Remote MIB update */ - mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M; + mib_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type); dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local"); if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ - ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, - ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, - &pi->qos_cfg.remote_dcbx_cfg); - if (ret) { - dev_err(dev, "Failed to get remote DCB config\n"); - return; + if (!pending_handled) { + ice_get_dcb_cfg_from_mib_change(pi, event); + } else { + ret = + ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, + ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, + &pi->qos_cfg.remote_dcbx_cfg); + if (ret) + dev_dbg(dev, "Failed to get remote DCB config\n"); } + return; } + /* That a DCB change has happened is now determined */ mutex_lock(&pf->tc_mutex); /* store the old configuration */ - tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg; + tmp_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg; /* Reset the old DCBX configuration data */ memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(pi->qos_cfg.local_dcbx_cfg)); /* Get updated DCBX data from firmware */ - ret = ice_get_dcb_cfg(pf->hw.port_info); - if (ret) { - dev_err(dev, "Failed to get DCB config\n"); - goto out; + if (!pending_handled) { + ice_get_dcb_cfg_from_mib_change(pi, event); + } else { + ret = ice_get_dcb_cfg(pi); + if (ret) { + dev_err(dev, "Failed to get DCB config\n"); + goto out; + } } /* No change detected in DCBX configs */ @@ -1033,11 +1061,17 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, clear_bit(ICE_FLAG_DCB_ENA, pf->flags); } + /* Send Execute Pending MIB Change event if it is a Pending event */ + if (!pending_handled) { + ice_lldp_execute_pending_mib(&pf->hw); + pending_handled = true; + } + rtnl_lock(); /* disable VSIs affected by DCB changes */ ice_dcb_ena_dis_vsi(pf, false, true); - ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); + ret = ice_query_port_ets(pi, &buf, sizeof(buf), NULL); if (ret) { dev_err(dev, "Query Port ETS failed\n"); goto unlock_rtnl; @@ -1052,4 +1086,8 @@ unlock_rtnl: rtnl_unlock(); out: mutex_unlock(&pf->tc_mutex); + + /* Send Execute Pending MIB Change event if it is a Pending event */ + if (!pending_handled) + ice_lldp_execute_pending_mib(&pf->hw); } diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c new file mode 100644 index 000000000000..d71ed210f9c4 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -0,0 +1,1897 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Intel Corporation. */ + +#include "ice_common.h" +#include "ice.h" +#include "ice_ddp.h" + +/* For supporting double VLAN mode, it is necessary to enable or disable certain + * boost tcam entries. The metadata labels names that match the following + * prefixes will be saved to allow enabling double VLAN mode. + */ +#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */ +#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */ + +/* To support tunneling entries by PF, the package will append the PF number to + * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. 
+ */ +#define ICE_TNL_PRE "TNL_" +static const struct ice_tunnel_type_scan tnls[] = { + { TNL_VXLAN, "TNL_VXLAN_PF" }, + { TNL_GENEVE, "TNL_GENEVE_PF" }, + { TNL_LAST, "" } +}; + +/** + * ice_verify_pkg - verify package + * @pkg: pointer to the package buffer + * @len: size of the package buffer + * + * Verifies various attributes of the package file, including length, format + * version, and the requirement of at least one segment. + */ +enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +{ + u32 seg_count; + u32 i; + + if (len < struct_size(pkg, seg_offset, 1)) + return ICE_DDP_PKG_INVALID_FILE; + + if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || + pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || + pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || + pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) + return ICE_DDP_PKG_INVALID_FILE; + + /* pkg must have at least one segment */ + seg_count = le32_to_cpu(pkg->seg_count); + if (seg_count < 1) + return ICE_DDP_PKG_INVALID_FILE; + + /* make sure segment array fits in package length */ + if (len < struct_size(pkg, seg_offset, seg_count)) + return ICE_DDP_PKG_INVALID_FILE; + + /* all segments must fit within length */ + for (i = 0; i < seg_count; i++) { + u32 off = le32_to_cpu(pkg->seg_offset[i]); + struct ice_generic_seg_hdr *seg; + + /* segment header must fit */ + if (len < off + sizeof(*seg)) + return ICE_DDP_PKG_INVALID_FILE; + + seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); + + /* segment body must fit */ + if (len < off + le32_to_cpu(seg->seg_size)) + return ICE_DDP_PKG_INVALID_FILE; + } + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_free_seg - free package segment pointer + * @hw: pointer to the hardware structure + * + * Frees the package segment pointer in the proper manner, depending on if the + * segment was allocated or just the passed in pointer was stored. + */ +void ice_free_seg(struct ice_hw *hw) +{ + if (hw->pkg_copy) { + devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); + hw->pkg_copy = NULL; + hw->pkg_size = 0; + } + hw->seg = NULL; +} + +/** + * ice_chk_pkg_version - check package version for compatibility with driver + * @pkg_ver: pointer to a version structure to check + * + * Check to make sure that the package about to be downloaded is compatible with + * the driver. To be compatible, the major and minor components of the package + * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR + * definitions. + */ +static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) +{ + if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH; + else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_LOW; + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_pkg_val_buf + * @buf: pointer to the ice buffer + * + * This helper function validates a buffer's header. 
+ */ +struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) +{ + struct ice_buf_hdr *hdr; + u16 section_count; + u16 data_end; + + hdr = (struct ice_buf_hdr *)buf->buf; + /* verify data */ + section_count = le16_to_cpu(hdr->section_count); + if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) + return NULL; + + data_end = le16_to_cpu(hdr->data_end); + if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) + return NULL; + + return hdr; +} + +/** + * ice_find_buf_table + * @ice_seg: pointer to the ice segment + * + * Returns the address of the buffer table within the ice segment. + */ +static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) +{ + struct ice_nvm_table *nvms = (struct ice_nvm_table *) + (ice_seg->device_table + le32_to_cpu(ice_seg->device_table_count)); + + return (__force struct ice_buf_table *)(nvms->vers + + le32_to_cpu(nvms->table_count)); +} + +/** + * ice_pkg_enum_buf + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This function will enumerate all the buffers in the ice segment. The first + * call is made with the ice_seg parameter non-NULL; on subsequent calls, + * ice_seg is set to NULL which continues the enumeration. When the function + * returns a NULL pointer, then the end of the buffers has been reached, or an + * unexpected value has been detected (for example an invalid section count or + * an invalid buffer end value). + */ +static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg, + struct ice_pkg_enum *state) +{ + if (ice_seg) { + state->buf_table = ice_find_buf_table(ice_seg); + if (!state->buf_table) + return NULL; + + state->buf_idx = 0; + return ice_pkg_val_buf(state->buf_table->buf_array); + } + + if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) + return ice_pkg_val_buf(state->buf_table->buf_array + + state->buf_idx); + else + return NULL; +} + +/** + * ice_pkg_advance_sect + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This helper function will advance the section within the ice segment, + * also advancing the buffer if needed. + */ +static bool ice_pkg_advance_sect(struct ice_seg *ice_seg, + struct ice_pkg_enum *state) +{ + if (!ice_seg && !state->buf) + return false; + + if (!ice_seg && state->buf) + if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) + return true; + + state->buf = ice_pkg_enum_buf(ice_seg, state); + if (!state->buf) + return false; + + /* start of new buffer, reset section index */ + state->sect_idx = 0; + return true; +} + +/** + * ice_pkg_enum_section + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * + * This function will enumerate all the sections of a particular type in the + * ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the matching + * sections has been reached. 
+ */ +void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type) +{ + u16 offset, size; + + if (ice_seg) + state->type = sect_type; + + if (!ice_pkg_advance_sect(ice_seg, state)) + return NULL; + + /* scan for next matching section */ + while (state->buf->section_entry[state->sect_idx].type != + cpu_to_le32(state->type)) + if (!ice_pkg_advance_sect(NULL, state)) + return NULL; + + /* validate section */ + offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); + if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) + return NULL; + + size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size); + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) + return NULL; + + /* make sure the section fits in the buffer */ + if (offset + size > ICE_PKG_BUF_SIZE) + return NULL; + + state->sect_type = + le32_to_cpu(state->buf->section_entry[state->sect_idx].type); + + /* calc pointer to this section */ + state->sect = + ((u8 *)state->buf) + + le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); + + return state->sect; +} + +/** + * ice_pkg_enum_entry + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * @offset: pointer to variable that receives the offset in the table (optional) + * @handler: function that handles access to the entries into the section type + * + * This function will enumerate all the entries in particular section type in + * the ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the entries has + * been reached. + * + * Since each section may have a different header and entry size, the handler + * function is needed to determine the number and location entries in each + * section. + * + * The offset parameter is optional, but should be used for sections that + * contain an offset for each section table. For such cases, the section handler + * function must return the appropriate offset + index to give the absolution + * offset for each entry. For example, if the base for a section's header + * indicates a base offset of 10, and the index for the entry is 2, then + * section handler function should set the offset to 10 + 2 = 12. 
+ */ +static void *ice_pkg_enum_entry(struct ice_seg *ice_seg, + struct ice_pkg_enum *state, u32 sect_type, + u32 *offset, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset)) +{ + void *entry; + + if (ice_seg) { + if (!handler) + return NULL; + + if (!ice_pkg_enum_section(ice_seg, state, sect_type)) + return NULL; + + state->entry_idx = 0; + state->handler = handler; + } else { + state->entry_idx++; + } + + if (!state->handler) + return NULL; + + /* get entry */ + entry = state->handler(state->sect_type, state->sect, state->entry_idx, + offset); + if (!entry) { + /* end of a section, look for another section of this type */ + if (!ice_pkg_enum_section(NULL, state, 0)) + return NULL; + + state->entry_idx = 0; + entry = state->handler(state->sect_type, state->sect, + state->entry_idx, offset); + } + + return entry; +} + +/** + * ice_sw_fv_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the field vector entry to be returned + * @offset: ptr to variable that receives the offset in the field vector table + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * This function treats the given section as of type ice_sw_fv_section and + * enumerates offset field. "offset" is an index into the field vector table. + */ +static void *ice_sw_fv_handler(u32 sect_type, void *section, u32 index, + u32 *offset) +{ + struct ice_sw_fv_section *fv_section = section; + + if (!section || sect_type != ICE_SID_FLD_VEC_SW) + return NULL; + if (index >= le16_to_cpu(fv_section->count)) + return NULL; + if (offset) + /* "index" passed in to this function is relative to a given + * 4k block. To get to the true index into the field vector + * table need to add the relative index to the base_offset + * field of this section + */ + *offset = le16_to_cpu(fv_section->base_offset) + index; + return fv_section->fv + index; +} + +/** + * ice_get_prof_index_max - get the max profile index for used profile + * @hw: pointer to the HW struct + * + * Calling this function will get the max profile index for used profile + * and store the index number in struct ice_switch_info *switch_info + * in HW for following use. + */ +static int ice_get_prof_index_max(struct ice_hw *hw) +{ + u16 prof_index = 0, j, max_prof_index = 0; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + bool flag = false; + struct ice_fv *fv; + u32 offset; + + memset(&state, 0, sizeof(state)); + + if (!hw->seg) + return -EINVAL; + + ice_seg = hw->seg; + + do { + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* in the profile that not be used, the prot_id is set to 0xff + * and the off is set to 0x1ff for all the field vectors. 
+ */ + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id != ICE_PROT_INVALID || + fv->ew[j].off != ICE_FV_OFFSET_INVAL) + flag = true; + if (flag && prof_index > max_prof_index) + max_prof_index = prof_index; + + prof_index++; + flag = false; + } while (fv); + + hw->switch_info->max_used_prof_index = max_prof_index; + + return 0; +} + +/** + * ice_get_ddp_pkg_state - get DDP pkg state after download + * @hw: pointer to the HW struct + * @already_loaded: indicates if pkg was already loaded onto the device + */ +static enum ice_ddp_state ice_get_ddp_pkg_state(struct ice_hw *hw, + bool already_loaded) +{ + if (hw->pkg_ver.major == hw->active_pkg_ver.major && + hw->pkg_ver.minor == hw->active_pkg_ver.minor && + hw->pkg_ver.update == hw->active_pkg_ver.update && + hw->pkg_ver.draft == hw->active_pkg_ver.draft && + !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { + if (already_loaded) + return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED; + else + return ICE_DDP_PKG_SUCCESS; + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED; + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED; + } else { + return ICE_DDP_PKG_ERR; + } +} + +/** + * ice_init_pkg_regs - initialize additional package registers + * @hw: pointer to the hardware structure + */ +static void ice_init_pkg_regs(struct ice_hw *hw) +{ +#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF +#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF +#define ICE_SW_BLK_IDX 0 + + /* setup Switch block input mask, which is 48-bits in two parts */ + wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); + wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); +} + +/** + * ice_marker_ptype_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the Marker PType TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual Marker PType TCAM entries. 
+ */ +static void *ice_marker_ptype_tcam_handler(u32 sect_type, void *section, + u32 index, u32 *offset) +{ + struct ice_marker_ptype_tcam_section *marker_ptype; + + if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE) + return NULL; + + if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + marker_ptype = section; + if (index >= le16_to_cpu(marker_ptype->count)) + return NULL; + + return marker_ptype->tcam + index; +} + +/** + * ice_add_dvm_hint + * @hw: pointer to the HW structure + * @val: value of the boost entry + * @enable: true if entry needs to be enabled, or false if needs to be disabled + */ +static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable) +{ + if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) { + hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val; + hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable; + hw->dvm_upd.count++; + } +} + +/** + * ice_add_tunnel_hint + * @hw: pointer to the HW structure + * @label_name: label text + * @val: value of the tunnel port boost entry + */ +static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) +{ + if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { + u16 i; + + for (i = 0; tnls[i].type != TNL_LAST; i++) { + size_t len = strlen(tnls[i].label_prefix); + + /* Look for matching label start, before continuing */ + if (strncmp(label_name, tnls[i].label_prefix, len)) + continue; + + /* Make sure this label matches our PF. Note that the PF + * character ('0' - '7') will be located where our + * prefix string's null terminator is located. + */ + if ((label_name[len] - '0') == hw->pf_id) { + hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; + hw->tnl.tbl[hw->tnl.count].valid = false; + hw->tnl.tbl[hw->tnl.count].boost_addr = val; + hw->tnl.tbl[hw->tnl.count].port = 0; + hw->tnl.count++; + break; + } + } + } +} + +/** + * ice_label_enum_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the label entry to be returned + * @offset: pointer to receive absolute offset, always zero for label sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual label entries. + */ +static void *ice_label_enum_handler(u32 __always_unused sect_type, + void *section, u32 index, u32 *offset) +{ + struct ice_label_section *labels; + + if (!section) + return NULL; + + if (index > ICE_MAX_LABELS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + labels = section; + if (index >= le16_to_cpu(labels->count)) + return NULL; + + return labels->label + index; +} + +/** + * ice_enum_labels + * @ice_seg: pointer to the ice segment (NULL on subsequent calls) + * @type: the section type that will contain the label (0 on subsequent calls) + * @state: ice_pkg_enum structure that will hold the state of the enumeration + * @value: pointer to a value that will return the label's value if found + * + * Enumerates a list of labels in the package. The caller will call + * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call + * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL + * the end of the list has been reached. 
+ */ +static char *ice_enum_labels(struct ice_seg *ice_seg, u32 type, + struct ice_pkg_enum *state, u16 *value) +{ + struct ice_label *label; + + /* Check for valid label section on first call */ + if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) + return NULL; + + label = ice_pkg_enum_entry(ice_seg, state, type, NULL, + ice_label_enum_handler); + if (!label) + return NULL; + + *value = le16_to_cpu(label->value); + return label->name; +} + +/** + * ice_boost_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the boost TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual boost TCAM entries. + */ +static void *ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, + u32 *offset) +{ + struct ice_boost_tcam_section *boost; + + if (!section) + return NULL; + + if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) + return NULL; + + if (index > ICE_MAX_BST_TCAMS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + boost = section; + if (index >= le16_to_cpu(boost->count)) + return NULL; + + return boost->tcam + index; +} + +/** + * ice_find_boost_entry + * @ice_seg: pointer to the ice segment (non-NULL) + * @addr: Boost TCAM address of entry to search for + * @entry: returns pointer to the entry + * + * Finds a particular Boost TCAM entry and returns a pointer to that entry + * if it is found. The ice_seg parameter must not be NULL since the first call + * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. + */ +static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, + struct ice_boost_tcam_entry **entry) +{ + struct ice_boost_tcam_entry *tcam; + struct ice_pkg_enum state; + + memset(&state, 0, sizeof(state)); + + if (!ice_seg) + return -EINVAL; + + do { + tcam = ice_pkg_enum_entry(ice_seg, &state, + ICE_SID_RXPARSER_BOOST_TCAM, NULL, + ice_boost_tcam_handler); + if (tcam && le16_to_cpu(tcam->addr) == addr) { + *entry = tcam; + return 0; + } + + ice_seg = NULL; + } while (tcam); + + *entry = NULL; + return -EIO; +} + +/** + * ice_is_init_pkg_successful - check if DDP init was successful + * @state: state of the DDP pkg after download + */ +bool ice_is_init_pkg_successful(enum ice_ddp_state state) +{ + switch (state) { + case ICE_DDP_PKG_SUCCESS: + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + return true; + default: + return false; + } +} + +/** + * ice_pkg_buf_alloc + * @hw: pointer to the HW structure + * + * Allocates a package buffer and returns a pointer to the buffer header. + * Note: all package contents must be in Little Endian form. 
+ */ +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) +{ + struct ice_buf_build *bld; + struct ice_buf_hdr *buf; + + bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL); + if (!bld) + return NULL; + + buf = (struct ice_buf_hdr *)bld; + buf->data_end = + cpu_to_le16(offsetof(struct ice_buf_hdr, section_entry)); + return bld; +} + +static bool ice_is_gtp_u_profile(u16 prof_idx) +{ + return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID && + prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) || + prof_idx == ICE_PROFID_IPV4_GTPU_TEID; +} + +static bool ice_is_gtp_c_profile(u16 prof_idx) +{ + switch (prof_idx) { + case ICE_PROFID_IPV4_GTPC_TEID: + case ICE_PROFID_IPV4_GTPC_NO_TEID: + case ICE_PROFID_IPV6_GTPC_TEID: + case ICE_PROFID_IPV6_GTPC_NO_TEID: + return true; + default: + return false; + } +} + +/** + * ice_get_sw_prof_type - determine switch profile type + * @hw: pointer to the HW structure + * @fv: pointer to the switch field vector + * @prof_idx: profile index to check + */ +static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw, + struct ice_fv *fv, u32 prof_idx) +{ + u16 i; + + if (ice_is_gtp_c_profile(prof_idx)) + return ICE_PROF_TUN_GTPC; + + if (ice_is_gtp_u_profile(prof_idx)) + return ICE_PROF_TUN_GTPU; + + for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { + /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && + fv->ew[i].off == ICE_VNI_OFFSET) + return ICE_PROF_TUN_UDP; + + /* GRE tunnel will have GRE protocol */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) + return ICE_PROF_TUN_GRE; + } + + return ICE_PROF_NON_TUN; +} + +/** + * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type + * @hw: pointer to hardware structure + * @req_profs: type of profiles requested + * @bm: pointer to memory for returning the bitmap of field vectors + */ +void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, + unsigned long *bm) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + if (req_profs == ICE_PROF_ALL) { + bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); + return; + } + + memset(&state, 0, sizeof(state)); + bitmap_zero(bm, ICE_MAX_NUM_PROFILES); + ice_seg = hw->seg; + do { + enum ice_prof_type prof_type; + u32 offset; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + ice_seg = NULL; + + if (fv) { + /* Determine field vector type */ + prof_type = ice_get_sw_prof_type(hw, fv, offset); + + if (req_profs & prof_type) + set_bit((u16)offset, bm); + } + } while (fv); +} + +/** + * ice_get_sw_fv_list + * @hw: pointer to the HW structure + * @lkups: list of protocol types + * @bm: bitmap of field vectors to consider + * @fv_list: Head of a list + * + * Finds all the field vector entries from switch block that contain + * a given protocol ID and offset and returns a list of structures of type + * "ice_sw_fv_list_entry". Every structure in the list has a field vector + * definition and profile ID information + * NOTE: The caller of the function is responsible for freeing the memory + * allocated for every list entry. 
+ */ +int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, + unsigned long *bm, struct list_head *fv_list) +{ + struct ice_sw_fv_list_entry *fvl; + struct ice_sw_fv_list_entry *tmp; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + u32 offset; + + memset(&state, 0, sizeof(state)); + + if (!lkups->n_val_words || !hw->seg) + return -EINVAL; + + ice_seg = hw->seg; + do { + u16 i; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* If field vector is not in the bitmap list, then skip this + * profile. + */ + if (!test_bit((u16)offset, bm)) + continue; + + for (i = 0; i < lkups->n_val_words; i++) { + int j; + + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id == + lkups->fv_words[i].prot_id && + fv->ew[j].off == lkups->fv_words[i].off) + break; + if (j >= hw->blk[ICE_BLK_SW].es.fvw) + break; + if (i + 1 == lkups->n_val_words) { + fvl = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*fvl), GFP_KERNEL); + if (!fvl) + goto err; + fvl->fv_ptr = fv; + fvl->profile_id = offset; + list_add(&fvl->list_entry, fv_list); + break; + } + } + } while (fv); + if (list_empty(fv_list)) { + dev_warn(ice_hw_to_dev(hw), + "Required profiles not found in currently loaded DDP package"); + return -EIO; + } + + return 0; + +err: + list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) { + list_del(&fvl->list_entry); + devm_kfree(ice_hw_to_dev(hw), fvl); + } + + return -ENOMEM; +} + +/** + * ice_init_prof_result_bm - Initialize the profile result index bitmap + * @hw: pointer to hardware structure + */ +void ice_init_prof_result_bm(struct ice_hw *hw) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + memset(&state, 0, sizeof(state)); + + if (!hw->seg) + return; + + ice_seg = hw->seg; + do { + u32 off; + u16 i; + + fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &off, ice_sw_fv_handler); + ice_seg = NULL; + if (!fv) + break; + + bitmap_zero(hw->switch_info->prof_res_bm[off], + ICE_MAX_FV_WORDS); + + /* Determine empty field vector indices, these can be + * used for recipe results. Skip index 0, since it is + * always used for Switch ID. + */ + for (i = 1; i < ICE_MAX_FV_WORDS; i++) + if (fv->ew[i].prot_id == ICE_PROT_INVALID && + fv->ew[i].off == ICE_FV_OFFSET_INVAL) + set_bit(i, hw->switch_info->prof_res_bm[off]); + } while (fv); +} + +/** + * ice_pkg_buf_free + * @hw: pointer to the HW structure + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Frees a package buffer + */ +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) +{ + devm_kfree(ice_hw_to_dev(hw), bld); +} + +/** + * ice_pkg_buf_reserve_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @count: the number of sections to reserve + * + * Reserves one or more section table entries in a package buffer. This routine + * can be called multiple times as long as they are made before calling + * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() + * is called once, the number of sections that can be allocated will not be able + * to be increased; not using all reserved sections is fine, but this will + * result in some wasted space in the buffer. + * Note: all package contents must be in Little Endian form. 
+ */ +int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) +{ + struct ice_buf_hdr *buf; + u16 section_count; + u16 data_end; + + if (!bld) + return -EINVAL; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* already an active section, can't increase table size */ + section_count = le16_to_cpu(buf->section_count); + if (section_count > 0) + return -EIO; + + if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) + return -EIO; + bld->reserved_section_table_entries += count; + + data_end = le16_to_cpu(buf->data_end) + + flex_array_size(buf, section_entry, count); + buf->data_end = cpu_to_le16(data_end); + + return 0; +} + +/** + * ice_pkg_buf_alloc_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * + * Reserves memory in the buffer for a section's content and updates the + * buffers' status accordingly. This routine returns a pointer to the first + * byte of the section start within the buffer, which is used to fill in the + * section contents. + * Note: all package contents must be in Little Endian form. + */ +void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) +{ + struct ice_buf_hdr *buf; + u16 sect_count; + u16 data_end; + + if (!bld || !type || !size) + return NULL; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* check for enough space left in buffer */ + data_end = le16_to_cpu(buf->data_end); + + /* section start must align on 4 byte boundary */ + data_end = ALIGN(data_end, 4); + + if ((data_end + size) > ICE_MAX_S_DATA_END) + return NULL; + + /* check for more available section table entries */ + sect_count = le16_to_cpu(buf->section_count); + if (sect_count < bld->reserved_section_table_entries) { + void *section_ptr = ((u8 *)buf) + data_end; + + buf->section_entry[sect_count].offset = cpu_to_le16(data_end); + buf->section_entry[sect_count].size = cpu_to_le16(size); + buf->section_entry[sect_count].type = cpu_to_le32(type); + + data_end += size; + buf->data_end = cpu_to_le16(data_end); + + buf->section_count = cpu_to_le16(sect_count + 1); + return section_ptr; + } + + /* no free section table entries */ + return NULL; +} + +/** + * ice_pkg_buf_alloc_single_section + * @hw: pointer to the HW structure + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * @section: returns pointer to the section + * + * Allocates a package buffer with a single section. + * Note: all package contents must be in Little Endian form. + */ +struct ice_buf_build *ice_pkg_buf_alloc_single_section(struct ice_hw *hw, + u32 type, u16 size, + void **section) +{ + struct ice_buf_build *buf; + + if (!section) + return NULL; + + buf = ice_pkg_buf_alloc(hw); + if (!buf) + return NULL; + + if (ice_pkg_buf_reserve_section(buf, 1)) + goto ice_pkg_buf_alloc_single_section_err; + + *section = ice_pkg_buf_alloc_section(buf, type, size); + if (!*section) + goto ice_pkg_buf_alloc_single_section_err; + + return buf; + +ice_pkg_buf_alloc_single_section_err: + ice_pkg_buf_free(hw, buf); + return NULL; +} + +/** + * ice_pkg_buf_get_active_sections + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Returns the number of active sections. Before using the package buffer + * in an update package command, the caller should make sure that there is at + * least one active section - otherwise, the buffer is not legal and should + * not be used. 
+ * Note: all package contents must be in Little Endian form. + */ +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) +{ + struct ice_buf_hdr *buf; + + if (!bld) + return 0; + + buf = (struct ice_buf_hdr *)&bld->buf; + return le16_to_cpu(buf->section_count); +} + +/** + * ice_pkg_buf + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Return a pointer to the buffer's header + */ +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) +{ + if (!bld) + return NULL; + + return &bld->buf; +} + +static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) +{ + switch (aq_err) { + case ICE_AQ_RC_ENOSEC: + case ICE_AQ_RC_EBADSIG: + return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; + case ICE_AQ_RC_ESVN: + return ICE_DDP_PKG_FILE_REVISION_TOO_LOW; + case ICE_AQ_RC_EBADMAN: + case ICE_AQ_RC_EBADBUF: + return ICE_DDP_PKG_LOAD_ERROR; + default: + return ICE_DDP_PKG_ERR; + } +} + +/** + * ice_acquire_global_cfg_lock + * @hw: pointer to the HW structure + * @access: access type (read or write) + * + * This function will request ownership of the global config lock for reading + * or writing of the package. When attempting to obtain write access, the + * caller must check for the following two return values: + * + * 0 - Means the caller has acquired the global config lock + * and can perform writing of the package. + * -EALREADY - Indicates another driver has already written the + * package or has found that no update was necessary; in + * this case, the caller can just skip performing any + * update of the package. + */ +static int ice_acquire_global_cfg_lock(struct ice_hw *hw, + enum ice_aq_res_access_type access) +{ + int status; + + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, + ICE_GLOBAL_CFG_LOCK_TIMEOUT); + + if (!status) + mutex_lock(&ice_global_cfg_lock_sw); + else if (status == -EALREADY) + ice_debug(hw, ICE_DBG_PKG, + "Global config lock: No work to do\n"); + + return status; +} + +/** + * ice_release_global_cfg_lock + * @hw: pointer to the HW structure + * + * This function will release the global config lock. + */ +static void ice_release_global_cfg_lock(struct ice_hw *hw) +{ + mutex_unlock(&ice_global_cfg_lock_sw); + ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); +} + +/** + * ice_dwnld_cfg_bufs + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains global config lock and downloads the package configuration buffers + * to the firmware. Metadata buffers are skipped, and the first metadata buffer + * found indicates that the rest of the buffers are all metadata buffers. + */ +static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, + struct ice_buf *bufs, u32 count) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + struct ice_buf_hdr *bh; + enum ice_aq_err err; + u32 offset, info, i; + int status; + + if (!bufs || !count) + return ICE_DDP_PKG_ERR; + + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. 
+ */ + bh = (struct ice_buf_hdr *)bufs; + if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) + return ICE_DDP_PKG_SUCCESS; + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == -EALREADY) + return ICE_DDP_PKG_ALREADY_LOADED; + return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); + } + + for (i = 0; i < count; i++) { + bool last = ((i + 1) == count); + + if (!last) { + /* check next buffer for metadata flag */ + bh = (struct ice_buf_hdr *)(bufs + i + 1); + + /* A set metadata flag in the next buffer will signal + * that the current buffer will be the last buffer + * downloaded + */ + if (le16_to_cpu(bh->section_count)) + if (le32_to_cpu(bh->section_entry[0].type) & + ICE_METADATA_BUF) + last = true; + } + + bh = (struct ice_buf_hdr *)(bufs + i); + + status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, + &offset, &info, NULL); + + /* Save AQ status from download package */ + if (status) { + ice_debug(hw, ICE_DBG_PKG, + "Pkg download failed: err %d off %d inf %d\n", + status, offset, info); + err = hw->adminq.sq_last_status; + state = ice_map_aq_err_to_ddp_state(err); + break; + } + + if (last) + break; + } + + if (!status) { + status = ice_set_vlan_mode(hw); + if (status) + ice_debug(hw, ICE_DBG_PKG, + "Failed to set VLAN mode: err %d\n", status); + } + + ice_release_global_cfg_lock(hw); + + return state; +} + +/** + * ice_aq_get_pkg_info_list + * @hw: pointer to the hardware structure + * @pkg_info: the buffer which will receive the information list + * @buf_size: the size of the pkg_info information buffer + * @cd: pointer to command details structure or NULL + * + * Get Package Info List (0x0C43) + */ +static int ice_aq_get_pkg_info_list(struct ice_hw *hw, + struct ice_aqc_get_pkg_info_resp *pkg_info, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); + + return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); +} + +/** + * ice_download_pkg + * @hw: pointer to the hardware structure + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package. 
+ */ +static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw, + struct ice_seg *ice_seg) +{ + struct ice_buf_table *ice_buf_tbl; + int status; + + ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", + ice_seg->hdr.seg_format_ver.major, + ice_seg->hdr.seg_format_ver.minor, + ice_seg->hdr.seg_format_ver.update, + ice_seg->hdr.seg_format_ver.draft); + + ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", + le32_to_cpu(ice_seg->hdr.seg_type), + le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); + + ice_buf_tbl = ice_find_buf_table(ice_seg); + + ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", + le32_to_cpu(ice_buf_tbl->buf_count)); + + status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + le32_to_cpu(ice_buf_tbl->buf_count)); + + ice_post_pkg_dwnld_vlan_mode_cfg(hw); + + return status; +} + +/** + * ice_aq_download_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer to transfer + * @buf_size: the size of the package buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Download Package (0x0C40) + */ +int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + int status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == -EIO) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = le32_to_cpu(resp->error_offset); + if (error_info) + *error_info = le32_to_cpu(resp->error_info); + } + + return status; +} + +/** + * ice_aq_upload_section + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer which will receive the section + * @buf_size: the size of the package buffer + * @cd: pointer to command details structure or NULL + * + * Upload Section (0x0C41) + */ +int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); +} + +/** + * ice_aq_update_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package cmd buffer + * @buf_size: the size of the package cmd buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Update Package (0x0C42) + */ +static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + int status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, 
ice_aqc_opc_update_pkg); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == -EIO) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = le32_to_cpu(resp->error_offset); + if (error_info) + *error_info = le32_to_cpu(resp->error_info); + } + + return status; +} + +/** + * ice_update_pkg_no_lock + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + */ +int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + int status = 0; + u32 i; + + for (i = 0; i < count; i++) { + struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); + bool last = ((i + 1) == count); + u32 offset, info; + + status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end), + last, &offset, &info, NULL); + + if (status) { + ice_debug(hw, ICE_DBG_PKG, + "Update pkg failed: err %d off %d inf %d\n", + status, offset, info); + break; + } + } + + return status; +} + +/** + * ice_update_pkg + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains change lock and updates package. + */ +int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + int status; + + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + return status; + + status = ice_update_pkg_no_lock(hw, bufs, count); + + ice_release_change_lock(hw); + + return status; +} + +/** + * ice_find_seg_in_pkg + * @hw: pointer to the hardware structure + * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) + * @pkg_hdr: pointer to the package header to be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. + */ +struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr) +{ + u32 i; + + ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", + pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, + pkg_hdr->pkg_format_ver.update, + pkg_hdr->pkg_format_ver.draft); + + /* Search all package segments for the requested segment type */ + for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { + struct ice_generic_seg_hdr *seg; + + seg = (struct ice_generic_seg_hdr + *)((u8 *)pkg_hdr + + le32_to_cpu(pkg_hdr->seg_offset[i])); + + if (le32_to_cpu(seg->seg_type) == seg_type) + return seg; + } + + return NULL; +} + +/** + * ice_init_pkg_info + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + * + * Saves off the package details into the HW structure. 
+ */ +static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw, + struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_generic_seg_hdr *seg_hdr; + + if (!pkg_hdr) + return ICE_DDP_PKG_ERR; + + seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); + if (seg_hdr) { + struct ice_meta_sect *meta; + struct ice_pkg_enum state; + + memset(&state, 0, sizeof(state)); + + /* Get package information from the Metadata Section */ + meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, + ICE_SID_METADATA); + if (!meta) { + ice_debug(hw, ICE_DBG_INIT, + "Did not find ice metadata section in package\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + hw->pkg_ver = meta->ver; + memcpy(hw->pkg_name, meta->name, sizeof(meta->name)); + + ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", + meta->ver.major, meta->ver.minor, meta->ver.update, + meta->ver.draft, meta->name); + + hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; + memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id)); + + ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", + seg_hdr->seg_format_ver.major, + seg_hdr->seg_format_ver.minor, + seg_hdr->seg_format_ver.update, + seg_hdr->seg_format_ver.draft, seg_hdr->seg_id); + } else { + ice_debug(hw, ICE_DBG_INIT, + "Did not find ice segment in driver package\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_get_pkg_info + * @hw: pointer to the hardware structure + * + * Store details of the package currently loaded in HW into the HW structure. + */ +static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + struct ice_aqc_get_pkg_info_resp *pkg_info; + u16 size; + u32 i; + + size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); + pkg_info = kzalloc(size, GFP_KERNEL); + if (!pkg_info) + return ICE_DDP_PKG_ERR; + + if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { + state = ICE_DDP_PKG_ERR; + goto init_pkg_free_alloc; + } + + for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { +#define ICE_PKG_FLAG_COUNT 4 + char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; + u8 place = 0; + + if (pkg_info->pkg_info[i].is_active) { + flags[place++] = 'A'; + hw->active_pkg_ver = pkg_info->pkg_info[i].ver; + hw->active_track_id = + le32_to_cpu(pkg_info->pkg_info[i].track_id); + memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name, + sizeof(pkg_info->pkg_info[i].name)); + hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; + } + if (pkg_info->pkg_info[i].is_active_at_boot) + flags[place++] = 'B'; + if (pkg_info->pkg_info[i].is_modified) + flags[place++] = 'M'; + if (pkg_info->pkg_info[i].is_in_nvm) + flags[place++] = 'N'; + + ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", i, + pkg_info->pkg_info[i].ver.major, + pkg_info->pkg_info[i].ver.minor, + pkg_info->pkg_info[i].ver.update, + pkg_info->pkg_info[i].ver.draft, + pkg_info->pkg_info[i].name, flags); + } + +init_pkg_free_alloc: + kfree(pkg_info); + + return state; +} + +/** + * ice_chk_pkg_compat + * @hw: pointer to the hardware structure + * @ospkg: pointer to the package hdr + * @seg: pointer to the package segment hdr + * + * This function checks the package version compatibility with driver and NVM + */ +static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, + struct ice_pkg_hdr *ospkg, + struct ice_seg **seg) +{ + struct ice_aqc_get_pkg_info_resp *pkg; + enum ice_ddp_state state; + u16 size; + u32 i; + + /* Check package version compatibility */ + state = ice_chk_pkg_version(&hw->pkg_ver); + if (state) { + 
ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); + return state; + } + + /* find ICE segment in given package */ + *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, + ospkg); + if (!*seg) { + ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + /* Check if FW is compatible with the OS package */ + size = struct_size(pkg, pkg_info, ICE_PKG_CNT); + pkg = kzalloc(size, GFP_KERNEL); + if (!pkg) + return ICE_DDP_PKG_ERR; + + if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { + state = ICE_DDP_PKG_LOAD_ERROR; + goto fw_ddp_compat_free_alloc; + } + + for (i = 0; i < le32_to_cpu(pkg->count); i++) { + /* loop till we find the NVM package */ + if (!pkg->pkg_info[i].is_in_nvm) + continue; + if ((*seg)->hdr.seg_format_ver.major != + pkg->pkg_info[i].ver.major || + (*seg)->hdr.seg_format_ver.minor > + pkg->pkg_info[i].ver.minor) { + state = ICE_DDP_PKG_FW_MISMATCH; + ice_debug(hw, ICE_DBG_INIT, + "OS package is not compatible with NVM.\n"); + } + /* done processing NVM package so break */ + break; + } +fw_ddp_compat_free_alloc: + kfree(pkg); + return state; +} + +/** + * ice_init_pkg_hints + * @hw: pointer to the HW structure + * @ice_seg: pointer to the segment of the package scan (non-NULL) + * + * This function will scan the package and save off relevant information + * (hints or metadata) for driver use. The ice_seg parameter must not be NULL + * since the first call to ice_enum_labels requires a pointer to an actual + * ice_seg structure. + */ +static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_pkg_enum state; + char *label_name; + u16 val; + int i; + + memset(&hw->tnl, 0, sizeof(hw->tnl)); + memset(&state, 0, sizeof(state)); + + if (!ice_seg) + return; + + label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, + &val); + + while (label_name) { + if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) + /* check for a tunnel entry */ + ice_add_tunnel_hint(hw, label_name, val); + + /* check for a dvm mode entry */ + else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE))) + ice_add_dvm_hint(hw, val, true); + + /* check for a svm mode entry */ + else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE))) + ice_add_dvm_hint(hw, val, false); + + label_name = ice_enum_labels(NULL, 0, &state, &val); + } + + /* Cache the appropriate boost TCAM entry pointers for tunnels */ + for (i = 0; i < hw->tnl.count; i++) { + ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, + &hw->tnl.tbl[i].boost_entry); + if (hw->tnl.tbl[i].boost_entry) { + hw->tnl.tbl[i].valid = true; + if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT) + hw->tnl.valid_count[hw->tnl.tbl[i].type]++; + } + } + + /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */ + for (i = 0; i < hw->dvm_upd.count; i++) + ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr, + &hw->dvm_upd.tbl[i].boost_entry); +} + +/** + * ice_fill_hw_ptype - fill the enabled PTYPE bit information + * @hw: pointer to the HW structure + */ +static void ice_fill_hw_ptype(struct ice_hw *hw) +{ + struct ice_marker_ptype_tcam_entry *tcam; + struct ice_seg *seg = hw->seg; + struct ice_pkg_enum state; + + bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); + if (!seg) + return; + + memset(&state, 0, sizeof(state)); + + do { + tcam = ice_pkg_enum_entry(seg, &state, + ICE_SID_RXPARSER_MARKER_PTYPE, NULL, + ice_marker_ptype_tcam_handler); + if (tcam && + le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && 
+ le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX) + set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype); + + seg = NULL; + } while (tcam); +} + +/** + * ice_init_pkg - initialize/download package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function initializes a package. The package contains HW tables + * required to do packet processing. First, the function extracts package + * information such as version. Then it finds the ice configuration segment + * within the package; this function then saves a copy of the segment pointer + * within the supplied package buffer. Next, the function will cache any hints + * from the package, followed by downloading the package itself. Note, that if + * a previous PF driver has already downloaded the package successfully, then + * the current driver will not have to download the package again. + * + * The local package contents will be used to query default behavior and to + * update specific sections of the HW's version of the package (e.g. to update + * the parse graph to understand new protocols). + * + * This function stores a pointer to the package buffer memory, and it is + * expected that the supplied buffer will not be freed immediately. If the + * package buffer needs to be freed, such as when read from a file, use + * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this + * case. + */ +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) +{ + bool already_loaded = false; + enum ice_ddp_state state; + struct ice_pkg_hdr *pkg; + struct ice_seg *seg; + + if (!buf || !len) + return ICE_DDP_PKG_ERR; + + pkg = (struct ice_pkg_hdr *)buf; + state = ice_verify_pkg(pkg, len); + if (state) { + ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", + state); + return state; + } + + /* initialize package info */ + state = ice_init_pkg_info(hw, pkg); + if (state) + return state; + + /* before downloading the package, check package version for + * compatibility with driver + */ + state = ice_chk_pkg_compat(hw, pkg, &seg); + if (state) + return state; + + /* initialize package hints and then download package */ + ice_init_pkg_hints(hw, seg); + state = ice_download_pkg(hw, seg); + if (state == ICE_DDP_PKG_ALREADY_LOADED) { + ice_debug(hw, ICE_DBG_INIT, + "package previously loaded - no work.\n"); + already_loaded = true; + } + + /* Get information on the package currently loaded in HW, then make sure + * the driver is compatible with this version. + */ + if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) { + state = ice_get_pkg_info(hw); + if (!state) + state = ice_get_ddp_pkg_state(hw, already_loaded); + } + + if (ice_is_init_pkg_successful(state)) { + hw->seg = seg; + /* on successful package download update other required + * registers to support the package and fill HW tables + * with package content. + */ + ice_init_pkg_regs(hw); + ice_fill_blk_tbls(hw); + ice_fill_hw_ptype(hw); + ice_get_prof_index_max(hw); + } else { + ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", state); + } + + return state; +} + +/** + * ice_copy_and_init_pkg - initialize/download a copy of the package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function copies the package buffer, and then calls ice_init_pkg() to + * initialize the copied package contents. 
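
A minimal caller sketch for ice_init_pkg(), assuming a hypothetical wrapper name (example_load_ddp() is illustrative, not part of this patch): it highlights the two points the kernel-doc above makes, namely that the buffer handed in must stay allocated for the life of the ice_hw, and that "already loaded by another PF" outcomes still count as usable via ice_is_init_pkg_successful().

/* Illustrative caller: pkg_data must remain valid after this call, so a
 * long-lived allocation (or ice_copy_and_init_pkg(), below) is required.
 */
static void example_load_ddp(struct ice_hw *hw, u8 *pkg_data, u32 pkg_len)
{
        enum ice_ddp_state state;

        state = ice_init_pkg(hw, pkg_data, pkg_len);

        /* "Already loaded" variants are still a usable configuration. */
        if (ice_is_init_pkg_successful(state))
                return;

        /* A real caller would degrade gracefully here, e.g. log the state
         * value and continue with reduced functionality.
         */
}
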
+ * + * The copying is necessary if the package buffer supplied is constant, or if + * the memory may disappear shortly after calling this function. + * + * If the package buffer resides in the data segment and can be modified, the + * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). + * + * However, if the package buffer needs to be copied first, such as when being + * read from a file, the caller should use ice_copy_and_init_pkg(). + * + * This function will first copy the package buffer, before calling + * ice_init_pkg(). The caller is free to immediately destroy the original + * package buffer, as the new copy will be managed by this function and + * related routines. + */ +enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, + u32 len) +{ + enum ice_ddp_state state; + u8 *buf_copy; + + if (!buf || !len) + return ICE_DDP_PKG_ERR; + + buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); + + state = ice_init_pkg(hw, buf_copy, len); + if (!ice_is_init_pkg_successful(state)) { + /* Free the copy, since we failed to initialize the package */ + devm_kfree(ice_hw_to_dev(hw), buf_copy); + } else { + /* Track the copied pkg so we can free it later */ + hw->pkg_copy = buf_copy; + hw->pkg_size = len; + } + + return state; +} diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h new file mode 100644 index 000000000000..37eadb3d27a8 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_ddp.h @@ -0,0 +1,445 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2022, Intel Corporation. */ + +#ifndef _ICE_DDP_H_ +#define _ICE_DDP_H_ + +#include "ice_type.h" + +/* Package minimal version supported */ +#define ICE_PKG_SUPP_VER_MAJ 1 +#define ICE_PKG_SUPP_VER_MNR 3 + +/* Package format version */ +#define ICE_PKG_FMT_VER_MAJ 1 +#define ICE_PKG_FMT_VER_MNR 0 +#define ICE_PKG_FMT_VER_UPD 0 +#define ICE_PKG_FMT_VER_DFT 0 + +#define ICE_PKG_CNT 4 + +#define ICE_FV_OFFSET_INVAL 0x1FF + +/* Extraction Sequence (Field Vector) Table */ +struct ice_fv_word { + u8 prot_id; + u16 off; /* Offset within the protocol header */ + u8 resvrd; +} __packed; + +#define ICE_MAX_NUM_PROFILES 256 + +#define ICE_MAX_FV_WORDS 48 +struct ice_fv { + struct ice_fv_word ew[ICE_MAX_FV_WORDS]; +}; + +enum ice_ddp_state { + /* Indicates that this call to ice_init_pkg + * successfully loaded the requested DDP package + */ + ICE_DDP_PKG_SUCCESS = 0, + + /* Generic error for already loaded errors, it is mapped later to + * the more specific one (one of the next 3) + */ + ICE_DDP_PKG_ALREADY_LOADED = -1, + + /* Indicates that a DDP package of the same version has already been + * loaded onto the device by a previous call or by another PF + */ + ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2, + + /* The device has a DDP package that is not supported by the driver */ + ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3, + + /* The device has a compatible package + * (but different from the request) already loaded + */ + ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4, + + /* The firmware loaded on the device is not compatible with + * the DDP package loaded + */ + ICE_DDP_PKG_FW_MISMATCH = -5, + + /* The DDP package file is invalid */ + ICE_DDP_PKG_INVALID_FILE = -6, + + /* The version of the DDP package provided is higher than + * the driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7, + + /* The version of the DDP package provided is lower than the + * driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8, + + /* The 
signature of the DDP package file provided is invalid */ + ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9, + + /* The DDP package file security revision is too low and not + * supported by firmware + */ + ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10, + + /* An error occurred in firmware while loading the DDP package */ + ICE_DDP_PKG_LOAD_ERROR = -11, + + /* Other errors */ + ICE_DDP_PKG_ERR = -12 +}; + +/* Package and segment headers and tables */ +struct ice_pkg_hdr { + struct ice_pkg_ver pkg_format_ver; + __le32 seg_count; + __le32 seg_offset[]; +}; + +/* generic segment */ +struct ice_generic_seg_hdr { +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_ICE 0x00000010 + __le32 seg_type; + struct ice_pkg_ver seg_format_ver; + __le32 seg_size; + char seg_id[ICE_PKG_NAME_SIZE]; +}; + +/* ice specific segment */ + +union ice_device_id { + struct { + __le16 device_id; + __le16 vendor_id; + } dev_vend_id; + __le32 id; +}; + +struct ice_device_id_entry { + union ice_device_id device; + union ice_device_id sub_device; +}; + +struct ice_seg { + struct ice_generic_seg_hdr hdr; + __le32 device_table_count; + struct ice_device_id_entry device_table[]; +}; + +struct ice_nvm_table { + __le32 table_count; + __le32 vers[]; +}; + +struct ice_buf { +#define ICE_PKG_BUF_SIZE 4096 + u8 buf[ICE_PKG_BUF_SIZE]; +}; + +struct ice_buf_table { + __le32 buf_count; + struct ice_buf buf_array[]; +}; + +struct ice_run_time_cfg_seg { + struct ice_generic_seg_hdr hdr; + u8 rsvd[8]; + struct ice_buf_table buf_table; +}; + +/* global metadata specific segment */ +struct ice_global_metadata_seg { + struct ice_generic_seg_hdr hdr; + struct ice_pkg_ver pkg_ver; + __le32 rsvd; + char pkg_name[ICE_PKG_NAME_SIZE]; +}; + +#define ICE_MIN_S_OFF 12 +#define ICE_MAX_S_OFF 4095 +#define ICE_MIN_S_SZ 1 +#define ICE_MAX_S_SZ 4084 + +/* section information */ +struct ice_section_entry { + __le32 type; + __le16 offset; + __le16 size; +}; + +#define ICE_MIN_S_COUNT 1 +#define ICE_MAX_S_COUNT 511 +#define ICE_MIN_S_DATA_END 12 +#define ICE_MAX_S_DATA_END 4096 + +#define ICE_METADATA_BUF 0x80000000 + +struct ice_buf_hdr { + __le16 section_count; + __le16 data_end; + struct ice_section_entry section_entry[]; +}; + +#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) \ + ((ICE_PKG_BUF_SIZE - \ + struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) / \ + (ent_sz)) + +/* ice package section IDs */ +#define ICE_SID_METADATA 1 +#define ICE_SID_XLT0_SW 10 +#define ICE_SID_XLT_KEY_BUILDER_SW 11 +#define ICE_SID_XLT1_SW 12 +#define ICE_SID_XLT2_SW 13 +#define ICE_SID_PROFID_TCAM_SW 14 +#define ICE_SID_PROFID_REDIR_SW 15 +#define ICE_SID_FLD_VEC_SW 16 +#define ICE_SID_CDID_KEY_BUILDER_SW 17 + +struct ice_meta_sect { + struct ice_pkg_ver ver; +#define ICE_META_SECT_NAME_SIZE 28 + char name[ICE_META_SECT_NAME_SIZE]; + __le32 track_id; +}; + +#define ICE_SID_CDID_REDIR_SW 18 + +#define ICE_SID_XLT0_ACL 20 +#define ICE_SID_XLT_KEY_BUILDER_ACL 21 +#define ICE_SID_XLT1_ACL 22 +#define ICE_SID_XLT2_ACL 23 +#define ICE_SID_PROFID_TCAM_ACL 24 +#define ICE_SID_PROFID_REDIR_ACL 25 +#define ICE_SID_FLD_VEC_ACL 26 +#define ICE_SID_CDID_KEY_BUILDER_ACL 27 +#define ICE_SID_CDID_REDIR_ACL 28 + +#define ICE_SID_XLT0_FD 30 +#define ICE_SID_XLT_KEY_BUILDER_FD 31 +#define ICE_SID_XLT1_FD 32 +#define ICE_SID_XLT2_FD 33 +#define ICE_SID_PROFID_TCAM_FD 34 +#define ICE_SID_PROFID_REDIR_FD 35 +#define ICE_SID_FLD_VEC_FD 36 +#define ICE_SID_CDID_KEY_BUILDER_FD 37 +#define ICE_SID_CDID_REDIR_FD 38 + +#define ICE_SID_XLT0_RSS 40 +#define ICE_SID_XLT_KEY_BUILDER_RSS 41 
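
To make the ICE_MAX_ENTRIES_IN_BUF() arithmetic above concrete, here is a standalone userspace sketch (illustrative, not driver code) that mirrors it: a 4096-byte package buffer loses the 4-byte ice_buf_hdr, one 8-byte section entry descriptor, and the section's own header, and the remainder is divided by the per-entry size. The sizes assume the field layouts shown above with no padding.

#include <stdio.h>

#define PKG_BUF_SIZE    4096
#define BUF_HDR_SIZE    4       /* __le16 section_count + __le16 data_end */
#define SECT_ENTRY_SIZE 8       /* __le32 type + __le16 offset + __le16 size */

static unsigned int max_entries_in_buf(unsigned int hdr_size,
                                       unsigned int entry_size)
{
        return (PKG_BUF_SIZE - BUF_HDR_SIZE - SECT_ENTRY_SIZE - hdr_size) /
               entry_size;
}

int main(void)
{
        /* e.g. a section whose own header is 4 bytes and whose entries are
         * 8 bytes each: (4096 - 4 - 8 - 4) / 8 = 510 entries per buffer.
         */
        printf("%u\n", max_entries_in_buf(4, 8));
        return 0;
}
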
+#define ICE_SID_XLT1_RSS 42 +#define ICE_SID_XLT2_RSS 43 +#define ICE_SID_PROFID_TCAM_RSS 44 +#define ICE_SID_PROFID_REDIR_RSS 45 +#define ICE_SID_FLD_VEC_RSS 46 +#define ICE_SID_CDID_KEY_BUILDER_RSS 47 +#define ICE_SID_CDID_REDIR_RSS 48 + +#define ICE_SID_RXPARSER_MARKER_PTYPE 55 +#define ICE_SID_RXPARSER_BOOST_TCAM 56 +#define ICE_SID_RXPARSER_METADATA_INIT 58 +#define ICE_SID_TXPARSER_BOOST_TCAM 66 + +#define ICE_SID_XLT0_PE 80 +#define ICE_SID_XLT_KEY_BUILDER_PE 81 +#define ICE_SID_XLT1_PE 82 +#define ICE_SID_XLT2_PE 83 +#define ICE_SID_PROFID_TCAM_PE 84 +#define ICE_SID_PROFID_REDIR_PE 85 +#define ICE_SID_FLD_VEC_PE 86 +#define ICE_SID_CDID_KEY_BUILDER_PE 87 +#define ICE_SID_CDID_REDIR_PE 88 + +/* Label Metadata section IDs */ +#define ICE_SID_LBL_FIRST 0x80000010 +#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 +/* The following define MUST be updated to reflect the last label section ID */ +#define ICE_SID_LBL_LAST 0x80000038 + +/* Label ICE runtime configuration section IDs */ +#define ICE_SID_TX_5_LAYER_TOPO 0x10 + +enum ice_block { + ICE_BLK_SW = 0, + ICE_BLK_ACL, + ICE_BLK_FD, + ICE_BLK_RSS, + ICE_BLK_PE, + ICE_BLK_COUNT +}; + +enum ice_sect { + ICE_XLT0 = 0, + ICE_XLT_KB, + ICE_XLT1, + ICE_XLT2, + ICE_PROF_TCAM, + ICE_PROF_REDIR, + ICE_VEC_TBL, + ICE_CDID_KB, + ICE_CDID_REDIR, + ICE_SECT_COUNT +}; + +/* package labels */ +struct ice_label { + __le16 value; +#define ICE_PKG_LABEL_SIZE 64 + char name[ICE_PKG_LABEL_SIZE]; +}; + +struct ice_label_section { + __le16 count; + struct ice_label label[]; +}; + +#define ICE_MAX_LABELS_IN_BUF \ + ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_label_section *)0, \ + label, 1) - \ + sizeof(struct ice_label), \ + sizeof(struct ice_label)) + +struct ice_sw_fv_section { + __le16 count; + __le16 base_offset; + struct ice_fv fv[]; +}; + +struct ice_sw_fv_list_entry { + struct list_head list_entry; + u32 profile_id; + struct ice_fv *fv_ptr; +}; + +/* The BOOST TCAM stores the match packet header in reverse order, meaning + * the fields are reversed; in addition, this means that the normally big endian + * fields of the packet are now little endian. + */ +struct ice_boost_key_value { +#define ICE_BOOST_REMAINING_HV_KEY 15 + u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; + __le16 hv_dst_port_key; + __le16 hv_src_port_key; + u8 tcam_search_key; +} __packed; + +struct ice_boost_key { + struct ice_boost_key_value key; + struct ice_boost_key_value key2; +}; + +/* package Boost TCAM entry */ +struct ice_boost_tcam_entry { + __le16 addr; + __le16 reserved; + /* break up the 40 bytes of key into different fields */ + struct ice_boost_key key; + u8 boost_hit_index_group; + /* The following contains bitfields which are not on byte boundaries. + * These fields are currently unused by driver software. 
+ */ +#define ICE_BOOST_BIT_FIELDS 43 + u8 bit_fields[ICE_BOOST_BIT_FIELDS]; +}; + +struct ice_boost_tcam_section { + __le16 count; + __le16 reserved; + struct ice_boost_tcam_entry tcam[]; +}; + +#define ICE_MAX_BST_TCAMS_IN_BUF \ + ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_boost_tcam_section *)0, \ + tcam, 1) - \ + sizeof(struct ice_boost_tcam_entry), \ + sizeof(struct ice_boost_tcam_entry)) + +/* package Marker Ptype TCAM entry */ +struct ice_marker_ptype_tcam_entry { +#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024 + __le16 addr; + __le16 ptype; + u8 keys[20]; +}; + +struct ice_marker_ptype_tcam_section { + __le16 count; + __le16 reserved; + struct ice_marker_ptype_tcam_entry tcam[]; +}; + +#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \ + ICE_MAX_ENTRIES_IN_BUF( \ + struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, \ + 1) - \ + sizeof(struct ice_marker_ptype_tcam_entry), \ + sizeof(struct ice_marker_ptype_tcam_entry)) + +struct ice_xlt1_section { + __le16 count; + __le16 offset; + u8 value[]; +}; + +struct ice_xlt2_section { + __le16 count; + __le16 offset; + __le16 value[]; +}; + +struct ice_prof_redir_section { + __le16 count; + __le16 offset; + u8 redir_value[]; +}; + +/* package buffer building */ + +struct ice_buf_build { + struct ice_buf buf; + u16 reserved_section_table_entries; +}; + +struct ice_pkg_enum { + struct ice_buf_table *buf_table; + u32 buf_idx; + + u32 type; + struct ice_buf_hdr *buf; + u32 sect_idx; + void *sect; + u32 sect_type; + + u32 entry_idx; + void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); +}; + +int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd); +int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, struct ice_sq_cd *cd); + +void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size); + +enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len); + +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw); + +struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr); + +int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count); +int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); + +int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count); +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld); +void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type); + +struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf); + +#endif diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 4191994d8f3a..5b71d40a7dc0 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -664,7 +664,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi) if (status) goto err_start_rx_ring; - return status; + return 0; err_start_rx_ring: ice_vsi_free_rx_rings(vsi); @@ -1950,8 +1950,7 @@ ice_phy_type_to_ethtool(struct net_device *netdev, ICE_PHY_TYPE_LOW_100G_CAUI4 | ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | ICE_PHY_TYPE_LOW_100G_AUI4 | - ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | - ICE_PHY_TYPE_LOW_100GBASE_CP2; + ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4; phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | ICE_PHY_TYPE_HIGH_100G_CAUI2 | ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | @@ -1964,15 +1963,27 @@ 
ice_phy_type_to_ethtool(struct net_device *netdev, 100000baseCR4_Full); } - phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_SR4 | - ICE_PHY_TYPE_LOW_100GBASE_SR2; - if (phy_types_low & phy_type_mask_lo) { + if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseCR2_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, + 100000baseCR2_Full); + } + + if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4) { ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseSR4_Full); ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, 100000baseSR4_Full); } + if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseSR2_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, + 100000baseSR2_Full); + } + phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 | ICE_PHY_TYPE_LOW_100GBASE_DR; if (phy_types_low & phy_type_mask_lo) { @@ -1984,14 +1995,20 @@ ice_phy_type_to_ethtool(struct net_device *netdev, phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 | ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4; - phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4; - if (phy_types_low & phy_type_mask_lo || - phy_types_high & phy_type_mask_hi) { + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseKR4_Full); ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, 100000baseKR4_Full); } + + if (phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseKR2_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, + 100000baseKR2_Full); + } + } #define TEST_SET_BITS_TIMEOUT 50 @@ -2242,17 +2259,15 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) 100baseT_Full)) adv_link_speed |= ICE_AQ_LINK_SPEED_100MB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 1000baseX_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseX_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, 1000baseT_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 1000baseKX_Full)) adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 2500baseT_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 2500baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, 2500baseX_Full)) adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, @@ -2261,9 +2276,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) if (ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseT_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, - 10000baseKR_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_10GB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseSR_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseLR_Full)) @@ -2287,9 +2301,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) if (ethtool_link_ksettings_test_link_mode(ks, advertising, 50000baseCR2_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, - 50000baseKR2_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_50GB; - if (ethtool_link_ksettings_test_link_mode(ks, 
advertising, + 50000baseKR2_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, 50000baseSR2_Full)) adv_link_speed |= ICE_AQ_LINK_SPEED_50GB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, @@ -2299,7 +2312,13 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) ethtool_link_ksettings_test_link_mode(ks, advertising, 100000baseLR4_ER4_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseKR4_Full)) + 100000baseKR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseCR2_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseSR2_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseKR2_Full)) adv_link_speed |= ICE_AQ_LINK_SPEED_100GB; return adv_link_speed; diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 4b3bb19e1d06..5ce413965930 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -6,23 +6,6 @@ #include "ice_flow.h" #include "ice.h" -/* For supporting double VLAN mode, it is necessary to enable or disable certain - * boost tcam entries. The metadata labels names that match the following - * prefixes will be saved to allow enabling double VLAN mode. - */ -#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */ -#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */ - -/* To support tunneling entries by PF, the package will append the PF number to - * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. - */ -#define ICE_TNL_PRE "TNL_" -static const struct ice_tunnel_type_scan tnls[] = { - { TNL_VXLAN, "TNL_VXLAN_PF" }, - { TNL_GENEVE, "TNL_GENEVE_PF" }, - { TNL_LAST, "" } -}; - static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = { /* SWITCH */ { @@ -104,225 +87,6 @@ static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect) } /** - * ice_pkg_val_buf - * @buf: pointer to the ice buffer - * - * This helper function validates a buffer's header. - */ -static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) -{ - struct ice_buf_hdr *hdr; - u16 section_count; - u16 data_end; - - hdr = (struct ice_buf_hdr *)buf->buf; - /* verify data */ - section_count = le16_to_cpu(hdr->section_count); - if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) - return NULL; - - data_end = le16_to_cpu(hdr->data_end); - if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) - return NULL; - - return hdr; -} - -/** - * ice_find_buf_table - * @ice_seg: pointer to the ice segment - * - * Returns the address of the buffer table within the ice segment. - */ -static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) -{ - struct ice_nvm_table *nvms; - - nvms = (struct ice_nvm_table *) - (ice_seg->device_table + - le32_to_cpu(ice_seg->device_table_count)); - - return (__force struct ice_buf_table *) - (nvms->vers + le32_to_cpu(nvms->table_count)); -} - -/** - * ice_pkg_enum_buf - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * - * This function will enumerate all the buffers in the ice segment. The first - * call is made with the ice_seg parameter non-NULL; on subsequent calls, - * ice_seg is set to NULL which continues the enumeration. 
When the function - * returns a NULL pointer, then the end of the buffers has been reached, or an - * unexpected value has been detected (for example an invalid section count or - * an invalid buffer end value). - */ -static struct ice_buf_hdr * -ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) -{ - if (ice_seg) { - state->buf_table = ice_find_buf_table(ice_seg); - if (!state->buf_table) - return NULL; - - state->buf_idx = 0; - return ice_pkg_val_buf(state->buf_table->buf_array); - } - - if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) - return ice_pkg_val_buf(state->buf_table->buf_array + - state->buf_idx); - else - return NULL; -} - -/** - * ice_pkg_advance_sect - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * - * This helper function will advance the section within the ice segment, - * also advancing the buffer if needed. - */ -static bool -ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) -{ - if (!ice_seg && !state->buf) - return false; - - if (!ice_seg && state->buf) - if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) - return true; - - state->buf = ice_pkg_enum_buf(ice_seg, state); - if (!state->buf) - return false; - - /* start of new buffer, reset section index */ - state->sect_idx = 0; - return true; -} - -/** - * ice_pkg_enum_section - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * @sect_type: section type to enumerate - * - * This function will enumerate all the sections of a particular type in the - * ice segment. The first call is made with the ice_seg parameter non-NULL; - * on subsequent calls, ice_seg is set to NULL which continues the enumeration. - * When the function returns a NULL pointer, then the end of the matching - * sections has been reached. - */ -static void * -ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, - u32 sect_type) -{ - u16 offset, size; - - if (ice_seg) - state->type = sect_type; - - if (!ice_pkg_advance_sect(ice_seg, state)) - return NULL; - - /* scan for next matching section */ - while (state->buf->section_entry[state->sect_idx].type != - cpu_to_le32(state->type)) - if (!ice_pkg_advance_sect(NULL, state)) - return NULL; - - /* validate section */ - offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); - if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) - return NULL; - - size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size); - if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) - return NULL; - - /* make sure the section fits in the buffer */ - if (offset + size > ICE_PKG_BUF_SIZE) - return NULL; - - state->sect_type = - le32_to_cpu(state->buf->section_entry[state->sect_idx].type); - - /* calc pointer to this section */ - state->sect = ((u8 *)state->buf) + - le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); - - return state->sect; -} - -/** - * ice_pkg_enum_entry - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * @sect_type: section type to enumerate - * @offset: pointer to variable that receives the offset in the table (optional) - * @handler: function that handles access to the entries into the section type - * - * This function will enumerate all the entries in particular section type in - * the ice segment. 
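
The enumeration helpers above follow a two-phase calling convention that is easy to miss in the diff: the first call passes the ice segment, every subsequent call passes NULL with the same state, and a NULL return ends the walk. The sketch below (a hypothetical walker, using the ice_pkg_enum_section() prototype exported in ice_ddp.h) shows the pattern; the section type argument is only latched on the first call.

/* Illustrative walk over all metadata sections of a segment. */
static void example_walk_metadata_sections(struct ice_seg *ice_seg)
{
        struct ice_pkg_enum state;
        void *sect;

        memset(&state, 0, sizeof(state));

        sect = ice_pkg_enum_section(ice_seg, &state, ICE_SID_METADATA);
        while (sect) {
                /* consume the section, e.g. cast to struct ice_meta_sect */
                sect = ice_pkg_enum_section(NULL, &state, ICE_SID_METADATA);
        }
}
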
The first call is made with the ice_seg parameter non-NULL; - * on subsequent calls, ice_seg is set to NULL which continues the enumeration. - * When the function returns a NULL pointer, then the end of the entries has - * been reached. - * - * Since each section may have a different header and entry size, the handler - * function is needed to determine the number and location entries in each - * section. - * - * The offset parameter is optional, but should be used for sections that - * contain an offset for each section table. For such cases, the section handler - * function must return the appropriate offset + index to give the absolution - * offset for each entry. For example, if the base for a section's header - * indicates a base offset of 10, and the index for the entry is 2, then - * section handler function should set the offset to 10 + 2 = 12. - */ -static void * -ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, - u32 sect_type, u32 *offset, - void *(*handler)(u32 sect_type, void *section, - u32 index, u32 *offset)) -{ - void *entry; - - if (ice_seg) { - if (!handler) - return NULL; - - if (!ice_pkg_enum_section(ice_seg, state, sect_type)) - return NULL; - - state->entry_idx = 0; - state->handler = handler; - } else { - state->entry_idx++; - } - - if (!state->handler) - return NULL; - - /* get entry */ - entry = state->handler(state->sect_type, state->sect, state->entry_idx, - offset); - if (!entry) { - /* end of a section, look for another section of this type */ - if (!ice_pkg_enum_section(NULL, state, 0)) - return NULL; - - state->entry_idx = 0; - entry = state->handler(state->sect_type, state->sect, - state->entry_idx, offset); - } - - return entry; -} - -/** * ice_hw_ptype_ena - check if the PTYPE is enabled or not * @hw: pointer to the HW structure * @ptype: the hardware PTYPE @@ -333,312 +97,6 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype) test_bit(ptype, hw->hw_ptype); } -/** - * ice_marker_ptype_tcam_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the Marker PType TCAM entry to be returned - * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual Marker PType TCAM entries. 
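
The handler contract described above (return the index'th entry of a section, or NULL at the end, and report the absolute table offset as base offset plus index) can be illustrated with a sketch for an offset-bearing section such as the XLT1 layout from ice_ddp.h. This is a hypothetical handler written to show the contract, not code from the patch; real handlers additionally check sect_type against the expected section ID.

/* Illustrative ice_pkg_enum_entry() handler for struct ice_xlt1_section. */
static void *example_xlt1_handler(u32 sect_type, void *section, u32 index,
                                  u32 *offset)
{
        struct ice_xlt1_section *xlt1 = section;

        if (!section)
                return NULL;

        if (index >= le16_to_cpu(xlt1->count))
                return NULL;            /* end of this section */

        if (offset)
                /* base offset of the section plus the entry index */
                *offset = le16_to_cpu(xlt1->offset) + index;

        return xlt1->value + index;
}
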
- */ -static void * -ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index, - u32 *offset) -{ - struct ice_marker_ptype_tcam_section *marker_ptype; - - if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE) - return NULL; - - if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF) - return NULL; - - if (offset) - *offset = 0; - - marker_ptype = section; - if (index >= le16_to_cpu(marker_ptype->count)) - return NULL; - - return marker_ptype->tcam + index; -} - -/** - * ice_fill_hw_ptype - fill the enabled PTYPE bit information - * @hw: pointer to the HW structure - */ -static void ice_fill_hw_ptype(struct ice_hw *hw) -{ - struct ice_marker_ptype_tcam_entry *tcam; - struct ice_seg *seg = hw->seg; - struct ice_pkg_enum state; - - bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); - if (!seg) - return; - - memset(&state, 0, sizeof(state)); - - do { - tcam = ice_pkg_enum_entry(seg, &state, - ICE_SID_RXPARSER_MARKER_PTYPE, NULL, - ice_marker_ptype_tcam_handler); - if (tcam && - le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && - le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX) - set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype); - - seg = NULL; - } while (tcam); -} - -/** - * ice_boost_tcam_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the boost TCAM entry to be returned - * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual boost TCAM entries. - */ -static void * -ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) -{ - struct ice_boost_tcam_section *boost; - - if (!section) - return NULL; - - if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) - return NULL; - - /* cppcheck-suppress nullPointer */ - if (index > ICE_MAX_BST_TCAMS_IN_BUF) - return NULL; - - if (offset) - *offset = 0; - - boost = section; - if (index >= le16_to_cpu(boost->count)) - return NULL; - - return boost->tcam + index; -} - -/** - * ice_find_boost_entry - * @ice_seg: pointer to the ice segment (non-NULL) - * @addr: Boost TCAM address of entry to search for - * @entry: returns pointer to the entry - * - * Finds a particular Boost TCAM entry and returns a pointer to that entry - * if it is found. The ice_seg parameter must not be NULL since the first call - * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. - */ -static int -ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, - struct ice_boost_tcam_entry **entry) -{ - struct ice_boost_tcam_entry *tcam; - struct ice_pkg_enum state; - - memset(&state, 0, sizeof(state)); - - if (!ice_seg) - return -EINVAL; - - do { - tcam = ice_pkg_enum_entry(ice_seg, &state, - ICE_SID_RXPARSER_BOOST_TCAM, NULL, - ice_boost_tcam_handler); - if (tcam && le16_to_cpu(tcam->addr) == addr) { - *entry = tcam; - return 0; - } - - ice_seg = NULL; - } while (tcam); - - *entry = NULL; - return -EIO; -} - -/** - * ice_label_enum_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the label entry to be returned - * @offset: pointer to receive absolute offset, always zero for label sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual label entries. 
- */ -static void * -ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index, - u32 *offset) -{ - struct ice_label_section *labels; - - if (!section) - return NULL; - - /* cppcheck-suppress nullPointer */ - if (index > ICE_MAX_LABELS_IN_BUF) - return NULL; - - if (offset) - *offset = 0; - - labels = section; - if (index >= le16_to_cpu(labels->count)) - return NULL; - - return labels->label + index; -} - -/** - * ice_enum_labels - * @ice_seg: pointer to the ice segment (NULL on subsequent calls) - * @type: the section type that will contain the label (0 on subsequent calls) - * @state: ice_pkg_enum structure that will hold the state of the enumeration - * @value: pointer to a value that will return the label's value if found - * - * Enumerates a list of labels in the package. The caller will call - * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call - * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL - * the end of the list has been reached. - */ -static char * -ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, - u16 *value) -{ - struct ice_label *label; - - /* Check for valid label section on first call */ - if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) - return NULL; - - label = ice_pkg_enum_entry(ice_seg, state, type, NULL, - ice_label_enum_handler); - if (!label) - return NULL; - - *value = le16_to_cpu(label->value); - return label->name; -} - -/** - * ice_add_tunnel_hint - * @hw: pointer to the HW structure - * @label_name: label text - * @val: value of the tunnel port boost entry - */ -static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) -{ - if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { - u16 i; - - for (i = 0; tnls[i].type != TNL_LAST; i++) { - size_t len = strlen(tnls[i].label_prefix); - - /* Look for matching label start, before continuing */ - if (strncmp(label_name, tnls[i].label_prefix, len)) - continue; - - /* Make sure this label matches our PF. Note that the PF - * character ('0' - '7') will be located where our - * prefix string's null terminator is located. - */ - if ((label_name[len] - '0') == hw->pf_id) { - hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; - hw->tnl.tbl[hw->tnl.count].valid = false; - hw->tnl.tbl[hw->tnl.count].boost_addr = val; - hw->tnl.tbl[hw->tnl.count].port = 0; - hw->tnl.count++; - break; - } - } - } -} - -/** - * ice_add_dvm_hint - * @hw: pointer to the HW structure - * @val: value of the boost entry - * @enable: true if entry needs to be enabled, or false if needs to be disabled - */ -static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable) -{ - if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) { - hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val; - hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable; - hw->dvm_upd.count++; - } -} - -/** - * ice_init_pkg_hints - * @hw: pointer to the HW structure - * @ice_seg: pointer to the segment of the package scan (non-NULL) - * - * This function will scan the package and save off relevant information - * (hints or metadata) for driver use. The ice_seg parameter must not be NULL - * since the first call to ice_enum_labels requires a pointer to an actual - * ice_seg structure. 
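
The prefix-plus-PF-digit match used by ice_add_tunnel_hint() above is compact but easy to misread, so here is a standalone userspace sketch (illustrative only) of the same test: a label such as "TNL_VXLAN_PF0" matches the prefix "TNL_VXLAN_PF", and the character that follows the prefix selects the owning PF.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool label_matches_pf(const char *label, const char *prefix,
                             unsigned int pf_id)
{
        size_t len = strlen(prefix);

        if (strncmp(label, prefix, len))
                return false;                   /* wrong prefix */

        /* The PF character ('0'-'7') sits where the prefix ends. */
        return (unsigned int)(label[len] - '0') == pf_id;
}

int main(void)
{
        printf("%d\n", label_matches_pf("TNL_VXLAN_PF2", "TNL_VXLAN_PF", 2));  /* 1 */
        printf("%d\n", label_matches_pf("TNL_GENEVE_PF1", "TNL_VXLAN_PF", 2)); /* 0 */
        return 0;
}
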
- */ -static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) -{ - struct ice_pkg_enum state; - char *label_name; - u16 val; - int i; - - memset(&hw->tnl, 0, sizeof(hw->tnl)); - memset(&state, 0, sizeof(state)); - - if (!ice_seg) - return; - - label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, - &val); - - while (label_name) { - if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) - /* check for a tunnel entry */ - ice_add_tunnel_hint(hw, label_name, val); - - /* check for a dvm mode entry */ - else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE))) - ice_add_dvm_hint(hw, val, true); - - /* check for a svm mode entry */ - else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE))) - ice_add_dvm_hint(hw, val, false); - - label_name = ice_enum_labels(NULL, 0, &state, &val); - } - - /* Cache the appropriate boost TCAM entry pointers for tunnels */ - for (i = 0; i < hw->tnl.count; i++) { - ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, - &hw->tnl.tbl[i].boost_entry); - if (hw->tnl.tbl[i].boost_entry) { - hw->tnl.tbl[i].valid = true; - if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT) - hw->tnl.valid_count[hw->tnl.tbl[i].type]++; - } - } - - /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */ - for (i = 0; i < hw->dvm_upd.count; i++) - ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr, - &hw->dvm_upd.tbl[i].boost_entry); -} - /* Key creation */ #define ICE_DC_KEY 0x1 /* don't care */ @@ -810,51 +268,6 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, } /** - * ice_acquire_global_cfg_lock - * @hw: pointer to the HW structure - * @access: access type (read or write) - * - * This function will request ownership of the global config lock for reading - * or writing of the package. When attempting to obtain write access, the - * caller must check for the following two return values: - * - * 0 - Means the caller has acquired the global config lock - * and can perform writing of the package. - * -EALREADY - Indicates another driver has already written the - * package or has found that no update was necessary; in - * this case, the caller can just skip performing any - * update of the package. - */ -static int -ice_acquire_global_cfg_lock(struct ice_hw *hw, - enum ice_aq_res_access_type access) -{ - int status; - - status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, - ICE_GLOBAL_CFG_LOCK_TIMEOUT); - - if (!status) - mutex_lock(&ice_global_cfg_lock_sw); - else if (status == -EALREADY) - ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); - - return status; -} - -/** - * ice_release_global_cfg_lock - * @hw: pointer to the HW structure - * - * This function will release the global config lock. 
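
The global config lock contract described above has one non-obvious return value, -EALREADY, which means another PF has already written the package and the caller should simply skip its own download. The sketch below mirrors how ice_dwnld_cfg_bufs() uses the pair of helpers; the wrapper name is hypothetical and the simplified return values are illustrative (the real code maps -EALREADY to ICE_DDP_PKG_ALREADY_LOADED).

/* Illustrative use of the global config lock around a package download. */
static int example_locked_download(struct ice_hw *hw)
{
        int status;

        status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
        if (status == -EALREADY)
                return 0;       /* another PF already downloaded the package */
        if (status)
                return status;  /* genuine failure to acquire the lock */

        /* ... issue ice_aq_download_pkg() for each config buffer here ... */

        ice_release_global_cfg_lock(hw);
        return 0;
}
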
- */ -static void ice_release_global_cfg_lock(struct ice_hw *hw) -{ - mutex_unlock(&ice_global_cfg_lock_sw); - ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); -} - -/** * ice_acquire_change_lock * @hw: pointer to the HW structure * @access: access type (read or write) @@ -880,1325 +293,6 @@ void ice_release_change_lock(struct ice_hw *hw) } /** - * ice_aq_download_pkg - * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer to transfer - * @buf_size: the size of the package buffer - * @last_buf: last buffer indicator - * @error_offset: returns error offset - * @error_info: returns error information - * @cd: pointer to command details structure or NULL - * - * Download Package (0x0C40) - */ -static int -ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, bool last_buf, u32 *error_offset, - u32 *error_info, struct ice_sq_cd *cd) -{ - struct ice_aqc_download_pkg *cmd; - struct ice_aq_desc desc; - int status; - - if (error_offset) - *error_offset = 0; - if (error_info) - *error_info = 0; - - cmd = &desc.params.download_pkg; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - - if (last_buf) - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; - - status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == -EIO) { - /* Read error from buffer only when the FW returned an error */ - struct ice_aqc_download_pkg_resp *resp; - - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; - if (error_offset) - *error_offset = le32_to_cpu(resp->error_offset); - if (error_info) - *error_info = le32_to_cpu(resp->error_info); - } - - return status; -} - -/** - * ice_aq_upload_section - * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer which will receive the section - * @buf_size: the size of the package buffer - * @cd: pointer to command details structure or NULL - * - * Upload Section (0x0C41) - */ -int -ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, struct ice_sq_cd *cd) -{ - struct ice_aq_desc desc; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - - return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); -} - -/** - * ice_aq_update_pkg - * @hw: pointer to the hardware structure - * @pkg_buf: the package cmd buffer - * @buf_size: the size of the package cmd buffer - * @last_buf: last buffer indicator - * @error_offset: returns error offset - * @error_info: returns error information - * @cd: pointer to command details structure or NULL - * - * Update Package (0x0C42) - */ -static int -ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, - bool last_buf, u32 *error_offset, u32 *error_info, - struct ice_sq_cd *cd) -{ - struct ice_aqc_download_pkg *cmd; - struct ice_aq_desc desc; - int status; - - if (error_offset) - *error_offset = 0; - if (error_info) - *error_info = 0; - - cmd = &desc.params.download_pkg; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - - if (last_buf) - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; - - status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == -EIO) { - /* Read error from buffer only when the FW returned an error */ - struct ice_aqc_download_pkg_resp *resp; - - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; - if (error_offset) - *error_offset = le32_to_cpu(resp->error_offset); - if (error_info) - *error_info = 
le32_to_cpu(resp->error_info); - } - - return status; -} - -/** - * ice_find_seg_in_pkg - * @hw: pointer to the hardware structure - * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) - * @pkg_hdr: pointer to the package header to be searched - * - * This function searches a package file for a particular segment type. On - * success it returns a pointer to the segment header, otherwise it will - * return NULL. - */ -static struct ice_generic_seg_hdr * -ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, - struct ice_pkg_hdr *pkg_hdr) -{ - u32 i; - - ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", - pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, - pkg_hdr->pkg_format_ver.update, - pkg_hdr->pkg_format_ver.draft); - - /* Search all package segments for the requested segment type */ - for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { - struct ice_generic_seg_hdr *seg; - - seg = (struct ice_generic_seg_hdr *) - ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i])); - - if (le32_to_cpu(seg->seg_type) == seg_type) - return seg; - } - - return NULL; -} - -/** - * ice_update_pkg_no_lock - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - */ -static int -ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - int status = 0; - u32 i; - - for (i = 0; i < count; i++) { - struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); - bool last = ((i + 1) == count); - u32 offset, info; - - status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end), - last, &offset, &info, NULL); - - if (status) { - ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", - status, offset, info); - break; - } - } - - return status; -} - -/** - * ice_update_pkg - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - * - * Obtains change lock and updates package. - */ -static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - int status; - - status = ice_acquire_change_lock(hw, ICE_RES_WRITE); - if (status) - return status; - - status = ice_update_pkg_no_lock(hw, bufs, count); - - ice_release_change_lock(hw); - - return status; -} - -static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) -{ - switch (aq_err) { - case ICE_AQ_RC_ENOSEC: - case ICE_AQ_RC_EBADSIG: - return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; - case ICE_AQ_RC_ESVN: - return ICE_DDP_PKG_FILE_REVISION_TOO_LOW; - case ICE_AQ_RC_EBADMAN: - case ICE_AQ_RC_EBADBUF: - return ICE_DDP_PKG_LOAD_ERROR; - default: - return ICE_DDP_PKG_ERR; - } -} - -/** - * ice_dwnld_cfg_bufs - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - * - * Obtains global config lock and downloads the package configuration buffers - * to the firmware. Metadata buffers are skipped, and the first metadata buffer - * found indicates that the rest of the buffers are all metadata buffers. 
- */ -static enum ice_ddp_state -ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; - struct ice_buf_hdr *bh; - enum ice_aq_err err; - u32 offset, info, i; - int status; - - if (!bufs || !count) - return ICE_DDP_PKG_ERR; - - /* If the first buffer's first section has its metadata bit set - * then there are no buffers to be downloaded, and the operation is - * considered a success. - */ - bh = (struct ice_buf_hdr *)bufs; - if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) - return ICE_DDP_PKG_SUCCESS; - - status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); - if (status) { - if (status == -EALREADY) - return ICE_DDP_PKG_ALREADY_LOADED; - return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); - } - - for (i = 0; i < count; i++) { - bool last = ((i + 1) == count); - - if (!last) { - /* check next buffer for metadata flag */ - bh = (struct ice_buf_hdr *)(bufs + i + 1); - - /* A set metadata flag in the next buffer will signal - * that the current buffer will be the last buffer - * downloaded - */ - if (le16_to_cpu(bh->section_count)) - if (le32_to_cpu(bh->section_entry[0].type) & - ICE_METADATA_BUF) - last = true; - } - - bh = (struct ice_buf_hdr *)(bufs + i); - - status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, - &offset, &info, NULL); - - /* Save AQ status from download package */ - if (status) { - ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", - status, offset, info); - err = hw->adminq.sq_last_status; - state = ice_map_aq_err_to_ddp_state(err); - break; - } - - if (last) - break; - } - - if (!status) { - status = ice_set_vlan_mode(hw); - if (status) - ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", - status); - } - - ice_release_global_cfg_lock(hw); - - return state; -} - -/** - * ice_aq_get_pkg_info_list - * @hw: pointer to the hardware structure - * @pkg_info: the buffer which will receive the information list - * @buf_size: the size of the pkg_info information buffer - * @cd: pointer to command details structure or NULL - * - * Get Package Info List (0x0C43) - */ -static int -ice_aq_get_pkg_info_list(struct ice_hw *hw, - struct ice_aqc_get_pkg_info_resp *pkg_info, - u16 buf_size, struct ice_sq_cd *cd) -{ - struct ice_aq_desc desc; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); - - return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); -} - -/** - * ice_download_pkg - * @hw: pointer to the hardware structure - * @ice_seg: pointer to the segment of the package to be downloaded - * - * Handles the download of a complete package. 
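
The "last buffer" decision in ice_dwnld_cfg_bufs() above is worth isolating: a buffer is the last one downloaded either because it is the final buffer in the array or because the following buffer carries the metadata flag in its first section type, and metadata buffers themselves are never sent. The standalone userspace sketch below (illustrative; section type values are arbitrary) reproduces just that decision.

#include <stdbool.h>
#include <stdio.h>

#define METADATA_BUF    0x80000000u     /* mirrors ICE_METADATA_BUF */

static bool is_last_download(const unsigned int *first_sect_type,
                             unsigned int count, unsigned int i)
{
        if (i + 1 == count)
                return true;
        return (first_sect_type[i + 1] & METADATA_BUF) != 0;
}

int main(void)
{
        /* three config buffers followed by one metadata buffer */
        unsigned int types[] = { 10, 12, 14, METADATA_BUF | 1 };
        unsigned int i;

        for (i = 0; i < 4; i++)
                printf("buf %u last=%d\n", i, is_last_download(types, 4, i));
        /* buf 2 is reported as last; buf 3 (metadata) is never sent */
        return 0;
}
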
- */ -static enum ice_ddp_state -ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) -{ - struct ice_buf_table *ice_buf_tbl; - int status; - - ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", - ice_seg->hdr.seg_format_ver.major, - ice_seg->hdr.seg_format_ver.minor, - ice_seg->hdr.seg_format_ver.update, - ice_seg->hdr.seg_format_ver.draft); - - ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", - le32_to_cpu(ice_seg->hdr.seg_type), - le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); - - ice_buf_tbl = ice_find_buf_table(ice_seg); - - ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", - le32_to_cpu(ice_buf_tbl->buf_count)); - - status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, - le32_to_cpu(ice_buf_tbl->buf_count)); - - ice_post_pkg_dwnld_vlan_mode_cfg(hw); - - return status; -} - -/** - * ice_init_pkg_info - * @hw: pointer to the hardware structure - * @pkg_hdr: pointer to the driver's package hdr - * - * Saves off the package details into the HW structure. - */ -static enum ice_ddp_state -ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) -{ - struct ice_generic_seg_hdr *seg_hdr; - - if (!pkg_hdr) - return ICE_DDP_PKG_ERR; - - seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); - if (seg_hdr) { - struct ice_meta_sect *meta; - struct ice_pkg_enum state; - - memset(&state, 0, sizeof(state)); - - /* Get package information from the Metadata Section */ - meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, - ICE_SID_METADATA); - if (!meta) { - ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); - return ICE_DDP_PKG_INVALID_FILE; - } - - hw->pkg_ver = meta->ver; - memcpy(hw->pkg_name, meta->name, sizeof(meta->name)); - - ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", - meta->ver.major, meta->ver.minor, meta->ver.update, - meta->ver.draft, meta->name); - - hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; - memcpy(hw->ice_seg_id, seg_hdr->seg_id, - sizeof(hw->ice_seg_id)); - - ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", - seg_hdr->seg_format_ver.major, - seg_hdr->seg_format_ver.minor, - seg_hdr->seg_format_ver.update, - seg_hdr->seg_format_ver.draft, - seg_hdr->seg_id); - } else { - ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); - return ICE_DDP_PKG_INVALID_FILE; - } - - return ICE_DDP_PKG_SUCCESS; -} - -/** - * ice_get_pkg_info - * @hw: pointer to the hardware structure - * - * Store details of the package currently loaded in HW into the HW structure. 
- */ -static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) -{ - enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; - struct ice_aqc_get_pkg_info_resp *pkg_info; - u16 size; - u32 i; - - size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); - pkg_info = kzalloc(size, GFP_KERNEL); - if (!pkg_info) - return ICE_DDP_PKG_ERR; - - if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { - state = ICE_DDP_PKG_ERR; - goto init_pkg_free_alloc; - } - - for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { -#define ICE_PKG_FLAG_COUNT 4 - char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; - u8 place = 0; - - if (pkg_info->pkg_info[i].is_active) { - flags[place++] = 'A'; - hw->active_pkg_ver = pkg_info->pkg_info[i].ver; - hw->active_track_id = - le32_to_cpu(pkg_info->pkg_info[i].track_id); - memcpy(hw->active_pkg_name, - pkg_info->pkg_info[i].name, - sizeof(pkg_info->pkg_info[i].name)); - hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; - } - if (pkg_info->pkg_info[i].is_active_at_boot) - flags[place++] = 'B'; - if (pkg_info->pkg_info[i].is_modified) - flags[place++] = 'M'; - if (pkg_info->pkg_info[i].is_in_nvm) - flags[place++] = 'N'; - - ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", - i, pkg_info->pkg_info[i].ver.major, - pkg_info->pkg_info[i].ver.minor, - pkg_info->pkg_info[i].ver.update, - pkg_info->pkg_info[i].ver.draft, - pkg_info->pkg_info[i].name, flags); - } - -init_pkg_free_alloc: - kfree(pkg_info); - - return state; -} - -/** - * ice_verify_pkg - verify package - * @pkg: pointer to the package buffer - * @len: size of the package buffer - * - * Verifies various attributes of the package file, including length, format - * version, and the requirement of at least one segment. - */ -static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) -{ - u32 seg_count; - u32 i; - - if (len < struct_size(pkg, seg_offset, 1)) - return ICE_DDP_PKG_INVALID_FILE; - - if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || - pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || - pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || - pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) - return ICE_DDP_PKG_INVALID_FILE; - - /* pkg must have at least one segment */ - seg_count = le32_to_cpu(pkg->seg_count); - if (seg_count < 1) - return ICE_DDP_PKG_INVALID_FILE; - - /* make sure segment array fits in package length */ - if (len < struct_size(pkg, seg_offset, seg_count)) - return ICE_DDP_PKG_INVALID_FILE; - - /* all segments must fit within length */ - for (i = 0; i < seg_count; i++) { - u32 off = le32_to_cpu(pkg->seg_offset[i]); - struct ice_generic_seg_hdr *seg; - - /* segment header must fit */ - if (len < off + sizeof(*seg)) - return ICE_DDP_PKG_INVALID_FILE; - - seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); - - /* segment body must fit */ - if (len < off + le32_to_cpu(seg->seg_size)) - return ICE_DDP_PKG_INVALID_FILE; - } - - return ICE_DDP_PKG_SUCCESS; -} - -/** - * ice_free_seg - free package segment pointer - * @hw: pointer to the hardware structure - * - * Frees the package segment pointer in the proper manner, depending on if the - * segment was allocated or just the passed in pointer was stored. 
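
The compact flags string logged by ice_get_pkg_info() above is easier to read with its encoding spelled out: 'A' active, 'B' active at boot, 'M' modified, 'N' present in NVM, appended in that order with no gaps. The userspace sketch below (illustrative only; the sample values are made up) mirrors the construction.

#include <stdbool.h>
#include <stdio.h>

struct pkg_flags {
        bool is_active, is_active_at_boot, is_modified, is_in_nvm;
};

static void format_pkg_flags(const struct pkg_flags *p, char out[5])
{
        int place = 0;

        if (p->is_active)
                out[place++] = 'A';     /* currently active package */
        if (p->is_active_at_boot)
                out[place++] = 'B';     /* active at next boot */
        if (p->is_modified)
                out[place++] = 'M';     /* modified from its NVM image */
        if (p->is_in_nvm)
                out[place++] = 'N';     /* resident in NVM */
        out[place] = '\0';
}

int main(void)
{
        struct pkg_flags p = { .is_active = true, .is_active_at_boot = true,
                               .is_in_nvm = true };
        char flags[5];

        format_pkg_flags(&p, flags);
        printf("flags: %s\n", flags);   /* prints "ABN" */
        return 0;
}
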
- */ -void ice_free_seg(struct ice_hw *hw) -{ - if (hw->pkg_copy) { - devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); - hw->pkg_copy = NULL; - hw->pkg_size = 0; - } - hw->seg = NULL; -} - -/** - * ice_init_pkg_regs - initialize additional package registers - * @hw: pointer to the hardware structure - */ -static void ice_init_pkg_regs(struct ice_hw *hw) -{ -#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF -#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF -#define ICE_SW_BLK_IDX 0 - - /* setup Switch block input mask, which is 48-bits in two parts */ - wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); - wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); -} - -/** - * ice_chk_pkg_version - check package version for compatibility with driver - * @pkg_ver: pointer to a version structure to check - * - * Check to make sure that the package about to be downloaded is compatible with - * the driver. To be compatible, the major and minor components of the package - * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR - * definitions. - */ -static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) -{ - if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || - (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && - pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) - return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH; - else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || - (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && - pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) - return ICE_DDP_PKG_FILE_VERSION_TOO_LOW; - - return ICE_DDP_PKG_SUCCESS; -} - -/** - * ice_chk_pkg_compat - * @hw: pointer to the hardware structure - * @ospkg: pointer to the package hdr - * @seg: pointer to the package segment hdr - * - * This function checks the package version compatibility with driver and NVM - */ -static enum ice_ddp_state -ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, - struct ice_seg **seg) -{ - struct ice_aqc_get_pkg_info_resp *pkg; - enum ice_ddp_state state; - u16 size; - u32 i; - - /* Check package version compatibility */ - state = ice_chk_pkg_version(&hw->pkg_ver); - if (state) { - ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); - return state; - } - - /* find ICE segment in given package */ - *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, - ospkg); - if (!*seg) { - ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); - return ICE_DDP_PKG_INVALID_FILE; - } - - /* Check if FW is compatible with the OS package */ - size = struct_size(pkg, pkg_info, ICE_PKG_CNT); - pkg = kzalloc(size, GFP_KERNEL); - if (!pkg) - return ICE_DDP_PKG_ERR; - - if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { - state = ICE_DDP_PKG_LOAD_ERROR; - goto fw_ddp_compat_free_alloc; - } - - for (i = 0; i < le32_to_cpu(pkg->count); i++) { - /* loop till we find the NVM package */ - if (!pkg->pkg_info[i].is_in_nvm) - continue; - if ((*seg)->hdr.seg_format_ver.major != - pkg->pkg_info[i].ver.major || - (*seg)->hdr.seg_format_ver.minor > - pkg->pkg_info[i].ver.minor) { - state = ICE_DDP_PKG_FW_MISMATCH; - ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); - } - /* done processing NVM package so break */ - break; - } -fw_ddp_compat_free_alloc: - kfree(pkg); - return state; -} - -/** - * ice_sw_fv_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the field vector entry to be returned - * @offset: ptr to variable that receives the offset in the field vector table - * - * This is a callback function that can be passed to 
ice_pkg_enum_entry. - * This function treats the given section as of type ice_sw_fv_section and - * enumerates offset field. "offset" is an index into the field vector table. - */ -static void * -ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) -{ - struct ice_sw_fv_section *fv_section = section; - - if (!section || sect_type != ICE_SID_FLD_VEC_SW) - return NULL; - if (index >= le16_to_cpu(fv_section->count)) - return NULL; - if (offset) - /* "index" passed in to this function is relative to a given - * 4k block. To get to the true index into the field vector - * table need to add the relative index to the base_offset - * field of this section - */ - *offset = le16_to_cpu(fv_section->base_offset) + index; - return fv_section->fv + index; -} - -/** - * ice_get_prof_index_max - get the max profile index for used profile - * @hw: pointer to the HW struct - * - * Calling this function will get the max profile index for used profile - * and store the index number in struct ice_switch_info *switch_info - * in HW for following use. - */ -static int ice_get_prof_index_max(struct ice_hw *hw) -{ - u16 prof_index = 0, j, max_prof_index = 0; - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - bool flag = false; - struct ice_fv *fv; - u32 offset; - - memset(&state, 0, sizeof(state)); - - if (!hw->seg) - return -EINVAL; - - ice_seg = hw->seg; - - do { - fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - if (!fv) - break; - ice_seg = NULL; - - /* in the profile that not be used, the prot_id is set to 0xff - * and the off is set to 0x1ff for all the field vectors. - */ - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv->ew[j].prot_id != ICE_PROT_INVALID || - fv->ew[j].off != ICE_FV_OFFSET_INVAL) - flag = true; - if (flag && prof_index > max_prof_index) - max_prof_index = prof_index; - - prof_index++; - flag = false; - } while (fv); - - hw->switch_info->max_used_prof_index = max_prof_index; - - return 0; -} - -/** - * ice_get_ddp_pkg_state - get DDP pkg state after download - * @hw: pointer to the HW struct - * @already_loaded: indicates if pkg was already loaded onto the device - */ -static enum ice_ddp_state -ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded) -{ - if (hw->pkg_ver.major == hw->active_pkg_ver.major && - hw->pkg_ver.minor == hw->active_pkg_ver.minor && - hw->pkg_ver.update == hw->active_pkg_ver.update && - hw->pkg_ver.draft == hw->active_pkg_ver.draft && - !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { - if (already_loaded) - return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED; - else - return ICE_DDP_PKG_SUCCESS; - } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || - hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { - return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED; - } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { - return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED; - } else { - return ICE_DDP_PKG_ERR; - } -} - -/** - * ice_init_pkg - initialize/download package - * @hw: pointer to the hardware structure - * @buf: pointer to the package buffer - * @len: size of the package buffer - * - * This function initializes a package. The package contains HW tables - * required to do packet processing. First, the function extracts package - * information such as version. 
Then it finds the ice configuration segment - * within the package; this function then saves a copy of the segment pointer - * within the supplied package buffer. Next, the function will cache any hints - * from the package, followed by downloading the package itself. Note, that if - * a previous PF driver has already downloaded the package successfully, then - * the current driver will not have to download the package again. - * - * The local package contents will be used to query default behavior and to - * update specific sections of the HW's version of the package (e.g. to update - * the parse graph to understand new protocols). - * - * This function stores a pointer to the package buffer memory, and it is - * expected that the supplied buffer will not be freed immediately. If the - * package buffer needs to be freed, such as when read from a file, use - * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this - * case. - */ -enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) -{ - bool already_loaded = false; - enum ice_ddp_state state; - struct ice_pkg_hdr *pkg; - struct ice_seg *seg; - - if (!buf || !len) - return ICE_DDP_PKG_ERR; - - pkg = (struct ice_pkg_hdr *)buf; - state = ice_verify_pkg(pkg, len); - if (state) { - ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", - state); - return state; - } - - /* initialize package info */ - state = ice_init_pkg_info(hw, pkg); - if (state) - return state; - - /* before downloading the package, check package version for - * compatibility with driver - */ - state = ice_chk_pkg_compat(hw, pkg, &seg); - if (state) - return state; - - /* initialize package hints and then download package */ - ice_init_pkg_hints(hw, seg); - state = ice_download_pkg(hw, seg); - if (state == ICE_DDP_PKG_ALREADY_LOADED) { - ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); - already_loaded = true; - } - - /* Get information on the package currently loaded in HW, then make sure - * the driver is compatible with this version. - */ - if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) { - state = ice_get_pkg_info(hw); - if (!state) - state = ice_get_ddp_pkg_state(hw, already_loaded); - } - - if (ice_is_init_pkg_successful(state)) { - hw->seg = seg; - /* on successful package download update other required - * registers to support the package and fill HW tables - * with package content. - */ - ice_init_pkg_regs(hw); - ice_fill_blk_tbls(hw); - ice_fill_hw_ptype(hw); - ice_get_prof_index_max(hw); - } else { - ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", - state); - } - - return state; -} - -/** - * ice_copy_and_init_pkg - initialize/download a copy of the package - * @hw: pointer to the hardware structure - * @buf: pointer to the package buffer - * @len: size of the package buffer - * - * This function copies the package buffer, and then calls ice_init_pkg() to - * initialize the copied package contents. - * - * The copying is necessary if the package buffer supplied is constant, or if - * the memory may disappear shortly after calling this function. - * - * If the package buffer resides in the data segment and can be modified, the - * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). - * - * However, if the package buffer needs to be copied first, such as when being - * read from a file, the caller should use ice_copy_and_init_pkg(). - * - * This function will first copy the package buffer, before calling - * ice_init_pkg(). 
The caller is free to immediately destroy the original - * package buffer, as the new copy will be managed by this function and - * related routines. - */ -enum ice_ddp_state -ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) -{ - enum ice_ddp_state state; - u8 *buf_copy; - - if (!buf || !len) - return ICE_DDP_PKG_ERR; - - buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); - - state = ice_init_pkg(hw, buf_copy, len); - if (!ice_is_init_pkg_successful(state)) { - /* Free the copy, since we failed to initialize the package */ - devm_kfree(ice_hw_to_dev(hw), buf_copy); - } else { - /* Track the copied pkg so we can free it later */ - hw->pkg_copy = buf_copy; - hw->pkg_size = len; - } - - return state; -} - -/** - * ice_is_init_pkg_successful - check if DDP init was successful - * @state: state of the DDP pkg after download - */ -bool ice_is_init_pkg_successful(enum ice_ddp_state state) -{ - switch (state) { - case ICE_DDP_PKG_SUCCESS: - case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: - case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: - return true; - default: - return false; - } -} - -/** - * ice_pkg_buf_alloc - * @hw: pointer to the HW structure - * - * Allocates a package buffer and returns a pointer to the buffer header. - * Note: all package contents must be in Little Endian form. - */ -static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) -{ - struct ice_buf_build *bld; - struct ice_buf_hdr *buf; - - bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL); - if (!bld) - return NULL; - - buf = (struct ice_buf_hdr *)bld; - buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr, - section_entry)); - return bld; -} - -static bool ice_is_gtp_u_profile(u16 prof_idx) -{ - return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID && - prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) || - prof_idx == ICE_PROFID_IPV4_GTPU_TEID; -} - -static bool ice_is_gtp_c_profile(u16 prof_idx) -{ - switch (prof_idx) { - case ICE_PROFID_IPV4_GTPC_TEID: - case ICE_PROFID_IPV4_GTPC_NO_TEID: - case ICE_PROFID_IPV6_GTPC_TEID: - case ICE_PROFID_IPV6_GTPC_NO_TEID: - return true; - default: - return false; - } -} - -/** - * ice_get_sw_prof_type - determine switch profile type - * @hw: pointer to the HW structure - * @fv: pointer to the switch field vector - * @prof_idx: profile index to check - */ -static enum ice_prof_type -ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx) -{ - u16 i; - - if (ice_is_gtp_c_profile(prof_idx)) - return ICE_PROF_TUN_GTPC; - - if (ice_is_gtp_u_profile(prof_idx)) - return ICE_PROF_TUN_GTPU; - - for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { - /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ - if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && - fv->ew[i].off == ICE_VNI_OFFSET) - return ICE_PROF_TUN_UDP; - - /* GRE tunnel will have GRE protocol */ - if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) - return ICE_PROF_TUN_GRE; - } - - return ICE_PROF_NON_TUN; -} - -/** - * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type - * @hw: pointer to hardware structure - * @req_profs: type of profiles requested - * @bm: pointer to memory for returning the bitmap of field vectors - */ -void -ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, - unsigned long *bm) -{ - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - - if (req_profs == ICE_PROF_ALL) { - bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); - return; - } - - memset(&state, 0, sizeof(state)); - bitmap_zero(bm, 
ICE_MAX_NUM_PROFILES); - ice_seg = hw->seg; - do { - enum ice_prof_type prof_type; - u32 offset; - - fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - ice_seg = NULL; - - if (fv) { - /* Determine field vector type */ - prof_type = ice_get_sw_prof_type(hw, fv, offset); - - if (req_profs & prof_type) - set_bit((u16)offset, bm); - } - } while (fv); -} - -/** - * ice_get_sw_fv_list - * @hw: pointer to the HW structure - * @lkups: list of protocol types - * @bm: bitmap of field vectors to consider - * @fv_list: Head of a list - * - * Finds all the field vector entries from switch block that contain - * a given protocol ID and offset and returns a list of structures of type - * "ice_sw_fv_list_entry". Every structure in the list has a field vector - * definition and profile ID information - * NOTE: The caller of the function is responsible for freeing the memory - * allocated for every list entry. - */ -int -ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, - unsigned long *bm, struct list_head *fv_list) -{ - struct ice_sw_fv_list_entry *fvl; - struct ice_sw_fv_list_entry *tmp; - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - u32 offset; - - memset(&state, 0, sizeof(state)); - - if (!lkups->n_val_words || !hw->seg) - return -EINVAL; - - ice_seg = hw->seg; - do { - u16 i; - - fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - if (!fv) - break; - ice_seg = NULL; - - /* If field vector is not in the bitmap list, then skip this - * profile. - */ - if (!test_bit((u16)offset, bm)) - continue; - - for (i = 0; i < lkups->n_val_words; i++) { - int j; - - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv->ew[j].prot_id == - lkups->fv_words[i].prot_id && - fv->ew[j].off == lkups->fv_words[i].off) - break; - if (j >= hw->blk[ICE_BLK_SW].es.fvw) - break; - if (i + 1 == lkups->n_val_words) { - fvl = devm_kzalloc(ice_hw_to_dev(hw), - sizeof(*fvl), GFP_KERNEL); - if (!fvl) - goto err; - fvl->fv_ptr = fv; - fvl->profile_id = offset; - list_add(&fvl->list_entry, fv_list); - break; - } - } - } while (fv); - if (list_empty(fv_list)) { - dev_warn(ice_hw_to_dev(hw), "Required profiles not found in currently loaded DDP package"); - return -EIO; - } - - return 0; - -err: - list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) { - list_del(&fvl->list_entry); - devm_kfree(ice_hw_to_dev(hw), fvl); - } - - return -ENOMEM; -} - -/** - * ice_init_prof_result_bm - Initialize the profile result index bitmap - * @hw: pointer to hardware structure - */ -void ice_init_prof_result_bm(struct ice_hw *hw) -{ - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - - memset(&state, 0, sizeof(state)); - - if (!hw->seg) - return; - - ice_seg = hw->seg; - do { - u32 off; - u16 i; - - fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &off, ice_sw_fv_handler); - ice_seg = NULL; - if (!fv) - break; - - bitmap_zero(hw->switch_info->prof_res_bm[off], - ICE_MAX_FV_WORDS); - - /* Determine empty field vector indices, these can be - * used for recipe results. Skip index 0, since it is - * always used for Switch ID. 
- */ - for (i = 1; i < ICE_MAX_FV_WORDS; i++) - if (fv->ew[i].prot_id == ICE_PROT_INVALID && - fv->ew[i].off == ICE_FV_OFFSET_INVAL) - set_bit(i, hw->switch_info->prof_res_bm[off]); - } while (fv); -} - -/** - * ice_pkg_buf_free - * @hw: pointer to the HW structure - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Frees a package buffer - */ -void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) -{ - devm_kfree(ice_hw_to_dev(hw), bld); -} - -/** - * ice_pkg_buf_reserve_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @count: the number of sections to reserve - * - * Reserves one or more section table entries in a package buffer. This routine - * can be called multiple times as long as they are made before calling - * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() - * is called once, the number of sections that can be allocated will not be able - * to be increased; not using all reserved sections is fine, but this will - * result in some wasted space in the buffer. - * Note: all package contents must be in Little Endian form. - */ -static int -ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) -{ - struct ice_buf_hdr *buf; - u16 section_count; - u16 data_end; - - if (!bld) - return -EINVAL; - - buf = (struct ice_buf_hdr *)&bld->buf; - - /* already an active section, can't increase table size */ - section_count = le16_to_cpu(buf->section_count); - if (section_count > 0) - return -EIO; - - if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) - return -EIO; - bld->reserved_section_table_entries += count; - - data_end = le16_to_cpu(buf->data_end) + - flex_array_size(buf, section_entry, count); - buf->data_end = cpu_to_le16(data_end); - - return 0; -} - -/** - * ice_pkg_buf_alloc_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @type: the section type value - * @size: the size of the section to reserve (in bytes) - * - * Reserves memory in the buffer for a section's content and updates the - * buffers' status accordingly. This routine returns a pointer to the first - * byte of the section start within the buffer, which is used to fill in the - * section contents. - * Note: all package contents must be in Little Endian form. 
- */ -static void * -ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) -{ - struct ice_buf_hdr *buf; - u16 sect_count; - u16 data_end; - - if (!bld || !type || !size) - return NULL; - - buf = (struct ice_buf_hdr *)&bld->buf; - - /* check for enough space left in buffer */ - data_end = le16_to_cpu(buf->data_end); - - /* section start must align on 4 byte boundary */ - data_end = ALIGN(data_end, 4); - - if ((data_end + size) > ICE_MAX_S_DATA_END) - return NULL; - - /* check for more available section table entries */ - sect_count = le16_to_cpu(buf->section_count); - if (sect_count < bld->reserved_section_table_entries) { - void *section_ptr = ((u8 *)buf) + data_end; - - buf->section_entry[sect_count].offset = cpu_to_le16(data_end); - buf->section_entry[sect_count].size = cpu_to_le16(size); - buf->section_entry[sect_count].type = cpu_to_le32(type); - - data_end += size; - buf->data_end = cpu_to_le16(data_end); - - buf->section_count = cpu_to_le16(sect_count + 1); - return section_ptr; - } - - /* no free section table entries */ - return NULL; -} - -/** - * ice_pkg_buf_alloc_single_section - * @hw: pointer to the HW structure - * @type: the section type value - * @size: the size of the section to reserve (in bytes) - * @section: returns pointer to the section - * - * Allocates a package buffer with a single section. - * Note: all package contents must be in Little Endian form. - */ -struct ice_buf_build * -ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, - void **section) -{ - struct ice_buf_build *buf; - - if (!section) - return NULL; - - buf = ice_pkg_buf_alloc(hw); - if (!buf) - return NULL; - - if (ice_pkg_buf_reserve_section(buf, 1)) - goto ice_pkg_buf_alloc_single_section_err; - - *section = ice_pkg_buf_alloc_section(buf, type, size); - if (!*section) - goto ice_pkg_buf_alloc_single_section_err; - - return buf; - -ice_pkg_buf_alloc_single_section_err: - ice_pkg_buf_free(hw, buf); - return NULL; -} - -/** - * ice_pkg_buf_get_active_sections - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Returns the number of active sections. Before using the package buffer - * in an update package command, the caller should make sure that there is at - * least one active section - otherwise, the buffer is not legal and should - * not be used. - * Note: all package contents must be in Little Endian form. 
- */ -static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) -{ - struct ice_buf_hdr *buf; - - if (!bld) - return 0; - - buf = (struct ice_buf_hdr *)&bld->buf; - return le16_to_cpu(buf->section_count); -} - -/** - * ice_pkg_buf - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Return a pointer to the buffer's header - */ -struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) -{ - if (!bld) - return NULL; - - return &bld->buf; -} - -/** * ice_get_open_tunnel_port - retrieve an open tunnel port * @hw: pointer to the HW structure * @port: returns open port @@ -2297,10 +391,11 @@ ice_upd_dvm_boost_entry_err: */ int ice_set_dvm_boost_entries(struct ice_hw *hw) { - int status; u16 i; for (i = 0; i < hw->dvm_upd.count; i++) { + int status; + status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]); if (status) return status; @@ -2757,7 +852,6 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2) count++; list_for_each_entry(tmp2, list2, list) chk_count++; - /* cppcheck-suppress knownConditionTrueFalse */ if (!count || count != chk_count) return false; @@ -5102,12 +3196,13 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_vsi *vsi_cur; struct ice_vsig_prof *d, *t; - int status; /* remove TCAM entries */ list_for_each_entry_safe(d, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, list) { + int status; + status = ice_rem_prof_id(hw, blk, d); if (status) return status; @@ -5158,12 +3253,13 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_prof *p, *t; - int status; list_for_each_entry_safe(p, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, list) if (p->profile_cookie == hdl) { + int status; + if (ice_vsig_prof_id_count(hw, blk, vsig) == 1) /* this is the last profile, remove the VSIG */ return ice_rem_vsig(hw, blk, vsig, chg); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 9c530c86703e..7af7c8e9aa4e 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -6,75 +6,6 @@ #include "ice_type.h" -/* Package minimal version supported */ -#define ICE_PKG_SUPP_VER_MAJ 1 -#define ICE_PKG_SUPP_VER_MNR 3 - -/* Package format version */ -#define ICE_PKG_FMT_VER_MAJ 1 -#define ICE_PKG_FMT_VER_MNR 0 -#define ICE_PKG_FMT_VER_UPD 0 -#define ICE_PKG_FMT_VER_DFT 0 - -#define ICE_PKG_CNT 4 - -enum ice_ddp_state { - /* Indicates that this call to ice_init_pkg - * successfully loaded the requested DDP package - */ - ICE_DDP_PKG_SUCCESS = 0, - - /* Generic error for already loaded errors, it is mapped later to - * the more specific one (one of the next 3) - */ - ICE_DDP_PKG_ALREADY_LOADED = -1, - - /* Indicates that a DDP package of the same version has already been - * loaded onto the device by a previous call or by another PF - */ - ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2, - - /* The device has a DDP package that is not supported by the driver */ - ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3, - - /* The device has a compatible package - * (but different from the request) already loaded - */ - ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4, - - /* The firmware loaded on the device is not compatible with - * the DDP package loaded - */ - ICE_DDP_PKG_FW_MISMATCH = -5, - - /* The DDP package file is invalid */ - ICE_DDP_PKG_INVALID_FILE = -6, - - /* The version of the DDP package provided is higher 
than - * the driver supports - */ - ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7, - - /* The version of the DDP package provided is lower than the - * driver supports - */ - ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8, - - /* The signature of the DDP package file provided is invalid */ - ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9, - - /* The DDP package file security revision is too low and not - * supported by firmware - */ - ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10, - - /* An error occurred in firmware while loading the DDP package */ - ICE_DDP_PKG_LOAD_ERROR = -11, - - /* Other errors */ - ICE_DDP_PKG_ERR = -12 -}; - int ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_change_lock(struct ice_hw *hw); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 974d14a83b2e..4f42e14ed3ae 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -3,205 +3,7 @@ #ifndef _ICE_FLEX_TYPE_H_ #define _ICE_FLEX_TYPE_H_ - -#define ICE_FV_OFFSET_INVAL 0x1FF - -/* Extraction Sequence (Field Vector) Table */ -struct ice_fv_word { - u8 prot_id; - u16 off; /* Offset within the protocol header */ - u8 resvrd; -} __packed; - -#define ICE_MAX_NUM_PROFILES 256 - -#define ICE_MAX_FV_WORDS 48 -struct ice_fv { - struct ice_fv_word ew[ICE_MAX_FV_WORDS]; -}; - -/* Package and segment headers and tables */ -struct ice_pkg_hdr { - struct ice_pkg_ver pkg_format_ver; - __le32 seg_count; - __le32 seg_offset[]; -}; - -/* generic segment */ -struct ice_generic_seg_hdr { -#define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_ICE 0x00000010 - __le32 seg_type; - struct ice_pkg_ver seg_format_ver; - __le32 seg_size; - char seg_id[ICE_PKG_NAME_SIZE]; -}; - -/* ice specific segment */ - -union ice_device_id { - struct { - __le16 device_id; - __le16 vendor_id; - } dev_vend_id; - __le32 id; -}; - -struct ice_device_id_entry { - union ice_device_id device; - union ice_device_id sub_device; -}; - -struct ice_seg { - struct ice_generic_seg_hdr hdr; - __le32 device_table_count; - struct ice_device_id_entry device_table[]; -}; - -struct ice_nvm_table { - __le32 table_count; - __le32 vers[]; -}; - -struct ice_buf { -#define ICE_PKG_BUF_SIZE 4096 - u8 buf[ICE_PKG_BUF_SIZE]; -}; - -struct ice_buf_table { - __le32 buf_count; - struct ice_buf buf_array[]; -}; - -/* global metadata specific segment */ -struct ice_global_metadata_seg { - struct ice_generic_seg_hdr hdr; - struct ice_pkg_ver pkg_ver; - __le32 rsvd; - char pkg_name[ICE_PKG_NAME_SIZE]; -}; - -#define ICE_MIN_S_OFF 12 -#define ICE_MAX_S_OFF 4095 -#define ICE_MIN_S_SZ 1 -#define ICE_MAX_S_SZ 4084 - -/* section information */ -struct ice_section_entry { - __le32 type; - __le16 offset; - __le16 size; -}; - -#define ICE_MIN_S_COUNT 1 -#define ICE_MAX_S_COUNT 511 -#define ICE_MIN_S_DATA_END 12 -#define ICE_MAX_S_DATA_END 4096 - -#define ICE_METADATA_BUF 0x80000000 - -struct ice_buf_hdr { - __le16 section_count; - __le16 data_end; - struct ice_section_entry section_entry[]; -}; - -#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ - struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\ - (ent_sz)) - -/* ice package section IDs */ -#define ICE_SID_METADATA 1 -#define ICE_SID_XLT0_SW 10 -#define ICE_SID_XLT_KEY_BUILDER_SW 11 -#define ICE_SID_XLT1_SW 12 -#define ICE_SID_XLT2_SW 13 -#define ICE_SID_PROFID_TCAM_SW 14 -#define ICE_SID_PROFID_REDIR_SW 15 -#define ICE_SID_FLD_VEC_SW 16 -#define 
ICE_SID_CDID_KEY_BUILDER_SW 17 - -struct ice_meta_sect { - struct ice_pkg_ver ver; -#define ICE_META_SECT_NAME_SIZE 28 - char name[ICE_META_SECT_NAME_SIZE]; - __le32 track_id; -}; - -#define ICE_SID_CDID_REDIR_SW 18 - -#define ICE_SID_XLT0_ACL 20 -#define ICE_SID_XLT_KEY_BUILDER_ACL 21 -#define ICE_SID_XLT1_ACL 22 -#define ICE_SID_XLT2_ACL 23 -#define ICE_SID_PROFID_TCAM_ACL 24 -#define ICE_SID_PROFID_REDIR_ACL 25 -#define ICE_SID_FLD_VEC_ACL 26 -#define ICE_SID_CDID_KEY_BUILDER_ACL 27 -#define ICE_SID_CDID_REDIR_ACL 28 - -#define ICE_SID_XLT0_FD 30 -#define ICE_SID_XLT_KEY_BUILDER_FD 31 -#define ICE_SID_XLT1_FD 32 -#define ICE_SID_XLT2_FD 33 -#define ICE_SID_PROFID_TCAM_FD 34 -#define ICE_SID_PROFID_REDIR_FD 35 -#define ICE_SID_FLD_VEC_FD 36 -#define ICE_SID_CDID_KEY_BUILDER_FD 37 -#define ICE_SID_CDID_REDIR_FD 38 - -#define ICE_SID_XLT0_RSS 40 -#define ICE_SID_XLT_KEY_BUILDER_RSS 41 -#define ICE_SID_XLT1_RSS 42 -#define ICE_SID_XLT2_RSS 43 -#define ICE_SID_PROFID_TCAM_RSS 44 -#define ICE_SID_PROFID_REDIR_RSS 45 -#define ICE_SID_FLD_VEC_RSS 46 -#define ICE_SID_CDID_KEY_BUILDER_RSS 47 -#define ICE_SID_CDID_REDIR_RSS 48 - -#define ICE_SID_RXPARSER_MARKER_PTYPE 55 -#define ICE_SID_RXPARSER_BOOST_TCAM 56 -#define ICE_SID_RXPARSER_METADATA_INIT 58 -#define ICE_SID_TXPARSER_BOOST_TCAM 66 - -#define ICE_SID_XLT0_PE 80 -#define ICE_SID_XLT_KEY_BUILDER_PE 81 -#define ICE_SID_XLT1_PE 82 -#define ICE_SID_XLT2_PE 83 -#define ICE_SID_PROFID_TCAM_PE 84 -#define ICE_SID_PROFID_REDIR_PE 85 -#define ICE_SID_FLD_VEC_PE 86 -#define ICE_SID_CDID_KEY_BUILDER_PE 87 -#define ICE_SID_CDID_REDIR_PE 88 - -/* Label Metadata section IDs */ -#define ICE_SID_LBL_FIRST 0x80000010 -#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 -/* The following define MUST be updated to reflect the last label section ID */ -#define ICE_SID_LBL_LAST 0x80000038 - -enum ice_block { - ICE_BLK_SW = 0, - ICE_BLK_ACL, - ICE_BLK_FD, - ICE_BLK_RSS, - ICE_BLK_PE, - ICE_BLK_COUNT -}; - -enum ice_sect { - ICE_XLT0 = 0, - ICE_XLT_KB, - ICE_XLT1, - ICE_XLT2, - ICE_PROF_TCAM, - ICE_PROF_REDIR, - ICE_VEC_TBL, - ICE_CDID_KB, - ICE_CDID_REDIR, - ICE_SECT_COUNT -}; +#include "ice_ddp.h" /* Packet Type (PTYPE) values */ #define ICE_PTYPE_MAC_PAY 1 @@ -283,134 +85,6 @@ struct ice_ptype_attributes { enum ice_ptype_attrib_type attrib; }; -/* package labels */ -struct ice_label { - __le16 value; -#define ICE_PKG_LABEL_SIZE 64 - char name[ICE_PKG_LABEL_SIZE]; -}; - -struct ice_label_section { - __le16 count; - struct ice_label label[]; -}; - -#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ - struct_size((struct ice_label_section *)0, label, 1) - \ - sizeof(struct ice_label), sizeof(struct ice_label)) - -struct ice_sw_fv_section { - __le16 count; - __le16 base_offset; - struct ice_fv fv[]; -}; - -struct ice_sw_fv_list_entry { - struct list_head list_entry; - u32 profile_id; - struct ice_fv *fv_ptr; -}; - -/* The BOOST TCAM stores the match packet header in reverse order, meaning - * the fields are reversed; in addition, this means that the normally big endian - * fields of the packet are now little endian. 
- */ -struct ice_boost_key_value { -#define ICE_BOOST_REMAINING_HV_KEY 15 - u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; - __le16 hv_dst_port_key; - __le16 hv_src_port_key; - u8 tcam_search_key; -} __packed; - -struct ice_boost_key { - struct ice_boost_key_value key; - struct ice_boost_key_value key2; -}; - -/* package Boost TCAM entry */ -struct ice_boost_tcam_entry { - __le16 addr; - __le16 reserved; - /* break up the 40 bytes of key into different fields */ - struct ice_boost_key key; - u8 boost_hit_index_group; - /* The following contains bitfields which are not on byte boundaries. - * These fields are currently unused by driver software. - */ -#define ICE_BOOST_BIT_FIELDS 43 - u8 bit_fields[ICE_BOOST_BIT_FIELDS]; -}; - -struct ice_boost_tcam_section { - __le16 count; - __le16 reserved; - struct ice_boost_tcam_entry tcam[]; -}; - -#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ - struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \ - sizeof(struct ice_boost_tcam_entry), \ - sizeof(struct ice_boost_tcam_entry)) - -/* package Marker Ptype TCAM entry */ -struct ice_marker_ptype_tcam_entry { -#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024 - __le16 addr; - __le16 ptype; - u8 keys[20]; -}; - -struct ice_marker_ptype_tcam_section { - __le16 count; - __le16 reserved; - struct ice_marker_ptype_tcam_entry tcam[]; -}; - -#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \ - ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \ - sizeof(struct ice_marker_ptype_tcam_entry), \ - sizeof(struct ice_marker_ptype_tcam_entry)) - -struct ice_xlt1_section { - __le16 count; - __le16 offset; - u8 value[]; -}; - -struct ice_xlt2_section { - __le16 count; - __le16 offset; - __le16 value[]; -}; - -struct ice_prof_redir_section { - __le16 count; - __le16 offset; - u8 redir_value[]; -}; - -/* package buffer building */ - -struct ice_buf_build { - struct ice_buf buf; - u16 reserved_section_table_entries; -}; - -struct ice_pkg_enum { - struct ice_buf_table *buf_table; - u32 buf_idx; - - u32 type; - struct ice_buf_hdr *buf; - u32 sect_idx; - void *sect; - u32 sect_type; - - u32 entry_idx; - void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); -}; - /* Tunnel enabling */ enum ice_tunnel_type { diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index 43e199b5b513..8dec748bb53a 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -3,15 +3,18 @@ #include "ice.h" #include "ice_lib.h" -#include <linux/tty_driver.h> /** - * ice_gnss_do_write - Write data to internal GNSS + * ice_gnss_do_write - Write data to internal GNSS receiver * @pf: board private structure * @buf: command buffer * @size: command buffer size * * Write UBX command data to the GNSS receiver + * + * Return: + * * number of bytes written - success + * * negative - error code */ static unsigned int ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size) @@ -82,6 +85,12 @@ static void ice_gnss_write_pending(struct kthread_work *work) write_work); struct ice_pf *pf = gnss->back; + if (!pf) + return; + + if (!test_bit(ICE_FLAG_GNSS, pf->flags)) + return; + if (!list_empty(&gnss->queue)) { struct gnss_write_buf *write_buf = NULL; unsigned int bytes; @@ -102,16 +111,14 @@ static void ice_gnss_write_pending(struct kthread_work *work) * ice_gnss_read - Read data from internal GNSS module * @work: GNSS read work structure * - * Read the data from internal GNSS receiver, number of 
bytes read will be - * returned in *read_data parameter. + * Read the data from internal GNSS receiver, write it to gnss_dev. */ static void ice_gnss_read(struct kthread_work *work) { struct gnss_serial *gnss = container_of(work, struct gnss_serial, read_work.work); + unsigned int i, bytes_read, data_len, count; struct ice_aqc_link_topo_addr link_topo; - unsigned int i, bytes_read, data_len; - struct tty_port *port; struct ice_pf *pf; struct ice_hw *hw; __be16 data_len_b; @@ -120,14 +127,15 @@ static void ice_gnss_read(struct kthread_work *work) int err = 0; pf = gnss->back; - if (!pf || !gnss->tty || !gnss->tty->port) { + if (!pf) { err = -EFAULT; goto exit; } - hw = &pf->hw; - port = gnss->tty->port; + if (!test_bit(ICE_FLAG_GNSS, pf->flags)) + return; + hw = &pf->hw; buf = (char *)get_zeroed_page(GFP_KERNEL); if (!buf) { err = -ENOMEM; @@ -159,7 +167,6 @@ static void ice_gnss_read(struct kthread_work *work) } data_len = min_t(typeof(data_len), data_len, PAGE_SIZE); - data_len = tty_buffer_request_room(port, data_len); if (!data_len) { err = -ENOMEM; goto exit_buf; @@ -179,12 +186,11 @@ static void ice_gnss_read(struct kthread_work *work) goto exit_buf; } - /* Send the data to the tty layer for users to read. This doesn't - * actually push the data through unless tty->low_latency is set. - */ - tty_insert_flip_string(port, buf, i); - tty_flip_buffer_push(port); - + count = gnss_insert_raw(pf->gnss_dev, buf, i); + if (count != i) + dev_warn(ice_pf_to_dev(pf), + "gnss_insert_raw ret=%d size=%d\n", + count, i); exit_buf: free_page((unsigned long)buf); kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, @@ -195,11 +201,16 @@ exit: } /** - * ice_gnss_struct_init - Initialize GNSS structure for the TTY + * ice_gnss_struct_init - Initialize GNSS receiver * @pf: Board private structure - * @index: TTY device index + * + * Initialize GNSS structures and workers. + * + * Return: + * * pointer to initialized gnss_serial struct - success + * * NULL - error */ -static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index) +static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct kthread_worker *kworker; @@ -209,17 +220,12 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index) if (!gnss) return NULL; - mutex_init(&gnss->gnss_mutex); - gnss->open_count = 0; gnss->back = pf; - pf->gnss_serial[index] = gnss; + pf->gnss_serial = gnss; kthread_init_delayed_work(&gnss->read_work, ice_gnss_read); INIT_LIST_HEAD(&gnss->queue); kthread_init_work(&gnss->write_work, ice_gnss_write_pending); - /* Allocate a kworker for handling work required for the GNSS TTY - * writes. - */ kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev)); if (IS_ERR(kworker)) { kfree(gnss); @@ -232,140 +238,100 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index) } /** - * ice_gnss_tty_open - Initialize GNSS structures on TTY device open - * @tty: pointer to the tty_struct - * @filp: pointer to the file + * ice_gnss_open - Open GNSS device + * @gdev: pointer to the gnss device struct + * + * Open GNSS device and start filling the read buffer for consumer. * - * This routine is mandatory. If this routine is not filled in, the attempted - * open will fail with ENODEV. 
+ * Return: + * * 0 - success + * * negative - error code */ -static int ice_gnss_tty_open(struct tty_struct *tty, struct file *filp) +static int ice_gnss_open(struct gnss_device *gdev) { + struct ice_pf *pf = gnss_get_drvdata(gdev); struct gnss_serial *gnss; - struct ice_pf *pf; - pf = (struct ice_pf *)tty->driver->driver_state; if (!pf) return -EFAULT; - /* Clear the pointer in case something fails */ - tty->driver_data = NULL; - - /* Get the serial object associated with this tty pointer */ - gnss = pf->gnss_serial[tty->index]; - if (!gnss) { - /* Initialize GNSS struct on the first device open */ - gnss = ice_gnss_struct_init(pf, tty->index); - if (!gnss) - return -ENOMEM; - } + if (!test_bit(ICE_FLAG_GNSS, pf->flags)) + return -EFAULT; - mutex_lock(&gnss->gnss_mutex); + gnss = pf->gnss_serial; + if (!gnss) + return -ENODEV; - /* Save our structure within the tty structure */ - tty->driver_data = gnss; - gnss->tty = tty; - gnss->open_count++; kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, 0); - mutex_unlock(&gnss->gnss_mutex); - return 0; } /** - * ice_gnss_tty_close - Cleanup GNSS structures on tty device close - * @tty: pointer to the tty_struct - * @filp: pointer to the file + * ice_gnss_close - Close GNSS device + * @gdev: pointer to the gnss device struct + * + * Close GNSS device, cancel worker, stop filling the read buffer. */ -static void ice_gnss_tty_close(struct tty_struct *tty, struct file *filp) +static void ice_gnss_close(struct gnss_device *gdev) { - struct gnss_serial *gnss = tty->driver_data; - struct ice_pf *pf; - - if (!gnss) - return; + struct ice_pf *pf = gnss_get_drvdata(gdev); + struct gnss_serial *gnss; - pf = (struct ice_pf *)tty->driver->driver_state; if (!pf) return; - mutex_lock(&gnss->gnss_mutex); - - if (!gnss->open_count) { - /* Port was never opened */ - dev_err(ice_pf_to_dev(pf), "GNSS port not opened\n"); - goto exit; - } + gnss = pf->gnss_serial; + if (!gnss) + return; - gnss->open_count--; - if (gnss->open_count <= 0) { - /* Port is in shutdown state */ - kthread_cancel_delayed_work_sync(&gnss->read_work); - } -exit: - mutex_unlock(&gnss->gnss_mutex); + kthread_cancel_work_sync(&gnss->write_work); + kthread_cancel_delayed_work_sync(&gnss->read_work); } /** - * ice_gnss_tty_write - Write GNSS data - * @tty: pointer to the tty_struct + * ice_gnss_write - Write to GNSS device + * @gdev: pointer to the gnss device struct * @buf: pointer to the user data - * @count: the number of characters queued to be sent to the HW + * @count: size of the buffer to be sent to the GNSS device * - * The write function call is called by the user when there is data to be sent - * to the hardware. First the tty core receives the call, and then it passes the - * data on to the tty driver's write function. The tty core also tells the tty - * driver the size of the data being sent. - * If any errors happen during the write call, a negative error value should be - * returned instead of the number of characters queued to be written. + * Return: + * * number of written bytes - success + * * negative - error code */ static int -ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) +ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf, + size_t count) { + struct ice_pf *pf = gnss_get_drvdata(gdev); struct gnss_write_buf *write_buf; struct gnss_serial *gnss; unsigned char *cmd_buf; - struct ice_pf *pf; int err = count; /* We cannot write a single byte using our I2C implementation. 
*/ if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF) return -EINVAL; - gnss = tty->driver_data; - if (!gnss) - return -EFAULT; - - pf = (struct ice_pf *)tty->driver->driver_state; if (!pf) return -EFAULT; - /* Only allow to write on TTY 0 */ - if (gnss != pf->gnss_serial[0]) - return -EIO; - - mutex_lock(&gnss->gnss_mutex); + if (!test_bit(ICE_FLAG_GNSS, pf->flags)) + return -EFAULT; - if (!gnss->open_count) { - err = -EINVAL; - goto exit; - } + gnss = pf->gnss_serial; + if (!gnss) + return -ENODEV; cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL); - if (!cmd_buf) { - err = -ENOMEM; - goto exit; - } + if (!cmd_buf) + return -ENOMEM; memcpy(cmd_buf, buf, count); - - /* Send the data out to a hardware port */ write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL); if (!write_buf) { kfree(cmd_buf); - err = -ENOMEM; - goto exit; + return -ENOMEM; } write_buf->buf = cmd_buf; @@ -373,141 +339,89 @@ ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) INIT_LIST_HEAD(&write_buf->queue); list_add_tail(&write_buf->queue, &gnss->queue); kthread_queue_work(gnss->kworker, &gnss->write_work); -exit: - mutex_unlock(&gnss->gnss_mutex); + return err; } +static const struct gnss_operations ice_gnss_ops = { + .open = ice_gnss_open, + .close = ice_gnss_close, + .write_raw = ice_gnss_write, +}; + /** - * ice_gnss_tty_write_room - Returns the numbers of characters to be written. - * @tty: pointer to the tty_struct + * ice_gnss_register - Register GNSS receiver + * @pf: Board private structure + * + * Allocate and register GNSS receiver in the Linux GNSS subsystem. * - * This routine returns the numbers of characters the tty driver will accept - * for queuing to be written or 0 if either the TTY is not open or user - * tries to write to the TTY other than the first. + * Return: + * * 0 - success + * * negative - error code */ -static unsigned int ice_gnss_tty_write_room(struct tty_struct *tty) +static int ice_gnss_register(struct ice_pf *pf) { - struct gnss_serial *gnss = tty->driver_data; - - /* Only allow to write on TTY 0 */ - if (!gnss || gnss != gnss->back->gnss_serial[0]) - return 0; - - mutex_lock(&gnss->gnss_mutex); + struct gnss_device *gdev; + int ret; + + gdev = gnss_allocate_device(ice_pf_to_dev(pf)); + if (!gdev) { + dev_err(ice_pf_to_dev(pf), + "gnss_allocate_device returns NULL\n"); + return -ENOMEM; + } - if (!gnss->open_count) { - mutex_unlock(&gnss->gnss_mutex); - return 0; + gdev->ops = &ice_gnss_ops; + gdev->type = GNSS_TYPE_UBX; + gnss_set_drvdata(gdev, pf); + ret = gnss_register_device(gdev); + if (ret) { + dev_err(ice_pf_to_dev(pf), "gnss_register_device err=%d\n", + ret); + gnss_put_device(gdev); + } else { + pf->gnss_dev = gdev; } - mutex_unlock(&gnss->gnss_mutex); - return ICE_GNSS_TTY_WRITE_BUF; + return ret; } -static const struct tty_operations tty_gps_ops = { - .open = ice_gnss_tty_open, - .close = ice_gnss_tty_close, - .write = ice_gnss_tty_write, - .write_room = ice_gnss_tty_write_room, -}; - /** - * ice_gnss_create_tty_driver - Create a TTY driver for GNSS + * ice_gnss_deregister - Deregister GNSS receiver * @pf: Board private structure + * + * Deregister GNSS receiver from the Linux GNSS subsystem, + * release its resources. 
*/ -static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf) +static void ice_gnss_deregister(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); - const int ICE_TTYDRV_NAME_MAX = 14; - struct tty_driver *tty_driver; - char *ttydrv_name; - unsigned int i; - int err; - - tty_driver = tty_alloc_driver(ICE_GNSS_TTY_MINOR_DEVICES, - TTY_DRIVER_REAL_RAW); - if (IS_ERR(tty_driver)) { - dev_err(dev, "Failed to allocate memory for GNSS TTY\n"); - return NULL; - } - - ttydrv_name = kzalloc(ICE_TTYDRV_NAME_MAX, GFP_KERNEL); - if (!ttydrv_name) { - tty_driver_kref_put(tty_driver); - return NULL; + if (pf->gnss_dev) { + gnss_deregister_device(pf->gnss_dev); + gnss_put_device(pf->gnss_dev); + pf->gnss_dev = NULL; } - - snprintf(ttydrv_name, ICE_TTYDRV_NAME_MAX, "ttyGNSS_%02x%02x_", - (u8)pf->pdev->bus->number, (u8)PCI_SLOT(pf->pdev->devfn)); - - /* Initialize the tty driver*/ - tty_driver->owner = THIS_MODULE; - tty_driver->driver_name = dev_driver_string(dev); - tty_driver->name = (const char *)ttydrv_name; - tty_driver->type = TTY_DRIVER_TYPE_SERIAL; - tty_driver->subtype = SERIAL_TYPE_NORMAL; - tty_driver->init_termios = tty_std_termios; - tty_driver->init_termios.c_iflag &= ~INLCR; - tty_driver->init_termios.c_iflag |= IGNCR; - tty_driver->init_termios.c_oflag &= ~OPOST; - tty_driver->init_termios.c_lflag &= ~ICANON; - tty_driver->init_termios.c_cflag &= ~(CSIZE | CBAUD | CBAUDEX); - /* baud rate 9600 */ - tty_termios_encode_baud_rate(&tty_driver->init_termios, 9600, 9600); - tty_driver->driver_state = pf; - tty_set_operations(tty_driver, &tty_gps_ops); - - for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) { - pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]), - GFP_KERNEL); - if (!pf->gnss_tty_port[i]) - goto err_out; - - pf->gnss_serial[i] = NULL; - - tty_port_init(pf->gnss_tty_port[i]); - tty_port_link_device(pf->gnss_tty_port[i], tty_driver, i); - } - - err = tty_register_driver(tty_driver); - if (err) { - dev_err(dev, "Failed to register TTY driver err=%d\n", err); - goto err_out; - } - - for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) - dev_info(dev, "%s%d registered\n", ttydrv_name, i); - - return tty_driver; - -err_out: - while (i--) { - tty_port_destroy(pf->gnss_tty_port[i]); - kfree(pf->gnss_tty_port[i]); - } - kfree(ttydrv_name); - tty_driver_kref_put(pf->ice_gnss_tty_driver); - - return NULL; } /** - * ice_gnss_init - Initialize GNSS TTY support + * ice_gnss_init - Initialize GNSS support * @pf: Board private structure */ void ice_gnss_init(struct ice_pf *pf) { - struct tty_driver *tty_driver; + int ret; - tty_driver = ice_gnss_create_tty_driver(pf); - if (!tty_driver) + pf->gnss_serial = ice_gnss_struct_init(pf); + if (!pf->gnss_serial) return; - pf->ice_gnss_tty_driver = tty_driver; - - set_bit(ICE_FLAG_GNSS, pf->flags); - dev_info(ice_pf_to_dev(pf), "GNSS TTY init successful\n"); + ret = ice_gnss_register(pf); + if (!ret) { + set_bit(ICE_FLAG_GNSS, pf->flags); + dev_info(ice_pf_to_dev(pf), "GNSS init successful\n"); + } else { + ice_gnss_exit(pf); + dev_err(ice_pf_to_dev(pf), "GNSS init failure\n"); + } } /** @@ -516,31 +430,20 @@ void ice_gnss_init(struct ice_pf *pf) */ void ice_gnss_exit(struct ice_pf *pf) { - unsigned int i; + ice_gnss_deregister(pf); + clear_bit(ICE_FLAG_GNSS, pf->flags); - if (!test_bit(ICE_FLAG_GNSS, pf->flags) || !pf->ice_gnss_tty_driver) - return; - - for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) { - if (pf->gnss_tty_port[i]) { - tty_port_destroy(pf->gnss_tty_port[i]); - kfree(pf->gnss_tty_port[i]); - } + if 
(pf->gnss_serial) { + struct gnss_serial *gnss = pf->gnss_serial; - if (pf->gnss_serial[i]) { - struct gnss_serial *gnss = pf->gnss_serial[i]; + kthread_cancel_work_sync(&gnss->write_work); + kthread_cancel_delayed_work_sync(&gnss->read_work); + kthread_destroy_worker(gnss->kworker); + gnss->kworker = NULL; - kthread_cancel_work_sync(&gnss->write_work); - kthread_cancel_delayed_work_sync(&gnss->read_work); - kfree(gnss); - pf->gnss_serial[i] = NULL; - } + kfree(gnss); + pf->gnss_serial = NULL; } - - tty_unregister_driver(pf->ice_gnss_tty_driver); - kfree(pf->ice_gnss_tty_driver->name); - tty_driver_kref_put(pf->ice_gnss_tty_driver); - pf->ice_gnss_tty_driver = NULL; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h index f454dd1d9285..31db0701d13f 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.h +++ b/drivers/net/ethernet/intel/ice/ice_gnss.h @@ -4,15 +4,8 @@ #ifndef _ICE_GNSS_H_ #define _ICE_GNSS_H_ -#include <linux/tty.h> -#include <linux/tty_flip.h> - #define ICE_E810T_GNSS_I2C_BUS 0x2 #define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */ -/* Create 2 minor devices, both using the same GNSS module. First one is RW, - * second one RO. - */ -#define ICE_GNSS_TTY_MINOR_DEVICES 2 #define ICE_GNSS_TTY_WRITE_BUF 250 #define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M) #define ICE_MAX_I2C_WRITE_BYTES 4 @@ -36,13 +29,9 @@ struct gnss_write_buf { unsigned char *buf; }; - /** * struct gnss_serial - data used to initialize GNSS TTY port * @back: back pointer to PF - * @tty: pointer to the tty for this device - * @open_count: number of times this port has been opened - * @gnss_mutex: gnss_mutex used to protect GNSS serial operations * @kworker: kwork thread for handling periodic work * @read_work: read_work function for handling GNSS reads * @write_work: write_work function for handling GNSS writes @@ -50,16 +39,13 @@ struct gnss_write_buf { */ struct gnss_serial { struct ice_pf *back; - struct tty_struct *tty; - int open_count; - struct mutex gnss_mutex; /* protects GNSS serial structure */ struct kthread_worker *kworker; struct kthread_delayed_work read_work; struct kthread_work write_work; struct list_head queue; }; -#if IS_ENABLED(CONFIG_TTY) +#if IS_ENABLED(CONFIG_ICE_GNSS) void ice_gnss_init(struct ice_pf *pf); void ice_gnss_exit(struct ice_pf *pf); bool ice_gnss_is_gps_present(struct ice_hw *hw); @@ -70,5 +56,5 @@ static inline bool ice_gnss_is_gps_present(struct ice_hw *hw) { return false; } -#endif /* IS_ENABLED(CONFIG_TTY) */ +#endif /* IS_ENABLED(CONFIG_ICE_GNSS) */ #endif /* _ICE_GNSS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 94aa834cd9a6..8316037b5548 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1670,7 +1670,7 @@ static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi) if (!ring_stats) goto err_out; - WRITE_ONCE(rx_ring_stats[i], ring_stats); + WRITE_ONCE(rx_ring_stats[i], ring_stats); } ring->ring_stats = ring_stats; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a9a7f8b52140..cb870da5c317 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3780,13 +3780,11 @@ static int ice_setup_pf_sw(struct ice_pf *pf) unroll_napi_add: ice_tc_indir_block_unregister(vsi); unroll_cfg_netdev: - if (vsi) { - ice_napi_del(vsi); - if (vsi->netdev) { - 
clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); - free_netdev(vsi->netdev); - vsi->netdev = NULL; - } + ice_napi_del(vsi); + if (vsi->netdev) { + clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); + free_netdev(vsi->netdev); + vsi->netdev = NULL; } unroll_vsi_setup: @@ -5048,8 +5046,11 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf) static void ice_remove(struct pci_dev *pdev) { struct ice_pf *pf = pci_get_drvdata(pdev); + struct ice_hw *hw; int i; + hw = &pf->hw; + ice_devlink_unregister(pf); for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { if (!ice_is_reset_in_progress(pf->state)) @@ -5082,7 +5083,7 @@ static void ice_remove(struct pci_dev *pdev) ice_remove_arfs(pf); ice_setup_mc_magic_wake(pf); ice_vsi_release_all(pf); - mutex_destroy(&(&pf->hw)->fdir_fltr_lock); + mutex_destroy(&hw->fdir_fltr_lock); ice_set_wake(pf); ice_free_irq_msix_misc(pf); ice_for_each_vsi(pf, i) { @@ -5094,13 +5095,13 @@ static void ice_remove(struct pci_dev *pdev) pf->vsi_stats = NULL; ice_deinit_pf(pf); ice_devlink_destroy_regions(pf); - ice_deinit_hw(&pf->hw); + ice_deinit_hw(hw); /* Issue a PFR as part of the prescribed driver unload flow. Do not * do it via ice_schedule_reset() since there is no need to rebuild * and the service task is already stopped. */ - ice_reset(&pf->hw, ICE_RESET_PFR); + ice_reset(hw, ICE_RESET_PFR); pci_wait_for_pending_transaction(pdev); ice_clear_interrupt_scheme(pf); pci_disable_pcie_error_reporting(pdev); @@ -6146,15 +6147,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi) { int err; - if (vsi->netdev) { + if (vsi->netdev && vsi->type == ICE_VSI_PF) { ice_set_rx_mode(vsi->netdev); - if (vsi->type != ICE_VSI_LB) { - err = ice_vsi_vlan_setup(vsi); - - if (err) - return err; - } + err = ice_vsi_vlan_setup(vsi); + if (err) + return err; } ice_vsi_cfg_dcb_rings(vsi); @@ -6335,7 +6333,7 @@ static int ice_up_complete(struct ice_vsi *vsi) if (vsi->port_info && (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && - vsi->netdev) { + vsi->netdev && vsi->type == ICE_VSI_PF) { ice_print_link_msg(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); @@ -6346,7 +6344,9 @@ static int ice_up_complete(struct ice_vsi *vsi) * set the baseline so counters are ready when interface is up */ ice_update_eth_stats(vsi); - ice_service_task_schedule(pf); + + if (vsi->type == ICE_VSI_PF) + ice_service_task_schedule(pf); return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index c262dc886e6a..f6f52a248066 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -662,7 +662,6 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, /* Verify that the simple checksum is zero */ for (i = 0; i < sizeof(*tmp); i++) - /* cppcheck-suppress objectIndex */ sum += ((u8 *)tmp)[i]; if (sum) { diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index d63161d73eb1..3abc8db1d065 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -2269,7 +2269,7 @@ static void ice_ptp_set_caps(struct ice_pf *pf) snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk", dev_driver_string(dev), dev_name(dev)); info->owner = THIS_MODULE; - info->max_adj = 999999999; + info->max_adj = 100000000; info->adjtime = ice_ptp_adjtime; info->adjfine = ice_ptp_adjfine; info->gettimex64 = ice_ptp_gettimex64; diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 
6d08b397df2a..4eca8d195ef0 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -1063,7 +1063,6 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, *num_nodes_added = 0; while (*num_nodes_added < num_nodes) { u16 max_child_nodes, num_added = 0; - /* cppcheck-suppress unusedVariable */ u32 temp; status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, @@ -1655,12 +1654,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, u32 first_node_teid; u16 num_added = 0; u8 i, qgl, vsil; - int status; qgl = ice_sched_get_qgrp_layer(hw); vsil = ice_sched_get_vsi_layer(hw); parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); for (i = vsil + 1; i <= qgl; i++) { + int status; + if (!parent) return -EIO; @@ -1756,13 +1756,14 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, u32 first_node_teid; u16 num_added = 0; u8 i, vsil; - int status; if (!pi) return -EINVAL; vsil = ice_sched_get_vsi_layer(pi->hw); for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { + int status; + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index faba0f857cd9..80706f7330f4 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -792,7 +792,7 @@ static struct ice_vsi * ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) { struct ice_rx_ring *ring = NULL; - struct ice_vsi *ch_vsi = NULL; + struct ice_vsi *dest_vsi = NULL; struct ice_pf *pf = vsi->back; struct device *dev; u32 tc_class; @@ -810,7 +810,7 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) return ERR_PTR(-EOPNOTSUPP); } /* Locate ADQ VSI depending on hw_tc number */ - ch_vsi = vsi->tc_map_vsi[tc_class]; + dest_vsi = vsi->tc_map_vsi[tc_class]; break; case ICE_FWD_TO_Q: /* Locate the Rx queue */ @@ -824,7 +824,7 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) /* Determine destination VSI even though the action is * FWD_TO_QUEUE, because QUEUE is associated with VSI */ - ch_vsi = tc_fltr->dest_vsi; + dest_vsi = tc_fltr->dest_vsi; break; default: dev_err(dev, @@ -832,13 +832,13 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) tc_fltr->action.fltr_act); return ERR_PTR(-EINVAL); } - /* Must have valid ch_vsi (it could be main VSI or ADQ VSI) */ - if (!ch_vsi) { + /* Must have valid dest_vsi (it could be main VSI or ADQ VSI) */ + if (!dest_vsi) { dev_err(dev, "Unable to add filter because specified destination VSI doesn't exist\n"); return ERR_PTR(-EINVAL); } - return ch_vsi; + return dest_vsi; } /** @@ -860,7 +860,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; u32 flags = tc_fltr->flags; - struct ice_vsi *ch_vsi; + struct ice_vsi *dest_vsi; struct device *dev; u16 lkups_cnt = 0; u16 l4_proto = 0; @@ -883,9 +883,11 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, } /* validate forwarding action VSI and queue */ - ch_vsi = ice_tc_forward_action(vsi, tc_fltr); - if (IS_ERR(ch_vsi)) - return PTR_ERR(ch_vsi); + if (ice_is_forward_action(tc_fltr->action.fltr_act)) { + dest_vsi = ice_tc_forward_action(vsi, tc_fltr); + if (IS_ERR(dest_vsi)) + return PTR_ERR(dest_vsi); + } lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr); list = kcalloc(lkups_cnt, sizeof(*list), 
GFP_ATOMIC); @@ -904,7 +906,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, switch (tc_fltr->action.fltr_act) { case ICE_FWD_TO_VSI: - rule_info.sw_act.vsi_handle = ch_vsi->idx; + rule_info.sw_act.vsi_handle = dest_vsi->idx; rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; rule_info.sw_act.src = hw->pf_id; rule_info.rx = true; @@ -915,7 +917,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, case ICE_FWD_TO_Q: /* HW queue number in global space */ rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue; - rule_info.sw_act.vsi_handle = ch_vsi->idx; + rule_info.sw_act.vsi_handle = dest_vsi->idx; rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE; rule_info.sw_act.src = hw->pf_id; rule_info.rx = true; @@ -923,14 +925,15 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, tc_fltr->action.fwd.q.queue, tc_fltr->action.fwd.q.hw_queue, lkups_cnt); break; - default: - rule_info.sw_act.flag |= ICE_FLTR_TX; - /* In case of Tx (LOOKUP_TX), src needs to be src VSI */ - rule_info.sw_act.src = vsi->idx; - /* 'Rx' is false, direction of rule(LOOKUPTRX) */ - rule_info.rx = false; + case ICE_DROP_PACKET: + rule_info.sw_act.flag |= ICE_FLTR_RX; + rule_info.sw_act.src = hw->pf_id; + rule_info.rx = true; rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; break; + default: + ret = -EOPNOTSUPP; + goto exit; } ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); @@ -953,11 +956,11 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, tc_fltr->dest_vsi_handle = rule_added.vsi_handle; if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI || tc_fltr->action.fltr_act == ICE_FWD_TO_Q) { - tc_fltr->dest_vsi = ch_vsi; + tc_fltr->dest_vsi = dest_vsi; /* keep track of advanced switch filter for * destination VSI */ - ch_vsi->num_chnl_fltr++; + dest_vsi->num_chnl_fltr++; /* keeps track of channel filters for PF VSI */ if (vsi->type == ICE_VSI_PF && @@ -978,6 +981,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, tc_fltr->action.fwd.q.hw_queue, rule_added.rid, rule_added.rule_id); break; + case ICE_DROP_PACKET: + dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is drop, rid %u, rule_id %u\n", + lkups_cnt, flags, rule_added.rid, rule_added.rule_id); + break; default: break; } @@ -1712,6 +1719,9 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr, case FLOW_ACTION_RX_QUEUE_MAPPING: /* forward to queue */ return ice_tc_forward_to_queue(vsi, fltr, act); + case FLOW_ACTION_DROP: + fltr->action.fltr_act = ICE_DROP_PACKET; + return 0; default: NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h index d916d1e92aa3..8d5e22ac7023 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h @@ -211,4 +211,14 @@ ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower); void ice_replay_tc_fltrs(struct ice_pf *pf); bool ice_is_tunnel_supported(struct net_device *dev); +static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act) +{ + switch (fltr_act) { + case ICE_FWD_TO_VSI: + case ICE_FWD_TO_Q: + return true; + default: + return false; + } +} #endif /* _ICE_TC_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 086f0b3ab68d..ccf09c957a1c 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1996,7 +1996,6 @@ int ice_tso(struct ice_tx_buf *first, struct 
ice_tx_offload_params *off) if (err < 0) return err; - /* cppcheck-suppress unreadVariable */ protocol = vlan_get_protocol(skb); if (eth_p_mpls(protocol)) @@ -2033,8 +2032,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) } /* reset pointers to inner headers */ - - /* cppcheck-suppress unreadVariable */ ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index a15927e77272..a1d815af507d 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -396,6 +396,35 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw) rd32(IGC_MPC); } +bool igc_is_device_id_i225(struct igc_hw *hw) +{ + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I225_IT: + return true; + default: + return false; + } +} + +bool igc_is_device_id_i226(struct igc_hw *hw) +{ + switch (hw->device_id) { + case IGC_DEV_ID_I226_LM: + case IGC_DEV_ID_I226_V: + case IGC_DEV_ID_I226_K: + case IGC_DEV_ID_I226_IT: + return true; + default: + return false; + } +} + static struct igc_mac_operations igc_mac_ops_base = { .init_hw = igc_init_hw_base, .check_for_link = igc_check_for_copper_link, diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index ce530f5fd7bd..7a992befca24 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -7,6 +7,8 @@ /* forward declaration */ void igc_rx_fifo_flush_base(struct igc_hw *hw); void igc_power_down_phy_copper_base(struct igc_hw *hw); +bool igc_is_device_id_i225(struct igc_hw *hw); +bool igc_is_device_id_i226(struct igc_hw *hw); /* Transmit Descriptor - Advanced */ union igc_adv_tx_desc { diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index e9747ec5ac0b..9dec3563ce3a 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -524,6 +524,7 @@ /* Transmit Scheduling */ #define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001 #define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008 +#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080 #define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 #define IGC_TXQCTL_STRICT_CYCLE 0x00000002 diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 44b1740dc098..e86b15efaeb8 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -5958,6 +5958,7 @@ static bool validate_schedule(struct igc_adapter *adapter, const struct tc_taprio_qopt_offload *qopt) { int queue_uses[IGC_MAX_TX_QUEUES] = { }; + struct igc_hw *hw = &adapter->hw; struct timespec64 now; size_t n; @@ -5970,8 +5971,10 @@ static bool validate_schedule(struct igc_adapter *adapter, * in the future, it will hold all the packets until that * time, causing a lot of TX Hangs, so to avoid that, we * reject schedules that would start in the future. + * Note: Limitation above is no longer in i226. 
*/ - if (!is_base_time_past(qopt->base_time, &now)) + if (!is_base_time_past(qopt->base_time, &now) && + igc_is_device_id_i225(hw)) return false; for (n = 0; n < qopt->num_entries; n++) { @@ -6041,6 +6044,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, struct tc_taprio_qopt_offload *qopt) { bool queue_configured[IGC_MAX_TX_QUEUES] = { }; + struct igc_hw *hw = &adapter->hw; u32 start_time = 0, end_time = 0; size_t n; int i; @@ -6053,7 +6057,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, if (qopt->base_time < 0) return -ERANGE; - if (adapter->base_time) + if (igc_is_device_id_i225(hw) && adapter->base_time) return -EALREADY; if (!validate_schedule(adapter, qopt)) diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index bb10d7b65232..a386c8d61dbf 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019 Intel Corporation */ #include "igc.h" +#include "igc_hw.h" #include "igc_tsn.h" static bool is_any_launchtime(struct igc_adapter *adapter) @@ -92,7 +93,8 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) tqavctrl = rd32(IGC_TQAVCTRL); tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | - IGC_TQAVCTRL_ENHANCED_QAV); + IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS); + wr32(IGC_TQAVCTRL, tqavctrl); for (i = 0; i < adapter->num_tx_queues; i++) { @@ -117,20 +119,10 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) ktime_t base_time, systim; int i; - cycle = adapter->cycle_time; - base_time = adapter->base_time; - wr32(IGC_TSAUXC, 0); wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN); wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); - tqavctrl = rd32(IGC_TQAVCTRL); - tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; - wr32(IGC_TQAVCTRL, tqavctrl); - - wr32(IGC_QBVCYCLET_S, cycle); - wr32(IGC_QBVCYCLET, cycle); - for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; u32 txqctl = 0; @@ -233,21 +225,46 @@ skip_cbs: wr32(IGC_TXQCTL(i), txqctl); } + tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS; + tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; + + cycle = adapter->cycle_time; + base_time = adapter->base_time; + nsec = rd32(IGC_SYSTIML); sec = rd32(IGC_SYSTIMH); systim = ktime_set(sec, nsec); - if (ktime_compare(systim, base_time) > 0) { - s64 n; + s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle); - n = div64_s64(ktime_sub_ns(systim, base_time), cycle); base_time = ktime_add_ns(base_time, (n + 1) * cycle); + } else { + /* According to datasheet section 7.5.2.9.3.3, FutScdDis bit + * has to be configured before the cycle time and base time. + * Tx won't hang if there is a GCL is already running, + * so in this case we don't need to set FutScdDis. + */ + if (igc_is_device_id_i226(hw) && + !(rd32(IGC_BASET_H) || rd32(IGC_BASET_L))) + tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS; } - baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l); + wr32(IGC_TQAVCTRL, tqavctrl); + + wr32(IGC_QBVCYCLET_S, cycle); + wr32(IGC_QBVCYCLET, cycle); + baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l); wr32(IGC_BASET_H, baset_h); + + /* In i226, Future base time is only supported when FutScdDis bit + * is enabled and only active for re-configuration. + * In this case, initialize the base time with zero to create + * "re-configuration" scenario then only set the desired base time. 
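The hunk above in igc_tsn_enable_offload() rolls a stale admin base time forward to the first cycle boundary that lies strictly after the current hardware time. A minimal userspace sketch of the same arithmetic (plain C, illustrative only; the helper name and the sample constants are made up, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the rollover logic in igc_tsn_enable_offload(): if the admin
 * base time is already in the past, advance it by whole cycles so the
 * schedule starts on the next boundary after "now".
 */
static int64_t roll_base_time(int64_t base_time, int64_t cycle, int64_t now)
{
	if (now > base_time) {
		int64_t n = (now - base_time) / cycle;	/* full cycles elapsed */

		base_time += (n + 1) * cycle;		/* next boundary after now */
	}
	return base_time;
}

int main(void)
{
	/* e.g. base 1000 ns, cycle 300 ns, current time 1950 ns:
	 * n = (1950 - 1000) / 300 = 3, new base = 1000 + 4 * 300 = 2200 ns
	 */
	printf("%lld\n", (long long)roll_base_time(1000, 300, 1950));
	return 0;
}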
+ */ + if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS) + wr32(IGC_BASET_L, 0); wr32(IGC_BASET_L, baset_l); return 0; @@ -274,17 +291,14 @@ int igc_tsn_reset(struct igc_adapter *adapter) int igc_tsn_offload_apply(struct igc_adapter *adapter) { - int err; + struct igc_hw *hw = &adapter->hw; - if (netif_running(adapter->netdev)) { + if (netif_running(adapter->netdev) && igc_is_device_id_i225(hw)) { schedule_work(&adapter->reset_task); return 0; } - err = igc_tsn_enable_offload(adapter); - if (err < 0) - return err; + igc_tsn_reset(adapter); - adapter->flags = igc_tsn_new_flags(adapter); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 38c4609bd429..878dd8dff528 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3292,13 +3292,14 @@ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) { + bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw); u32 links_reg, links_orig; u32 i; /* If Crosstalk fix enabled do the sanity check of making sure * the SFP+ cage is full. */ - if (ixgbe_need_crosstalk_fix(hw)) { + if (crosstalk_fix_active) { u32 sfp_cage_full; switch (hw->mac.type) { @@ -3346,10 +3347,24 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); } } else { - if (links_reg & IXGBE_LINKS_UP) + if (links_reg & IXGBE_LINKS_UP) { + if (crosstalk_fix_active) { + /* Check the link state again after a delay + * to filter out spurious link up + * notifications. + */ + mdelay(5); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (!(links_reg & IXGBE_LINKS_UP)) { + *link_up = false; + *speed = IXGBE_LINK_SPEED_UNKNOWN; + return 0; + } + } *link_up = true; - else + } else { *link_up = false; + } } switch (links_reg & IXGBE_LINKS_SPEED_82599) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ab8370c413f3..43a44c1e1576 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6647,7 +6647,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0) goto err; - rx_ring->xdp_prog = adapter->xdp_prog; + WRITE_ONCE(rx_ring->xdp_prog, adapter->xdp_prog); return 0; err: @@ -8937,7 +8937,8 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) int regnum = addr; if (devad != MDIO_DEVAD_NONE) - regnum |= (devad << 16) | MII_ADDR_C45; + return mdiobus_c45_read(adapter->mii_bus, prtad, + devad, regnum); return mdiobus_read(adapter->mii_bus, prtad, regnum); } @@ -8960,7 +8961,8 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, int regnum = addr; if (devad != MDIO_DEVAD_NONE) - regnum |= (devad << 16) | MII_ADDR_C45; + return mdiobus_c45_write(adapter->mii_bus, prtad, devad, + regnum, value); return mdiobus_write(adapter->mii_bus, prtad, regnum, value); } @@ -10297,14 +10299,13 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) synchronize_rcu(); err = ixgbe_setup_tc(dev, adapter->hw_tcs); - if (err) { - rcu_assign_pointer(adapter->xdp_prog, old_prog); + if (err) return -EINVAL; - } } else { - for (i = 0; i < adapter->num_rx_queues; i++) - (void)xchg(&adapter->rx_ring[i]->xdp_prog, - adapter->xdp_prog); + for (i = 0; i < 
adapter->num_rx_queues; i++) { + WRITE_ONCE(adapter->rx_ring[i]->xdp_prog, + adapter->xdp_prog); + } } if (old_prog) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 123dca9ce468..689470c1e8ad 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -680,14 +680,14 @@ static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd) } /** - * ixgbe_mii_bus_read_generic - Read a clause 22/45 register with gssr flags + * ixgbe_mii_bus_read_generic_c22 - Read a clause 22 register with gssr flags * @hw: pointer to hardware structure * @addr: address * @regnum: register number * @gssr: semaphore flags to acquire **/ -static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr, - int regnum, u32 gssr) +static s32 ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr, + int regnum, u32 gssr) { u32 hwaddr, cmd; s32 data; @@ -696,31 +696,52 @@ static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr, return -EBUSY; hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; - if (regnum & MII_ADDR_C45) { - hwaddr |= regnum & GENMASK(21, 0); - cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; - } else { - hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; - cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | - IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND; - } + hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; + cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | + IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND; data = ixgbe_msca_cmd(hw, cmd); if (data < 0) goto mii_bus_read_done; - /* For a clause 45 access the address cycle just completed, we still - * need to do the read command, otherwise just get the data - */ - if (!(regnum & MII_ADDR_C45)) - goto do_mii_bus_read; + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0); + +mii_bus_read_done: + hw->mac.ops.release_swfw_sync(hw, gssr); + return data; +} + +/** + * ixgbe_mii_bus_read_generic_c45 - Read a clause 45 register with gssr flags + * @hw: pointer to hardware structure + * @addr: address + * @devad: device address to read + * @regnum: register number + * @gssr: semaphore flags to acquire + **/ +static s32 ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr, + int devad, int regnum, u32 gssr) +{ + u32 hwaddr, cmd; + s32 data; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) + return -EBUSY; + + hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; + hwaddr |= devad << 16 | regnum; + cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; + + data = ixgbe_msca_cmd(hw, cmd); + if (data < 0) + goto mii_bus_read_done; cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND; data = ixgbe_msca_cmd(hw, cmd); if (data < 0) goto mii_bus_read_done; -do_mii_bus_read: data = IXGBE_READ_REG(hw, IXGBE_MSRWD); data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0); @@ -730,15 +751,15 @@ mii_bus_read_done: } /** - * ixgbe_mii_bus_write_generic - Write a clause 22/45 register with gssr flags + * ixgbe_mii_bus_write_generic_c22 - Write a clause 22 register with gssr flags * @hw: pointer to hardware structure * @addr: address * @regnum: register number * @val: value to write * @gssr: semaphore flags to acquire **/ -static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr, - int regnum, u16 val, u32 gssr) +static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr, + int regnum, u16 val, u32 gssr) { u32 hwaddr, cmd; s32 err; @@ 
-749,20 +770,43 @@ static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr, IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val); hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; - if (regnum & MII_ADDR_C45) { - hwaddr |= regnum & GENMASK(21, 0); - cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; - } else { - hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; - cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | - IXGBE_MSCA_MDI_COMMAND; - } + hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; + cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | + IXGBE_MSCA_MDI_COMMAND; + + err = ixgbe_msca_cmd(hw, cmd); + + hw->mac.ops.release_swfw_sync(hw, gssr); + return err; +} + +/** + * ixgbe_mii_bus_write_generic_c45 - Write a clause 45 register with gssr flags + * @hw: pointer to hardware structure + * @addr: address + * @devad: device address to read + * @regnum: register number + * @val: value to write + * @gssr: semaphore flags to acquire + **/ +static s32 ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr, + int devad, int regnum, u16 val, + u32 gssr) +{ + u32 hwaddr, cmd; + s32 err; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) + return -EBUSY; + + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val); + + hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; + hwaddr |= devad << 16 | regnum; + cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; - /* For clause 45 this is an address cycle, for clause 22 this is the - * entire transaction - */ err = ixgbe_msca_cmd(hw, cmd); - if (err < 0 || !(regnum & MII_ADDR_C45)) + if (err < 0) goto mii_bus_write_done; cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND; @@ -774,70 +818,144 @@ mii_bus_write_done: } /** - * ixgbe_mii_bus_read - Read a clause 22/45 register + * ixgbe_mii_bus_read_c22 - Read a clause 22 register + * @bus: pointer to mii_bus structure which points to our driver private + * @addr: address + * @regnum: register number + **/ +static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum) +{ + struct ixgbe_adapter *adapter = bus->priv; + struct ixgbe_hw *hw = &adapter->hw; + u32 gssr = hw->phy.phy_semaphore_mask; + + return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr); +} + +/** + * ixgbe_mii_bus_read_c45 - Read a clause 45 register * @bus: pointer to mii_bus structure which points to our driver private + * @devad: device address to read * @addr: address * @regnum: register number **/ -static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum) +static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr, + int regnum) +{ + struct ixgbe_adapter *adapter = bus->priv; + struct ixgbe_hw *hw = &adapter->hw; + u32 gssr = hw->phy.phy_semaphore_mask; + + return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr); +} + +/** + * ixgbe_mii_bus_write_c22 - Write a clause 22 register + * @bus: pointer to mii_bus structure which points to our driver private + * @addr: address + * @regnum: register number + * @val: value to write + **/ +static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum, + u16 val) { struct ixgbe_adapter *adapter = bus->priv; struct ixgbe_hw *hw = &adapter->hw; u32 gssr = hw->phy.phy_semaphore_mask; - return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr); + return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr); } /** - * ixgbe_mii_bus_write - Write a clause 22/45 register + * ixgbe_mii_bus_write_c45 - Write a clause 45 register * @bus: pointer to mii_bus 
structure which points to our driver private * @addr: address + * @devad: device address to read * @regnum: register number * @val: value to write **/ -static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum, - u16 val) +static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad, + int regnum, u16 val) { struct ixgbe_adapter *adapter = bus->priv; struct ixgbe_hw *hw = &adapter->hw; u32 gssr = hw->phy.phy_semaphore_mask; - return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr); + return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val, + gssr); } /** - * ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a + * ixgbe_x550em_a_mii_bus_read_c22 - Read a clause 22 register on x550em_a * @bus: pointer to mii_bus structure which points to our driver private * @addr: address * @regnum: register number **/ -static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr, - int regnum) +static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr, + int regnum) +{ + struct ixgbe_adapter *adapter = bus->priv; + struct ixgbe_hw *hw = &adapter->hw; + u32 gssr = hw->phy.phy_semaphore_mask; + + gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; + return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr); +} + +/** + * ixgbe_x550em_a_mii_bus_read_c45 - Read a clause 45 register on x550em_a + * @bus: pointer to mii_bus structure which points to our driver private + * @addr: address + * @devad: device address to read + * @regnum: register number + **/ +static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr, + int devad, int regnum) +{ + struct ixgbe_adapter *adapter = bus->priv; + struct ixgbe_hw *hw = &adapter->hw; + u32 gssr = hw->phy.phy_semaphore_mask; + + gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; + return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr); +} + +/** + * ixgbe_x550em_a_mii_bus_write_c22 - Write a clause 22 register on x550em_a + * @bus: pointer to mii_bus structure which points to our driver private + * @addr: address + * @regnum: register number + * @val: value to write + **/ +static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr, + int regnum, u16 val) { struct ixgbe_adapter *adapter = bus->priv; struct ixgbe_hw *hw = &adapter->hw; u32 gssr = hw->phy.phy_semaphore_mask; gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; - return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr); + return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr); } /** - * ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a + * ixgbe_x550em_a_mii_bus_write_c45 - Write a clause 45 register on x550em_a * @bus: pointer to mii_bus structure which points to our driver private * @addr: address + * @devad: device address to read * @regnum: register number * @val: value to write **/ -static s32 ixgbe_x550em_a_mii_bus_write(struct mii_bus *bus, int addr, - int regnum, u16 val) +static s32 ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr, + int devad, int regnum, u16 val) { struct ixgbe_adapter *adapter = bus->priv; struct ixgbe_hw *hw = &adapter->hw; u32 gssr = hw->phy.phy_semaphore_mask; gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; - return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr); + return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val, + gssr); } /** @@ -909,8 +1027,11 @@ out: **/ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) { - s32 (*write)(struct mii_bus *bus, int addr, int regnum, u16 
val); - s32 (*read)(struct mii_bus *bus, int addr, int regnum); + s32 (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val); + s32 (*read_c22)(struct mii_bus *bus, int addr, int regnum); + s32 (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum, + u16 val); + s32 (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum); struct ixgbe_adapter *adapter = hw->back; struct pci_dev *pdev = adapter->pdev; struct device *dev = &adapter->netdev->dev; @@ -929,12 +1050,16 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_A_1G_T_L: if (!ixgbe_x550em_a_has_mii(hw)) return 0; - read = &ixgbe_x550em_a_mii_bus_read; - write = &ixgbe_x550em_a_mii_bus_write; + read_c22 = ixgbe_x550em_a_mii_bus_read_c22; + write_c22 = ixgbe_x550em_a_mii_bus_write_c22; + read_c45 = ixgbe_x550em_a_mii_bus_read_c45; + write_c45 = ixgbe_x550em_a_mii_bus_write_c45; break; default: - read = &ixgbe_mii_bus_read; - write = &ixgbe_mii_bus_write; + read_c22 = ixgbe_mii_bus_read_c22; + write_c22 = ixgbe_mii_bus_write_c22; + read_c45 = ixgbe_mii_bus_read_c45; + write_c45 = ixgbe_mii_bus_write_c45; break; } @@ -942,8 +1067,10 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) if (!bus) return -ENOMEM; - bus->read = read; - bus->write = write; + bus->read = read_c22; + bus->write = write_c22; + bus->read_c45 = read_c45; + bus->write_c45 = write_c45; /* Use the position of the device in the PCI hierarchy as the id */ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name, diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index ef878973b859..2d654a40af13 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -204,21 +204,17 @@ static const struct orion_mdio_ops orion_mdio_xsmi_ops = { .poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX, }; -static int orion_mdio_xsmi_read(struct mii_bus *bus, int mii_id, - int regnum) +static int orion_mdio_xsmi_read_c45(struct mii_bus *bus, int mii_id, + int dev_addr, int regnum) { struct orion_mdio_dev *dev = bus->priv; - u16 dev_addr = (regnum >> 16) & GENMASK(4, 0); int ret; - if (!(regnum & MII_ADDR_C45)) - return -EOPNOTSUPP; - ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus); if (ret < 0) return ret; - writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG); + writel(regnum, dev->regs + MVMDIO_XSMI_ADDR_REG); writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) | (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) | MVMDIO_XSMI_READ_OPERATION, @@ -237,21 +233,17 @@ static int orion_mdio_xsmi_read(struct mii_bus *bus, int mii_id, return readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & GENMASK(15, 0); } -static int orion_mdio_xsmi_write(struct mii_bus *bus, int mii_id, - int regnum, u16 value) +static int orion_mdio_xsmi_write_c45(struct mii_bus *bus, int mii_id, + int dev_addr, int regnum, u16 value) { struct orion_mdio_dev *dev = bus->priv; - u16 dev_addr = (regnum >> 16) & GENMASK(4, 0); int ret; - if (!(regnum & MII_ADDR_C45)) - return -EOPNOTSUPP; - ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus); if (ret < 0) return ret; - writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG); + writel(regnum, dev->regs + MVMDIO_XSMI_ADDR_REG); writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) | (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) | MVMDIO_XSMI_WRITE_OPERATION | value, @@ -302,8 +294,8 @@ static int orion_mdio_probe(struct platform_device *pdev) bus->write = orion_mdio_smi_write; break; case BUS_TYPE_XSMI: - bus->read = orion_mdio_xsmi_read; - bus->write = 
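Both the ixgbe and the mvmdio conversions replace the old packed-regnum convention (MII_ADDR_C45 set, MMD device address in bits 20:16, register in bits 15:0) with dedicated read_c45/write_c45 mii_bus operations that receive devad and regnum as separate arguments. A small decode of the legacy packing, assuming that historical bit layout; the unpack_c45() helper is purely illustrative:

#include <stdio.h>
#include <stdint.h>

#define MII_ADDR_C45	(1U << 30)	/* legacy "this is a C45 access" flag */

/* Split a legacy packed C45 regnum into the two fields the new
 * read_c45/write_c45 callbacks take as separate parameters.
 */
static void unpack_c45(uint32_t packed, unsigned int *devad, unsigned int *regad)
{
	*devad = (packed >> 16) & 0x1f;		/* MMD device address, 5 bits */
	*regad = packed & 0xffff;		/* register within the MMD */
}

int main(void)
{
	unsigned int devad, regad;

	/* e.g. MMD 7, register 0x0020 */
	unpack_c45(MII_ADDR_C45 | (7 << 16) | 0x0020, &devad, &regad);
	printf("devad=%u regad=0x%04x\n", devad, regad);
	return 0;
}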
orion_mdio_xsmi_write; + bus->read_c45 = orion_mdio_xsmi_read_c45; + bus->write_c45 = orion_mdio_xsmi_write_c45; break; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index d2584ebb7a70..5727d67e0259 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -195,6 +195,9 @@ M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \ M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \ msg_rsp) \ M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \ +M(CPT_LF_RESET, 0xA08, cpt_lf_reset, cpt_lf_rst_req, msg_rsp) \ +M(CPT_FLT_ENG_INFO, 0xA09, cpt_flt_eng_info, cpt_flt_eng_info_req, \ + cpt_flt_eng_info_rsp) \ /* SDP mbox IDs (range 0x1000 - 0x11FF) */ \ M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \ M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \ @@ -297,6 +300,8 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \ msg_rsp) \ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \ nix_bandprof_get_hwinfo_rsp) \ +M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \ + msg_req, nix_inline_ipsec_cfg) \ /* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \ M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \ mcs_alloc_rsrc_rsp) \ @@ -1196,7 +1201,7 @@ struct nix_inline_ipsec_cfg { u32 cpt_credit; struct { u8 egrp; - u8 opcode; + u16 opcode; u16 param1; u16 param2; } gen_cfg; @@ -1205,6 +1210,8 @@ struct nix_inline_ipsec_cfg { u8 cpt_slot; } inst_qsel; u8 enable; + u16 bpid; + u32 credit_th; }; /* Per NIX LF inline IPSec configuration */ @@ -1609,6 +1616,8 @@ struct cpt_lf_alloc_req_msg { u16 sso_pf_func; u16 eng_grpmsk; int blkaddr; + u8 ctx_ilen_valid : 1; + u8 ctx_ilen : 7; }; #define CPT_INLINE_INBOUND 0 @@ -1692,6 +1701,28 @@ struct cpt_inst_lmtst_req { u64 rsvd; }; +/* Mailbox message format to request for CPT LF reset */ +struct cpt_lf_rst_req { + struct mbox_msghdr hdr; + u32 slot; + u32 rsvd; +}; + +/* Mailbox message format to request for CPT faulted engines */ +struct cpt_flt_eng_info_req { + struct mbox_msghdr hdr; + int blkaddr; + bool reset; + u32 rsvd; +}; + +struct cpt_flt_eng_info_rsp { + struct mbox_msghdr hdr; + u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU]; + u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU]; + u64 rsvd; +}; + struct sdp_node_info { /* Node to which this PF belons to */ u8 node_id; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 3f5e09b77d4b..8683ce57ed3f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1164,8 +1164,16 @@ cpt: goto nix_err; } + err = rvu_cpt_init(rvu); + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__); + goto mcs_err; + } + return 0; +mcs_err: + rvu_mcs_exit(rvu); nix_err: rvu_nix_freemem(rvu); npa_err: diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 7f0a64731c67..5eea2b6cf6bd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -108,6 +108,8 @@ struct rvu_block { u64 lfreset_reg; unsigned char name[NAME_SIZE]; struct rvu *rvu; + u64 cpt_flt_eng_map[3]; + u64 cpt_rcvrd_eng_map[3]; }; struct nix_mcast { @@ -459,6 +461,7 @@ struct rvu { struct rvu_pfvf *pf; struct rvu_pfvf *hwvf; struct 
mutex rsrc_lock; /* Serialize resource alloc/free */ + struct mutex alias_lock; /* Serialize bar2 alias access */ int vfs; /* Number of VFs attached to RVU */ int nix_blkaddr[MAX_NIX_BLKS]; @@ -510,6 +513,7 @@ struct rvu { struct ptp *ptp; int mcs_blk_cnt; + int cpt_pf_num; #ifdef CONFIG_DEBUG_FS struct rvu_debugfs rvu_dbg; @@ -524,6 +528,8 @@ struct rvu { struct list_head mcs_intrq_head; /* mcs interrupt queue lock */ spinlock_t mcs_intrq_lock; + /* CPT interrupt lock */ + spinlock_t cpt_intr_lock; }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -546,6 +552,17 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset) return readq(rvu->pfreg_base + offset); } +static inline void rvu_bar2_sel_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) +{ + /* HW requires read back of RVU_AF_BAR2_SEL register to make sure completion of + * write operation. + */ + rvu_write64(rvu, block, offset, val); + rvu_read64(rvu, block, offset); + /* Barrier to ensure read completes before accessing LF registers */ + mb(); +} + /* Silicon revisions */ static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu) { @@ -865,6 +882,7 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu); int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot); int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc); +int rvu_cpt_init(struct rvu *rvu); /* CN10K RVU */ int rvu_set_channels_base(struct rvu *rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 38bbae5d9ae0..f047185f38e0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -17,7 +17,7 @@ #define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2 /* Length of initial context fetch in 128 byte words */ -#define CPT_CTX_ILEN 2ULL +#define CPT_CTX_ILEN 1ULL #define cpt_get_eng_sts(e_min, e_max, rsp, etype) \ ({ \ @@ -37,34 +37,68 @@ (_rsp)->free_sts_##etype = free_sts; \ }) -static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr) +static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr) { struct rvu_block *block = ptr; struct rvu *rvu = block->rvu; int blkaddr = block->addr; - u64 reg0, reg1, reg2; - - reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0)); - reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1)); - if (!is_rvu_otx2(rvu)) { - reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2)); - dev_err_ratelimited(rvu->dev, - "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx", - reg0, reg1, reg2); - } else { - dev_err_ratelimited(rvu->dev, - "Received CPTAF FLT irq : 0x%llx, 0x%llx", - reg0, reg1); + u64 reg, val; + int i, eng; + u8 grp; + + reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec)); + dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg); + + i = -1; + while ((i = find_next_bit((unsigned long *)®, 64, i + 1)) < 64) { + switch (vec) { + case 0: + eng = i; + break; + case 1: + eng = i + 64; + break; + case 2: + eng = i + 128; + break; + } + grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF; + /* Disable and enable the engine which triggers fault */ + rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0); + val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng)); + rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL); + + rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp); + rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL); + + spin_lock(&rvu->cpt_intr_lock); + block->cpt_flt_eng_map[vec] |= BIT_ULL(i); + 
val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng)); + val = val & 0x3; + if (val == 0x1 || val == 0x2) + block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i); + spin_unlock(&rvu->cpt_intr_lock); } - - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0); - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1); - if (!is_rvu_otx2(rvu)) - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2); + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg); return IRQ_HANDLED; } +static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr) +{ + return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr); +} + +static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr) +{ + return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr); +} + +static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr) +{ + return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr); +} + static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr) { struct rvu_block *block = ptr; @@ -119,8 +153,10 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off) int i; /* Disable all CPT AF interrupts */ - for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++) - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1); + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL); + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL); + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF); + rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1); rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1); @@ -151,7 +187,7 @@ static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr) /* Disable all CPT AF interrupts */ for (i = 0; i < CPT_AF_INT_VEC_RVU; i++) - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1); + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL); rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1); rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1); @@ -172,16 +208,31 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off) { struct rvu *rvu = block->rvu; int blkaddr = block->addr; + irq_handler_t flt_fn; int i, ret; for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) { sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i); + + switch (i) { + case CPT_10K_AF_INT_VEC_FLT0: + flt_fn = rvu_cpt_af_flt0_intr_handler; + break; + case CPT_10K_AF_INT_VEC_FLT1: + flt_fn = rvu_cpt_af_flt1_intr_handler; + break; + case CPT_10K_AF_INT_VEC_FLT2: + flt_fn = rvu_cpt_af_flt2_intr_handler; + break; + } ret = rvu_cpt_do_register_interrupt(block, off + i, - rvu_cpt_af_flt_intr_handler, - &rvu->irq_name[(off + i) * NAME_SIZE]); + flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]); if (ret) goto err; - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1); + if (i == CPT_10K_AF_INT_VEC_FLT2) + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF); + else + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL); } ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU, @@ -208,8 +259,8 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; + irq_handler_t flt_fn; int i, offs, ret = 0; - char irq_name[16]; if (!is_block_implemented(rvu->hw, blkaddr)) return 0; @@ -226,13 +277,20 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr) return cpt_10k_register_interrupts(block, offs); for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) { - snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i); + 
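The reworked cpt_af_flt_intr_handler() walks the per-vector CPT_AF_FLTX_INT bitmap and recovers each faulted engine; vector 0 covers engines 0-63, vector 1 engines 64-127 and vector 2 engines 128 and up. A small userspace model of that bit-to-engine mapping (report_faulted_engines() is a made-up name, shown only to illustrate the vec * 64 + bit arithmetic):

#include <stdio.h>
#include <stdint.h>

/* Walk the set bits of one FLT vector's interrupt word and print the
 * global engine number each bit corresponds to.
 */
static void report_faulted_engines(int vec, uint64_t reg)
{
	while (reg) {
		int bit = __builtin_ctzll(reg);	/* lowest set bit */

		printf("vector %d bit %d -> engine %d\n", vec, bit, vec * 64 + bit);
		reg &= reg - 1;			/* clear that bit */
	}
}

int main(void)
{
	report_faulted_engines(1, (1ULL << 3) | (1ULL << 40));	/* engines 67 and 104 */
	return 0;
}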
sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i); + switch (i) { + case CPT_AF_INT_VEC_FLT0: + flt_fn = rvu_cpt_af_flt0_intr_handler; + break; + case CPT_AF_INT_VEC_FLT1: + flt_fn = rvu_cpt_af_flt1_intr_handler; + break; + } ret = rvu_cpt_do_register_interrupt(block, offs + i, - rvu_cpt_af_flt_intr_handler, - irq_name); + flt_fn, &rvu->irq_name[(offs + i) * NAME_SIZE]); if (ret) goto err; - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1); + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL); } ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU, @@ -290,7 +348,7 @@ static int get_cpt_pf_num(struct rvu *rvu) static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc) { - int cpt_pf_num = get_cpt_pf_num(rvu); + int cpt_pf_num = rvu->cpt_pf_num; if (rvu_get_pf(pcifunc) != cpt_pf_num) return false; @@ -302,7 +360,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc) static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc) { - int cpt_pf_num = get_cpt_pf_num(rvu); + int cpt_pf_num = rvu->cpt_pf_num; if (rvu_get_pf(pcifunc) != cpt_pf_num) return false; @@ -371,8 +429,12 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu, /* Set CPT LF group and priority */ val = (u64)req->eng_grpmsk << 48 | 1; - if (!is_rvu_otx2(rvu)) - val |= (CPT_CTX_ILEN << 17); + if (!is_rvu_otx2(rvu)) { + if (req->ctx_ilen_valid) + val |= (req->ctx_ilen << 17); + else + val |= (CPT_CTX_ILEN << 17); + } rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); @@ -762,10 +824,21 @@ int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req, #define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48) static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req, - int blkaddr) + int blkaddr, struct cpt_rxc_time_cfg_req *save) { u64 dfrg_reg; + if (save) { + /* Save older config */ + dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG); + save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg); + save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg); + save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg); + save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg); + + save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG); + } + dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres); dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit); dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres); @@ -790,7 +863,7 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu, !is_cpt_vf(rvu, req->hdr.pcifunc)) return CPT_AF_ERR_ACCESS_DENIED; - cpt_rxc_time_cfg(rvu, req, blkaddr); + cpt_rxc_time_cfg(rvu, req, blkaddr, NULL); return 0; } @@ -801,9 +874,67 @@ int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req, return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc); } +int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + int cptlf, blkaddr, ret; + u16 actual_slot; + u64 ctl, ctl2; + + blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc, + req->slot, &actual_slot); + if (blkaddr < 0) + return CPT_AF_ERR_LF_INVALID; + + block = &rvu->hw->block[blkaddr]; + + cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot); + if (cptlf < 0) + return CPT_AF_ERR_LF_INVALID; + ctl = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); + ctl2 = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf)); + + ret = rvu_lf_reset(rvu, block, cptlf); + if (ret) + dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n", + block->addr, cptlf); + + 
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), ctl); + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), ctl2); + + return 0; +} + +int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req, + struct cpt_flt_eng_info_rsp *rsp) +{ + struct rvu_block *block; + unsigned long flags; + int blkaddr, vec; + + blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); + if (blkaddr < 0) + return blkaddr; + + block = &rvu->hw->block[blkaddr]; + for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) { + spin_lock_irqsave(&rvu->cpt_intr_lock, flags); + rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec]; + rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec]; + if (req->reset) { + block->cpt_flt_eng_map[vec] = 0x0; + block->cpt_rcvrd_eng_map[vec] = 0x0; + } + spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags); + } + return 0; +} + static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr) { - struct cpt_rxc_time_cfg_req req; + struct cpt_rxc_time_cfg_req req, prev; int timeout = 2000; u64 reg; @@ -819,7 +950,7 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr) req.active_thres = 1; req.active_limit = 1; - cpt_rxc_time_cfg(rvu, &req, blkaddr); + cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev); do { reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS); @@ -845,70 +976,68 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr) if (timeout == 0) dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n"); + + /* Restore config */ + cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL); } -#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF) -#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31)) -#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF) -#define INPROG_GWB(reg) (((reg) >> 40) & 0xFF) +#define INFLIGHT GENMASK_ULL(8, 0) +#define GRB_CNT GENMASK_ULL(39, 32) +#define GWB_CNT GENMASK_ULL(47, 40) +#define XQ_XOR GENMASK_ULL(63, 63) +#define DQPTR GENMASK_ULL(19, 0) +#define NQPTR GENMASK_ULL(51, 32) static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot) { - int i = 0, hard_lp_ctr = 100000; - u64 inprog, grp_ptr; - u16 nq_ptr, dq_ptr; + int timeout = 1000000; + u64 inprog, inst_ptr; + u64 qsize, pending; + int i = 0; /* Disable instructions enqueuing */ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0); - /* Disable executions in the LF's queue */ inprog = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG)); - inprog &= ~BIT_ULL(16); + inprog |= BIT_ULL(16); rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog); - /* Wait for CPT queue to become execution-quiescent */ + qsize = rvu_read64(rvu, blkaddr, + CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_SIZE)) & 0x7FFF; do { - inprog = rvu_read64(rvu, blkaddr, - CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG)); - if (INPROG_GRB_PARTIAL(inprog)) { - i = 0; - hard_lp_ctr--; - } else { - i++; - } - - grp_ptr = rvu_read64(rvu, blkaddr, - CPT_AF_BAR2_ALIASX(slot, - CPT_LF_Q_GRP_PTR)); - nq_ptr = (grp_ptr >> 32) & 0x7FFF; - dq_ptr = grp_ptr & 0x7FFF; - - } while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr)); + inst_ptr = rvu_read64(rvu, blkaddr, + CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_INST_PTR)); + pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) + + FIELD_GET(NQPTR, inst_ptr) - + FIELD_GET(DQPTR, inst_ptr); + udelay(1); + timeout--; + } while ((pending != 0) && (timeout != 0)); - if (hard_lp_ctr == 0) - dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n"); + if (timeout == 0) + dev_warn(rvu->dev, "TIMEOUT: CPT poll on pending instructions\n"); - i = 0; - hard_lp_ctr = 
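The rewritten cpt_lf_disable_iqueue() no longer polls the group pointer; it derives the number of outstanding instructions from CPT_LF_Q_INST_PTR as the difference between the enqueue and dequeue pointers, plus one full queue worth of entries when the XQ_XOR bit indicates the two pointers are on different passes of the ring. The multiply by 40 reads as if the Q_SIZE field counts the queue in 40-instruction units; that is an assumption inferred from the expression, not stated in this patch. A plain-C illustration (cpt_pending() is a hypothetical helper):

#include <stdio.h>
#include <stdint.h>

/* Outstanding instructions, computed the same way the patch does from the
 * queue-instruction-pointer fields.  qsize is assumed to be in
 * 40-instruction units, matching the "* 40" in the driver expression.
 */
static uint64_t cpt_pending(uint64_t xq_xor, uint64_t qsize,
			    uint64_t nqptr, uint64_t dqptr)
{
	return xq_xor * qsize * 40 + nqptr - dqptr;
}

int main(void)
{
	/* same pass: 500 enqueued, 480 dequeued -> 20 pending */
	printf("%llu\n", (unsigned long long)cpt_pending(0, 25, 500, 480));
	/* wrapped: ring holds 25 * 40 = 1000 slots, nq=10, dq=990 -> 20 pending */
	printf("%llu\n", (unsigned long long)cpt_pending(1, 25, 10, 990));
	return 0;
}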
100000; + timeout = 1000000; + /* Wait for CPT queue to become execution-quiescent */ do { inprog = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG)); - if ((INPROG_INFLIGHT(inprog) == 0) && - (INPROG_GWB(inprog) < 40) && - ((INPROG_GRB(inprog) == 0) || - (INPROG_GRB((inprog)) == 40))) { + if ((FIELD_GET(INFLIGHT, inprog) == 0) && + (FIELD_GET(GRB_CNT, inprog) == 0)) { i++; } else { i = 0; - hard_lp_ctr--; + timeout--; } - } while (hard_lp_ctr && (i < 10)); + } while ((timeout != 0) && (i < 10)); - if (hard_lp_ctr == 0) - dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n"); + if (timeout == 0) + dev_warn(rvu->dev, "TIMEOUT: CPT poll on inflight count\n"); + /* Wait for 2 us to flush all queue writes to memory */ + udelay(2); } int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot) @@ -918,18 +1047,15 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc)) cpt_rxc_teardown(rvu, blkaddr); + mutex_lock(&rvu->alias_lock); /* Enable BAR2 ALIAS for this pcifunc. */ reg = BIT_ULL(16) | pcifunc; - rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg); + rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg); cpt_lf_disable_iqueue(rvu, blkaddr, slot); - /* Set group drop to help clear out hardware */ - reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG)); - reg |= BIT_ULL(17); - rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg); - - rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0); + rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0); + mutex_unlock(&rvu->alias_lock); return 0; } @@ -940,7 +1066,7 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr, int nix_blkaddr) { - int cpt_pf_num = get_cpt_pf_num(rvu); + int cpt_pf_num = rvu->cpt_pf_num; struct cpt_inst_lmtst_req *req; dma_addr_t res_daddr; int timeout = 3000; @@ -1064,7 +1190,7 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc) /* Enable BAR2 ALIAS for this pcifunc. 
*/ reg = BIT_ULL(16) | pcifunc; - rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg); + rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg); for (i = 0; i < max_ctx_entries; i++) { cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i)); @@ -1077,10 +1203,19 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc) reg); } } - rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0); + rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0); unlock: mutex_unlock(&rvu->rsrc_lock); return 0; } + +int rvu_cpt_init(struct rvu *rvu) +{ + /* Retrieve CPT PF number */ + rvu->cpt_pf_num = get_cpt_pf_num(rvu); + spin_lock_init(&rvu->cpt_intr_lock); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 6b8747ebc08c..89e94569e74c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -4731,6 +4731,10 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) +#define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32) +#define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22) +#define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0) + static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, int blkaddr) { @@ -4767,14 +4771,23 @@ static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *r val); /* Set CPT credit */ - rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), - req->cpt_credit); + val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); + if ((val & 0x3FFFFF) != 0x3FFFFF) + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), + 0x3FFFFF - val); + + val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit); + val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid); + val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val); } else { rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 0x0); - rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), - 0x3FFFFF); + val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); + if ((val & 0x3FFFFF) != 0x3FFFFF) + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), + 0x3FFFFF - val); } } @@ -4792,6 +4805,30 @@ int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, return 0; } +int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu, + struct msg_req *req, + struct nix_inline_ipsec_cfg *rsp) + +{ + u64 val; + + if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + return 0; + + val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG); + rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val); + rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val); + rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val); + rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val); + + val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0)); + rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val); + rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val); + rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val); + + return 0; +} + int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, struct nix_inline_ipsec_lf_cfg *req, struct msg_rsp *rsp) @@ -4835,6 +4872,7 @@ int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, return 0; } + void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) { bool from_vf = !!(pcifunc & 
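nix_inline_ipsec_cfg() now packs three fields into NIX_AF_RX_CPTX_CREDIT instead of writing a bare credit count: the credit count in bits 21:0, the backpressure BPID in bits 30:22 and the credit threshold in bits 53:32. A standalone sketch of that packing with plain shifts and masks, mirroring the FIELD_PREP calls; pack_cpt_credit() and the sample values are made up for illustration:

#include <stdio.h>
#include <stdint.h>

/* Bit ranges from the patch: CNT = [21:0], BPID = [30:22], TH = [53:32]. */
static uint64_t pack_cpt_credit(uint64_t credit, uint64_t bpid, uint64_t credit_th)
{
	uint64_t val = 0;

	val |= (credit    & 0x3FFFFFULL);		/* bits 21:0  */
	val |= (bpid      & 0x1FFULL)    << 22;		/* bits 30:22 */
	val |= (credit_th & 0x3FFFFFULL) << 32;		/* bits 53:32 */
	return val;
}

int main(void)
{
	/* made-up values: full credit, BPID 5, threshold 16 */
	printf("0x%016llx\n",
	       (unsigned long long)pack_cpt_credit(0x3FFFFF, 5, 16));
	return 0;
}

The read-back that precedes the reprogramming, writing 0x3FFFFF - val whenever the low 22 bits are below maximum, suggests the credit field behaves as a counter that written values are added to rather than a plain overwrite; that reading is inferred from the code alone.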
RVU_PFVF_FUNC_MASK); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 0e0d536645ac..5437bd20c719 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -545,6 +545,8 @@ #define CPT_LF_CTL 0x10 #define CPT_LF_INPROG 0x40 +#define CPT_LF_Q_SIZE 0x100 +#define CPT_LF_Q_INST_PTR 0x110 #define CPT_LF_Q_GRP_PTR 0x120 #define CPT_LF_CTX_FLUSH 0x510 diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index cf456d62677f..87fff539d39d 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -965,7 +965,7 @@ static int pxa168_init_phy(struct net_device *dev) if (dev->phydev) return 0; - phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); + phy = mdiobus_scan_c22(pep->smi_bus, pep->phy_addr); if (IS_ERR(phy)) return PTR_ERR(phy); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index e3de9a53b2d9..801deac58bf7 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -51,6 +51,7 @@ static const struct mtk_reg_map mtk_reg_map = { .delay_irq = 0x0a0c, .irq_status = 0x0a20, .irq_mask = 0x0a28, + .adma_rx_dbg0 = 0x0a38, .int_grp = 0x0a50, }, .qdma = { @@ -82,6 +83,8 @@ static const struct mtk_reg_map mtk_reg_map = { [0] = 0x2800, [1] = 0x2c00, }, + .pse_iq_sta = 0x0110, + .pse_oq_sta = 0x0118, }; static const struct mtk_reg_map mt7628_reg_map = { @@ -112,6 +115,7 @@ static const struct mtk_reg_map mt7986_reg_map = { .delay_irq = 0x620c, .irq_status = 0x6220, .irq_mask = 0x6228, + .adma_rx_dbg0 = 0x6238, .int_grp = 0x6250, }, .qdma = { @@ -143,6 +147,8 @@ static const struct mtk_reg_map mt7986_reg_map = { [0] = 0x4800, [1] = 0x4c00, }, + .pse_iq_sta = 0x0180, + .pse_oq_sta = 0x01a0, }; /* strings used by ethtool */ @@ -215,8 +221,8 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth) return -ETIMEDOUT; } -static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg, - u32 write_data) +static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg, + u32 write_data) { int ret; @@ -224,35 +230,13 @@ static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg, if (ret < 0) return ret; - if (phy_reg & MII_ADDR_C45) { - mtk_w32(eth, PHY_IAC_ACCESS | - PHY_IAC_START_C45 | - PHY_IAC_CMD_C45_ADDR | - PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) | - PHY_IAC_ADDR(phy_addr) | - PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)), - MTK_PHY_IAC); - - ret = mtk_mdio_busy_wait(eth); - if (ret < 0) - return ret; - - mtk_w32(eth, PHY_IAC_ACCESS | - PHY_IAC_START_C45 | - PHY_IAC_CMD_WRITE | - PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) | - PHY_IAC_ADDR(phy_addr) | - PHY_IAC_DATA(write_data), - MTK_PHY_IAC); - } else { - mtk_w32(eth, PHY_IAC_ACCESS | - PHY_IAC_START_C22 | - PHY_IAC_CMD_WRITE | - PHY_IAC_REG(phy_reg) | - PHY_IAC_ADDR(phy_addr) | - PHY_IAC_DATA(write_data), - MTK_PHY_IAC); - } + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C22 | + PHY_IAC_CMD_WRITE | + PHY_IAC_REG(phy_reg) | + PHY_IAC_ADDR(phy_addr) | + PHY_IAC_DATA(write_data), + MTK_PHY_IAC); ret = mtk_mdio_busy_wait(eth); if (ret < 0) @@ -261,7 +245,8 @@ static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg, return 0; } -static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg) +static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr, + u32 devad, 
u32 phy_reg, u32 write_data) { int ret; @@ -269,33 +254,47 @@ static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg) if (ret < 0) return ret; - if (phy_reg & MII_ADDR_C45) { - mtk_w32(eth, PHY_IAC_ACCESS | - PHY_IAC_START_C45 | - PHY_IAC_CMD_C45_ADDR | - PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) | - PHY_IAC_ADDR(phy_addr) | - PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)), - MTK_PHY_IAC); - - ret = mtk_mdio_busy_wait(eth); - if (ret < 0) - return ret; - - mtk_w32(eth, PHY_IAC_ACCESS | - PHY_IAC_START_C45 | - PHY_IAC_CMD_C45_READ | - PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) | - PHY_IAC_ADDR(phy_addr), - MTK_PHY_IAC); - } else { - mtk_w32(eth, PHY_IAC_ACCESS | - PHY_IAC_START_C22 | - PHY_IAC_CMD_C22_READ | - PHY_IAC_REG(phy_reg) | - PHY_IAC_ADDR(phy_addr), - MTK_PHY_IAC); - } + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C45 | + PHY_IAC_CMD_C45_ADDR | + PHY_IAC_REG(devad) | + PHY_IAC_ADDR(phy_addr) | + PHY_IAC_DATA(phy_reg), + MTK_PHY_IAC); + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C45 | + PHY_IAC_CMD_WRITE | + PHY_IAC_REG(devad) | + PHY_IAC_ADDR(phy_addr) | + PHY_IAC_DATA(write_data), + MTK_PHY_IAC); + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + return 0; +} + +static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg) +{ + int ret; + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C22 | + PHY_IAC_CMD_C22_READ | + PHY_IAC_REG(phy_reg) | + PHY_IAC_ADDR(phy_addr), + MTK_PHY_IAC); ret = mtk_mdio_busy_wait(eth); if (ret < 0) @@ -304,19 +303,70 @@ static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg) return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; } -static int mtk_mdio_write(struct mii_bus *bus, int phy_addr, - int phy_reg, u16 val) +static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr, + u32 devad, u32 phy_reg) +{ + int ret; + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C45 | + PHY_IAC_CMD_C45_ADDR | + PHY_IAC_REG(devad) | + PHY_IAC_ADDR(phy_addr) | + PHY_IAC_DATA(phy_reg), + MTK_PHY_IAC); + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C45 | + PHY_IAC_CMD_C45_READ | + PHY_IAC_REG(devad) | + PHY_IAC_ADDR(phy_addr), + MTK_PHY_IAC); + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; +} + +static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr, + int phy_reg, u16 val) +{ + struct mtk_eth *eth = bus->priv; + + return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val); +} + +static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr, + int devad, int phy_reg, u16 val) +{ + struct mtk_eth *eth = bus->priv; + + return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val); +} + +static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg) { struct mtk_eth *eth = bus->priv; - return _mtk_mdio_write(eth, phy_addr, phy_reg, val); + return _mtk_mdio_read_c22(eth, phy_addr, phy_reg); } -static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) +static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad, + int phy_reg) { struct mtk_eth *eth = bus->priv; - return _mtk_mdio_read(eth, phy_addr, phy_reg); + return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg); } static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, @@ 
-760,9 +810,10 @@ static int mtk_mdio_init(struct mtk_eth *eth) } eth->mii_bus->name = "mdio"; - eth->mii_bus->read = mtk_mdio_read; - eth->mii_bus->write = mtk_mdio_write; - eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45; + eth->mii_bus->read = mtk_mdio_read_c22; + eth->mii_bus->write = mtk_mdio_write_c22; + eth->mii_bus->read_c45 = mtk_mdio_read_c45; + eth->mii_bus->write_c45 = mtk_mdio_write_c45; eth->mii_bus->priv = eth; eth->mii_bus->parent = eth->dev; @@ -2984,14 +3035,29 @@ static void mtk_dma_free(struct mtk_eth *eth) kfree(eth->scratch_head); } +static bool mtk_hw_reset_check(struct mtk_eth *eth) +{ + u32 val = mtk_r32(eth, MTK_INT_STATUS2); + + return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) || + (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) || + (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL); +} + static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; + if (test_bit(MTK_RESETTING, ð->state)) + return; + + if (!mtk_hw_reset_check(eth)) + return; + eth->netdev[mac->id]->stats.tx_errors++; - netif_err(eth, tx_err, dev, - "transmit timed out\n"); + netif_err(eth, tx_err, dev, "transmit timed out\n"); + schedule_work(ð->pending_work); } @@ -3471,22 +3537,188 @@ static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val) mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); } -static int mtk_hw_init(struct mtk_eth *eth) +static void mtk_hw_reset(struct mtk_eth *eth) +{ + u32 val; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); + val = RSTCTRL_PPE0_V2; + } else { + val = RSTCTRL_PPE0; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val |= RSTCTRL_PPE1; + + ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, + 0x3ffffff); +} + +static u32 mtk_hw_reset_read(struct mtk_eth *eth) +{ + u32 val; + + regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val); + return val; +} + +static void mtk_hw_warm_reset(struct mtk_eth *eth) +{ + u32 rst_mask, val; + + regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE, + RSTCTRL_FE); + if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val, + val & RSTCTRL_FE, 1, 1000)) { + dev_err(eth->dev, "warm reset failed\n"); + mtk_hw_reset(eth); + return; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2; + else + rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + rst_mask |= RSTCTRL_PPE1; + + regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask); + + udelay(1); + val = mtk_hw_reset_read(eth); + if (!(val & rst_mask)) + dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n", + val, rst_mask); + + rst_mask |= RSTCTRL_FE; + regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask); + + udelay(1); + val = mtk_hw_reset_read(eth); + if (val & rst_mask) + dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n", + val, rst_mask); +} + +static bool mtk_hw_check_dma_hang(struct mtk_eth *eth) +{ + const struct mtk_reg_map *reg_map = eth->soc->reg_map; + bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx; + bool oq_hang, cdm1_busy, adma_busy; + bool wtx_busy, cdm_full, oq_free; + u32 wdidx, val, gdm1_fc, gdm2_fc; + bool qfsm_hang, qfwd_hang; + bool ret = false; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + return false; + + /* 
WDMA sanity checks */ + wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc); + + val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204); + wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val); + + val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230); + cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val); + + oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) && + !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) && + !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16))); + + if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) { + if (++eth->reset.wdma_hang_count > 2) { + eth->reset.wdma_hang_count = 0; + ret = true; + } + goto out; + } + + /* QDMA sanity checks */ + qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234); + qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308); + + gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0; + gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0; + gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1; + gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1; + gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24); + gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64); + + if (qfsm_hang && qfwd_hang && + ((gdm1_tx && gmac1_tx && gdm1_fc < 1) || + (gdm2_tx && gmac2_tx && gdm2_fc < 1))) { + if (++eth->reset.qdma_hang_count > 2) { + eth->reset.qdma_hang_count = 0; + ret = true; + } + goto out; + } + + /* ADMA sanity checks */ + oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0)); + cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16)); + adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) && + !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6)); + + if (oq_hang && cdm1_busy && adma_busy) { + if (++eth->reset.adma_hang_count > 2) { + eth->reset.adma_hang_count = 0; + ret = true; + } + goto out; + } + + eth->reset.wdma_hang_count = 0; + eth->reset.qdma_hang_count = 0; + eth->reset.adma_hang_count = 0; +out: + eth->reset.wdidx = wdidx; + + return ret; +} + +static void mtk_hw_reset_monitor_work(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct mtk_eth *eth = container_of(del_work, struct mtk_eth, + reset.monitor_work); + + if (test_bit(MTK_RESETTING, ð->state)) + goto out; + + /* DMA stuck checks */ + if (mtk_hw_check_dma_hang(eth)) + schedule_work(ð->pending_work); + +out: + schedule_delayed_work(ð->reset.monitor_work, + MTK_DMA_MONITOR_TIMEOUT); +} + +static int mtk_hw_init(struct mtk_eth *eth, bool reset) { u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | ETHSYS_DMA_AG_MAP_PPE; const struct mtk_reg_map *reg_map = eth->soc->reg_map; int i, val, ret; - if (test_and_set_bit(MTK_HW_INIT, ð->state)) + if (!reset && test_and_set_bit(MTK_HW_INIT, ð->state)) return 0; - pm_runtime_enable(eth->dev); - pm_runtime_get_sync(eth->dev); + if (!reset) { + pm_runtime_enable(eth->dev); + pm_runtime_get_sync(eth->dev); - ret = mtk_clk_enable(eth); - if (ret) - goto err_disable_pm; + ret = mtk_clk_enable(eth); + if (ret) + goto err_disable_pm; + } if (eth->ethsys) regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, @@ -3510,22 +3742,14 @@ static int mtk_hw_init(struct mtk_eth *eth) return 0; } - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { - regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); - val = RSTCTRL_PPE0_V2; - } else { - val = RSTCTRL_PPE0; - } - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) - val |= RSTCTRL_PPE1; + msleep(100); - 
ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); + if (reset) + mtk_hw_warm_reset(eth); + else + mtk_hw_reset(eth); if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { - regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, - 0x3ffffff); - /* Set FE to PDMAv2 if necessary */ val = mtk_r32(eth, MTK_FE_GLO_MISC); mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); @@ -3627,8 +3851,10 @@ static int mtk_hw_init(struct mtk_eth *eth) return 0; err_disable_pm: - pm_runtime_put_sync(eth->dev); - pm_runtime_disable(eth->dev); + if (!reset) { + pm_runtime_put_sync(eth->dev); + pm_runtime_disable(eth->dev); + } return ret; } @@ -3707,52 +3933,86 @@ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } +static void mtk_prepare_for_reset(struct mtk_eth *eth) +{ + u32 val; + int i; + + /* disabe FE P3 and P4 */ + val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val |= MTK_FE_LINK_DOWN_P4; + mtk_w32(eth, val, MTK_FE_GLO_CFG); + + /* adjust PPE configurations to prepare for reset */ + for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) + mtk_ppe_prepare_reset(eth->ppe[i]); + + /* disable NETSYS interrupts */ + mtk_w32(eth, 0, MTK_FE_INT_ENABLE); + + /* force link down GMAC */ + for (i = 0; i < 2; i++) { + val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK; + mtk_w32(eth, val, MTK_MAC_MCR(i)); + } +} + static void mtk_pending_work(struct work_struct *work) { struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); - int err, i; unsigned long restart = 0; + u32 val; + int i; rtnl_lock(); - - dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__); set_bit(MTK_RESETTING, ð->state); + mtk_prepare_for_reset(eth); + mtk_wed_fe_reset(); + /* Run again reset preliminary configuration in order to avoid any + * possible race during FE reset since it can run releasing RTNL lock. 
+ */ + mtk_prepare_for_reset(eth); + /* stop all devices to make sure that dma is properly shut down */ for (i = 0; i < MTK_MAC_COUNT; i++) { - if (!eth->netdev[i]) + if (!eth->netdev[i] || !netif_running(eth->netdev[i])) continue; + mtk_stop(eth->netdev[i]); __set_bit(i, &restart); } - dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__); - /* restart underlying hardware such as power, clock, pin mux - * and the connected phy - */ - mtk_hw_deinit(eth); + usleep_range(15000, 16000); if (eth->dev->pins) pinctrl_select_state(eth->dev->pins->p, eth->dev->pins->default_state); - mtk_hw_init(eth); + mtk_hw_init(eth, true); /* restart DMA and enable IRQs */ for (i = 0; i < MTK_MAC_COUNT; i++) { if (!test_bit(i, &restart)) continue; - err = mtk_open(eth->netdev[i]); - if (err) { + + if (mtk_open(eth->netdev[i])) { netif_alert(eth, ifup, eth->netdev[i], - "Driver up/down cycle failed, closing device.\n"); + "Driver up/down cycle failed\n"); dev_close(eth->netdev[i]); } } - dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__); + /* enabe FE P3 and P4 */ + val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val &= ~MTK_FE_LINK_DOWN_P4; + mtk_w32(eth, val, MTK_FE_GLO_CFG); clear_bit(MTK_RESETTING, ð->state); + mtk_wed_fe_reset_complete(); + rtnl_unlock(); } @@ -3797,6 +4057,7 @@ static int mtk_cleanup(struct mtk_eth *eth) mtk_unreg_dev(eth); mtk_free_dev(eth); cancel_work_sync(ð->pending_work); + cancel_delayed_work_sync(ð->reset.monitor_work); return 0; } @@ -4251,6 +4512,7 @@ static int mtk_probe(struct platform_device *pdev) eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; INIT_WORK(ð->rx_dim.work, mtk_dim_rx); + INIT_DELAYED_WORK(ð->reset.monitor_work, mtk_hw_reset_monitor_work); eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; INIT_WORK(ð->tx_dim.work, mtk_dim_tx); @@ -4364,7 +4626,7 @@ static int mtk_probe(struct platform_device *pdev) eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); INIT_WORK(ð->pending_work, mtk_pending_work); - err = mtk_hw_init(eth); + err = mtk_hw_init(eth, false); if (err) goto err_wed_exit; @@ -4453,6 +4715,8 @@ static int mtk_probe(struct platform_device *pdev) netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx); platform_set_drvdata(pdev, eth); + schedule_delayed_work(ð->reset.monitor_work, + MTK_DMA_MONITOR_TIMEOUT); return 0; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 18a50529ce7b..dff0e3ad2de6 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -77,12 +77,24 @@ #define MTK_HW_LRO_REPLACE_DELTA 1000 #define MTK_HW_LRO_SDL_REMAIN_ROOM 1522 +/* Frame Engine Global Configuration */ +#define MTK_FE_GLO_CFG 0x00 +#define MTK_FE_LINK_DOWN_P3 BIT(11) +#define MTK_FE_LINK_DOWN_P4 BIT(12) + /* Frame Engine Global Reset Register */ #define MTK_RST_GL 0x04 #define RST_GL_PSE BIT(0) /* Frame Engine Interrupt Status Register */ #define MTK_INT_STATUS2 0x08 +#define MTK_FE_INT_ENABLE 0x0c +#define MTK_FE_INT_FQ_EMPTY BIT(8) +#define MTK_FE_INT_TSO_FAIL BIT(12) +#define MTK_FE_INT_TSO_ILLEGAL BIT(13) +#define MTK_FE_INT_TSO_ALIGN BIT(14) +#define MTK_FE_INT_RFIFO_OV BIT(18) +#define MTK_FE_INT_RFIFO_UF BIT(19) #define MTK_GDM1_AF BIT(28) #define MTK_GDM2_AF BIT(29) @@ -272,6 +284,8 @@ #define MTK_RX_DONE_INT_V2 BIT(14) +#define MTK_CDM_TXFIFO_RDY BIT(7) + /* QDMA Interrupt grouping registers */ #define MTK_RLS_DONE_INT BIT(0) @@ -562,6 
+576,17 @@ #define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c) #define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110) +#define MTK_FE_CDM1_FSM 0x220 +#define MTK_FE_CDM2_FSM 0x224 +#define MTK_FE_CDM3_FSM 0x238 +#define MTK_FE_CDM4_FSM 0x298 +#define MTK_FE_CDM5_FSM 0x318 +#define MTK_FE_CDM6_FSM 0x328 +#define MTK_FE_GDM1_FSM 0x228 +#define MTK_FE_GDM2_FSM 0x22C + +#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100)) + struct mtk_rx_dma { unsigned int rxd1; unsigned int rxd2; @@ -958,6 +983,7 @@ struct mtk_reg_map { u32 delay_irq; /* delay interrupt */ u32 irq_status; /* interrupt status */ u32 irq_mask; /* interrupt mask */ + u32 adma_rx_dbg0; u32 int_grp; } pdma; struct { @@ -986,6 +1012,8 @@ struct mtk_reg_map { u32 gdma_to_ppe; u32 ppe_base; u32 wdma_base[2]; + u32 pse_iq_sta; + u32 pse_oq_sta; }; /* struct mtk_eth_data - This is the structure holding all differences @@ -1028,6 +1056,8 @@ struct mtk_soc_data { } txrx; }; +#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000) + /* currently no SoC has more than 2 macs */ #define MTK_MAX_DEVS 2 @@ -1152,6 +1182,14 @@ struct mtk_eth { struct rhashtable flow_table; struct bpf_prog __rcu *prog; + + struct { + struct delayed_work monitor_work; + u32 wdidx; + u8 wdma_hang_count; + u8 qdma_hang_count; + u8 adma_hang_count; + } reset; }; /* struct mtk_mac - the structure that holds the info about the MACs of the diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index 269208a841c7..451a87b1bc20 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -730,6 +730,33 @@ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) return __mtk_foe_entry_idle_time(ppe, entry->data.ib1); } +int mtk_ppe_prepare_reset(struct mtk_ppe *ppe) +{ + if (!ppe) + return -EINVAL; + + /* disable KA */ + ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE); + ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE); + ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0); + usleep_range(10000, 11000); + + /* set KA timer to maximum */ + ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE); + ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff); + + /* set KA tick select */ + ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL); + ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE); + usleep_range(10000, 11000); + + /* disable scan mode */ + ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE); + usleep_range(10000, 11000); + + return mtk_ppe_wait_busy(ppe); +} + struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version, int index) { diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index ea64fac1d425..16b02e1d4649 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -309,6 +309,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, void mtk_ppe_deinit(struct mtk_eth *eth); void mtk_ppe_start(struct mtk_ppe *ppe); int mtk_ppe_stop(struct mtk_ppe *ppe); +int mtk_ppe_prepare_reset(struct mtk_ppe *ppe); void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h index 59596d823d8b..0fdb983b0a88 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h @@ -58,6 +58,12 @@ #define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16) #define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18) #define 
MTK_PPE_TB_CFG_INFO_SEL BIT(20) +#define MTK_PPE_TB_TICK_SEL BIT(24) + +#define MTK_PPE_BIND_LMT1 0x230 +#define MTK_PPE_NTU_KEEPALIVE GENMASK(23, 16) + +#define MTK_PPE_KEEPALIVE 0x234 enum { MTK_PPE_SCAN_MODE_DISABLED, diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index a6271449617f..95ac4f71d2b2 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -206,6 +206,48 @@ mtk_wed_wo_reset(struct mtk_wed_device *dev) iounmap(reg); } +void mtk_wed_fe_reset(void) +{ + int i; + + mutex_lock(&hw_lock); + + for (i = 0; i < ARRAY_SIZE(hw_list); i++) { + struct mtk_wed_hw *hw = hw_list[i]; + struct mtk_wed_device *dev = hw->wed_dev; + int err; + + if (!dev || !dev->wlan.reset) + continue; + + /* reset callback blocks until WLAN reset is completed */ + err = dev->wlan.reset(dev); + if (err) + dev_err(dev->dev, "wlan reset failed: %d\n", err); + } + + mutex_unlock(&hw_lock); +} + +void mtk_wed_fe_reset_complete(void) +{ + int i; + + mutex_lock(&hw_lock); + + for (i = 0; i < ARRAY_SIZE(hw_list); i++) { + struct mtk_wed_hw *hw = hw_list[i]; + struct mtk_wed_device *dev = hw->wed_dev; + + if (!dev || !dev->wlan.reset_complete) + continue; + + dev->wlan.reset_complete(dev); + } + + mutex_unlock(&hw_lock); +} + static struct mtk_wed_hw * mtk_wed_assign(struct mtk_wed_device *dev) { diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h index e012b8a82133..43ab77eaf683 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.h +++ b/drivers/net/ethernet/mediatek/mtk_wed.h @@ -128,6 +128,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, void mtk_wed_exit(void); int mtk_wed_flow_add(int index); void mtk_wed_flow_remove(int index); +void mtk_wed_fe_reset(void); +void mtk_wed_fe_reset_complete(void); #else static inline void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, @@ -147,6 +149,13 @@ static inline void mtk_wed_flow_remove(int index) { } +static inline void mtk_wed_fe_reset(void) +{ +} + +static inline void mtk_wed_fe_reset_complete(void) +{ +} #endif #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c index a0a39643caf7..69fba29055e9 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c @@ -138,7 +138,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, enum dma_data_direction dir = rx ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; int n_buf = 0; - spin_lock_bh(&q->lock); while (q->queued < q->n_desc) { struct mtk_wed_wo_queue_entry *entry; dma_addr_t addr; @@ -172,7 +171,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, q->queued++; n_buf++; } - spin_unlock_bh(&q->lock); return n_buf; } @@ -260,7 +258,6 @@ mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, int n_desc, int buf_size, int index, struct mtk_wed_wo_queue_regs *regs) { - spin_lock_init(&q->lock); q->regs = *regs; q->n_desc = n_desc; q->buf_size = buf_size; @@ -292,7 +289,6 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) struct page *page; int i; - spin_lock_bh(&q->lock); for (i = 0; i < q->n_desc; i++) { struct mtk_wed_wo_queue_entry *entry = &q->entry[i]; @@ -301,7 +297,6 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) skb_free_frag(entry->buf); entry->buf = NULL; } - spin_unlock_bh(&q->lock); if (!q->cache.va) return; @@ -316,7 +311,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) { struct page *page; - spin_lock_bh(&q->lock); for (;;) { void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true); @@ -325,7 +319,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) skb_free_frag(buf); } - spin_unlock_bh(&q->lock); if (!q->cache.va) return; @@ -351,8 +344,6 @@ int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, int ret = 0, index; u32 ctrl; - spin_lock_bh(&q->lock); - q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx); index = (q->head + 1) % q->n_desc; if (q->tail == index) { @@ -383,8 +374,6 @@ int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, mtk_wed_wo_queue_kick(wo, q, q->head); mtk_wed_wo_kickout(wo); out: - spin_unlock_bh(&q->lock); - dev_kfree_skb(skb); return ret; diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h index c8fb85795864..dbcf42ce9173 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h @@ -211,7 +211,6 @@ struct mtk_wed_wo_queue { struct mtk_wed_wo_queue_regs regs; struct page_frag_cache cache; - spinlock_t lock; struct mtk_wed_wo_queue_desc *desc; dma_addr_t desc_dma; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index c837103a9ee3..382d02f6619c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -47,6 +47,25 @@ #define CREATE_TRACE_POINTS #include "diag/cmd_tracepoint.h" +struct mlx5_ifc_mbox_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_mbox_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + enum { CMD_IF_REV = 5, }; @@ -70,6 +89,26 @@ enum { MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, }; +static u16 in_to_opcode(void *in) +{ + return MLX5_GET(mbox_in, in, opcode); +} + +/* Returns true for opcodes that might be triggered very frequently and throttle + * the command interface. Limit their command slots usage. 
+ */ +static bool mlx5_cmd_is_throttle_opcode(u16 op) +{ + switch (op) { + case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: + case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: + case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: + case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: + return true; + } + return false; +} + static struct mlx5_cmd_work_ent * cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, struct mlx5_cmd_msg *out, void *uout, int uout_size, @@ -91,6 +130,7 @@ cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, ent->context = context; ent->cmd = cmd; ent->page_queue = page_queue; + ent->op = in_to_opcode(in->first.data); refcount_set(&ent->refcnt, 1); return ent; @@ -752,25 +792,6 @@ static int cmd_status_to_err(u8 status) } } -struct mlx5_ifc_mbox_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; -}; - -struct mlx5_ifc_mbox_in_bits { - u8 opcode[0x10]; - u8 uid[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x40]; -}; - void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out) { u32 syndrome = MLX5_GET(mbox_out, out, syndrome); @@ -788,11 +809,12 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) u16 opcode, op_mod; u16 uid; - opcode = MLX5_GET(mbox_in, in, opcode); + opcode = in_to_opcode(in); op_mod = MLX5_GET(mbox_in, in, op_mod); uid = MLX5_GET(mbox_in, in, uid); - if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY) + if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY && + opcode != MLX5_CMD_OP_CREATE_UCTX) mlx5_cmd_out_err(dev, opcode, op_mod, out); } @@ -800,7 +822,7 @@ int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out) { /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */ if (err == -ENXIO) { - u16 opcode = MLX5_GET(mbox_in, in, opcode); + u16 opcode = in_to_opcode(in); u32 syndrome; u8 status; @@ -829,9 +851,9 @@ static void dump_command(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent, int input) { struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; - u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode); struct mlx5_cmd_mailbox *next = msg->next; int n = mlx5_calc_cmd_blocks(msg); + u16 op = ent->op; int data_only; u32 offset = 0; int dump_len; @@ -883,11 +905,6 @@ static void dump_command(struct mlx5_core_dev *dev, mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx); } -static u16 msg_to_opcode(struct mlx5_cmd_msg *in) -{ - return MLX5_GET(mbox_in, in->first.data, opcode); -} - static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); static void cb_timeout_handler(struct work_struct *work) @@ -905,13 +922,13 @@ static void cb_timeout_handler(struct work_struct *work) /* Maybe got handled by eq recover ? */ if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) { mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx, - mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + mlx5_command_str(ent->op), ent->op); goto out; /* phew, already handled */ } ent->ret = -ETIMEDOUT; mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. 
Will cause a leak of a command resource\n", - ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + ent->idx, mlx5_command_str(ent->op), ent->op); mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); out: @@ -985,7 +1002,6 @@ static void cmd_work_handler(struct work_struct *work) ent->lay = lay; memset(lay, 0, sizeof(*lay)); memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); - ent->op = be32_to_cpu(lay->in[0]) >> 16; if (ent->in->next) lay->in_ptr = cpu_to_be64(ent->in->next->dma); lay->inlen = cpu_to_be32(ent->in->len); @@ -1098,12 +1114,12 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev, */ if (wait_for_completion_timeout(&ent->done, timeout)) { mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx, - mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + mlx5_command_str(ent->op), ent->op); return; } mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx, - mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + mlx5_command_str(ent->op), ent->op); ent->ret = -ETIMEDOUT; mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); @@ -1130,12 +1146,10 @@ out_err: if (err == -ETIMEDOUT) { mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", - mlx5_command_str(msg_to_opcode(ent->in)), - msg_to_opcode(ent->in)); + mlx5_command_str(ent->op), ent->op); } else if (err == -ECANCELED) { mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", - mlx5_command_str(msg_to_opcode(ent->in)), - msg_to_opcode(ent->in)); + mlx5_command_str(ent->op), ent->op); } mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, deliv_status_to_str(ent->status), ent->status); @@ -1169,7 +1183,6 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, u8 status = 0; int err = 0; s64 ds; - u16 op; if (callback && page_queue) return -EINVAL; @@ -1209,9 +1222,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, goto out_free; ds = ent->ts2 - ent->ts1; - op = MLX5_GET(mbox_in, in->first.data, opcode); - if (op < MLX5_CMD_OP_MAX) { - stats = &cmd->stats[op]; + if (ent->op < MLX5_CMD_OP_MAX) { + stats = &cmd->stats[ent->op]; spin_lock_irq(&stats->lock); stats->sum += ds; ++stats->n; @@ -1219,7 +1231,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, } mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, "fw exec time for %s is %lld nsec\n", - mlx5_command_str(op), ds); + mlx5_command_str(ent->op), ds); out_free: status = ent->status; @@ -1816,7 +1828,7 @@ cache_miss: static int is_manage_pages(void *in) { - return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES; + return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES; } /* Notes: @@ -1827,8 +1839,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size, mlx5_cmd_cbk_t callback, void *context, bool force_polling) { - u16 opcode = MLX5_GET(mbox_in, in, opcode); struct mlx5_cmd_msg *inb, *outb; + u16 opcode = in_to_opcode(in); + bool throttle_op; int pages_queue; gfp_t gfp; u8 token; @@ -1837,13 +1850,21 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) return -ENXIO; + throttle_op = mlx5_cmd_is_throttle_opcode(opcode); + if (throttle_op) { + /* atomic context may not sleep */ + if (callback) + return -EINVAL; + down(&dev->cmd.throttle_sem); + } + pages_queue = is_manage_pages(in); gfp = callback ? 
GFP_ATOMIC : GFP_KERNEL; inb = alloc_msg(dev, in_size, gfp); if (IS_ERR(inb)) { err = PTR_ERR(inb); - return err; + goto out_up; } token = alloc_token(&dev->cmd); @@ -1877,6 +1898,9 @@ out_out: mlx5_free_cmd_msg(dev, outb); out_in: free_msg(dev, inb); +out_up: + if (throttle_op) + up(&dev->cmd.throttle_sem); return err; } @@ -1950,8 +1974,8 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); - u16 opcode = MLX5_GET(mbox_in, in, opcode); u16 op_mod = MLX5_GET(mbox_in, in, op_mod); + u16 opcode = in_to_opcode(in); return cmd_status_err(dev, err, opcode, op_mod, out); } @@ -1996,8 +2020,8 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); - u16 opcode = MLX5_GET(mbox_in, in, opcode); u16 op_mod = MLX5_GET(mbox_in, in, op_mod); + u16 opcode = in_to_opcode(in); err = cmd_status_err(dev, err, opcode, op_mod, out); return mlx5_cmd_check(dev, err, in, out); @@ -2049,7 +2073,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, work->ctx = ctx; work->user_callback = callback; - work->opcode = MLX5_GET(mbox_in, in, opcode); + work->opcode = in_to_opcode(in); work->op_mod = MLX5_GET(mbox_in, in, op_mod); work->out = out; if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) @@ -2220,6 +2244,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) sema_init(&cmd->sem, cmd->max_reg_cmds); sema_init(&cmd->pages_sem, 1); + sema_init(&cmd->throttle_sem, DIV_ROUND_UP(cmd->max_reg_cmds, 2)); cmd_h = (u32)((u64)(cmd->dma) >> 32); cmd_l = (u32)(cmd->dma); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 0571e40c6ee5..2b444fb12388 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -59,6 +59,9 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev) if (!IS_ENABLED(CONFIG_MLX5_CORE_EN)) return false; + if (mlx5_core_is_management_pf(dev)) + return false; + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return false; @@ -198,6 +201,9 @@ bool mlx5_rdma_supported(struct mlx5_core_dev *dev) if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) return false; + if (mlx5_core_is_management_pf(dev)) + return false; + if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV) return false; @@ -343,7 +349,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev) devl_assert_locked(priv_to_devlink(dev)); mutex_lock(&mlx5_intf_mutex); priv->flags &= ~MLX5_PRIV_FLAGS_DETACH; - priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW; for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) { if (!priv->adev[i]) { bool is_supported = false; @@ -391,7 +396,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev) break; } } - priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW; mutex_unlock(&mlx5_intf_mutex); return ret; } @@ -406,7 +410,6 @@ void mlx5_detach_device(struct mlx5_core_dev *dev) devl_assert_locked(priv_to_devlink(dev)); mutex_lock(&mlx5_intf_mutex); - priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW; for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) { if (!priv->adev[i]) continue; @@ -435,7 +438,6 @@ skip_suspend: del_adev(&priv->adev[i]->adev); priv->adev[i] = NULL; } - priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW; priv->flags |= MLX5_PRIV_FLAGS_DETACH; 
mutex_unlock(&mlx5_intf_mutex); } @@ -534,22 +536,16 @@ del_adev: int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; - int err = 0; lockdep_assert_held(&mlx5_intf_mutex); if (priv->flags & MLX5_PRIV_FLAGS_DETACH) return 0; - priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW; delete_drivers(dev); if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV) - goto out; - - err = add_drivers(dev); + return 0; -out: - priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW; - return err; + return add_drivers(dev); } bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c index 464eb3a18450..b70e36025d92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c @@ -75,6 +75,10 @@ int mlx5_ec_init(struct mlx5_core_dev *dev) if (!mlx5_core_is_ecpf(dev)) return 0; + /* Management PF don't have a peer PF */ + if (mlx5_core_is_management_pf(dev)) + return 0; + return mlx5_host_pf_init(dev); } @@ -85,6 +89,10 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev) if (!mlx5_core_is_ecpf(dev)) return; + /* Management PF don't have a peer PF */ + if (mlx5_core_is_management_pf(dev)) + return; + mlx5_host_pf_cleanup(dev); err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 2d77fb8a8a01..da58322cbc3a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -247,7 +247,7 @@ struct mlx5e_rx_wqe_ll { }; struct mlx5e_rx_wqe_cyc { - struct mlx5_wqe_data_seg data[0]; + DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data); }; struct mlx5e_umr_wqe { @@ -968,6 +968,11 @@ struct mlx5e_priv { struct mlx5e_scratchpad scratchpad; struct mlx5e_htb *htb; struct mlx5e_mqprio_rl *mqprio_rl; + struct dentry *dfs_root; +}; + +struct mlx5e_dev { + struct mlx5e_priv *priv; }; struct mlx5e_rx_handlers { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c index 83adaabf59f5..03ad3b61dfc7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c @@ -4,6 +4,29 @@ #include "en/devlink.h" #include "eswitch.h" +static const struct devlink_ops mlx5e_devlink_ops = { +}; + +struct mlx5e_dev *mlx5e_create_devlink(struct device *dev) +{ + struct mlx5e_dev *mlx5e_dev; + struct devlink *devlink; + + devlink = devlink_alloc(&mlx5e_devlink_ops, sizeof(*mlx5e_dev), dev); + if (!devlink) + return ERR_PTR(-ENOMEM); + devlink_register(devlink); + return devlink_priv(devlink); +} + +void mlx5e_destroy_devlink(struct mlx5e_dev *mlx5e_dev) +{ + struct devlink *devlink = priv_to_devlink(mlx5e_dev); + + devlink_unregister(devlink); + devlink_free(devlink); +} + static void mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid) { @@ -14,14 +37,14 @@ mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_i memcpy(ppid->id, &parent_id, sizeof(parent_id)); } -int mlx5e_devlink_port_register(struct mlx5e_priv *priv) +int mlx5e_devlink_port_register(struct mlx5e_dev *mlx5e_dev, + struct mlx5e_priv *priv) { - struct devlink *devlink = priv_to_devlink(priv->mdev); + struct devlink *devlink = priv_to_devlink(mlx5e_dev); struct devlink_port_attrs attrs = {}; struct netdev_phys_item_id 
ppid = {}; struct devlink_port *dl_port; unsigned int dl_port_index; - int ret; if (mlx5_core_is_pf(priv->mdev)) { attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; @@ -42,23 +65,12 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv) memset(dl_port, 0, sizeof(*dl_port)); devlink_port_attrs_set(dl_port, &attrs); - if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW)) - devl_lock(devlink); - ret = devl_port_register(devlink, dl_port, dl_port_index); - if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW)) - devl_unlock(devlink); - - return ret; + return devlink_port_register(devlink, dl_port, dl_port_index); } void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv) { struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); - struct devlink *devlink = priv_to_devlink(priv->mdev); - if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW)) - devl_lock(devlink); - devl_port_unregister(dl_port); - if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW)) - devl_unlock(devlink); + devlink_port_unregister(dl_port); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h index 4f238d4fff55..19b1d8e9634e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h @@ -7,7 +7,10 @@ #include <net/devlink.h> #include "en.h" -int mlx5e_devlink_port_register(struct mlx5e_priv *priv); +struct mlx5e_dev *mlx5e_create_devlink(struct device *dev); +void mlx5e_destroy_devlink(struct mlx5e_dev *mlx5e_dev); +int mlx5e_devlink_port_register(struct mlx5e_dev *mlx5e_dev, + struct mlx5e_priv *priv); void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv); static inline struct devlink_port * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 379c6dc9a3be..5233d4daca41 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -145,7 +145,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple, struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile, struct mlx5_core_dev *mdev, - bool state_destroy); + bool state_destroy, + struct dentry *dfs_root); void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs); struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs); void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc); @@ -189,6 +190,8 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs, __be16 proto, u16 vid); void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev); +struct dentry *mlx5e_fs_get_debugfs_root(struct mlx5e_flow_steering *fs); + #define fs_err(fs, fmt, ...) 
\ mlx5_core_err(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c index 17325c5d6516..cf60f0a3ff23 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c @@ -47,6 +47,7 @@ void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl) void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl) { + WARN_ON(!hash_empty(tbl->hlist)); mutex_destroy(&tbl->lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 4ad19c981294..a21bd1179477 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -411,9 +411,14 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, { enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk); u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); + u8 log_wqe_size, log_stride_size; - return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) - - mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); + log_wqe_size = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode); + log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); + WARN(log_wqe_size < log_stride_size, + "Log WQE size %u < log stride size %u (page shift %u, umr mode %d, xsk on? %d)\n", + log_wqe_size, log_stride_size, page_shift, umr_mode, !!xsk); + return log_wqe_size - log_stride_size; } u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz) @@ -580,11 +585,16 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); u16 max_mtu_pkts; - if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) + if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) { + mlx5_core_err(mdev, "Striding RQ for XSK can't be activated with page_shift %u and umr_mode %d\n", + page_shift, umr_mode); return -EOPNOTSUPP; + } - if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) + if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) { + mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n"); return -EINVAL; + } /* Current RQ length is too big for the given frame size, the * needed number of WQEs exceeds the maximum. 
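Editor's note (illustrative, not part of the patch): the en/params.c hunk above splits mlx5e_mpwqe_get_log_num_strides() into two named logs and adds a WARN() when log_wqe_size falls below log_stride_size, since the returned difference is the log2 of the number of strides packed into one multi-packet WQE and would otherwise underflow silently. A minimal standalone sketch of that arithmetic, with made-up example values (15 and 11 are illustrative, not taken from the driver):

#include <stdio.h>

int main(void)
{
	unsigned int log_wqe_size = 15;    /* e.g. a 32 KiB multi-packet WQE (example value) */
	unsigned int log_stride_size = 11; /* e.g. 2 KiB strides (example value) */

	if (log_wqe_size < log_stride_size) {
		/* This is the condition the new WARN() reports: the
		 * subtraction below would wrap around.
		 */
		fprintf(stderr, "log WQE size %u < log stride size %u\n",
			log_wqe_size, log_stride_size);
		return 1;
	}

	printf("log_num_strides = %u, strides per WQE = %u\n",
	       log_wqe_size - log_stride_size,
	       1u << (log_wqe_size - log_stride_size));
	return 0;
}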
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 89510cac46c2..505ba41195b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -287,6 +287,78 @@ int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in) return err; } +int mlx5e_port_query_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir, + u8 pool_idx, void *out, int size_out) +{ + u32 in[MLX5_ST_SZ_DW(sbpr_reg)] = {}; + + MLX5_SET(sbpr_reg, in, desc, desc); + MLX5_SET(sbpr_reg, in, dir, dir); + MLX5_SET(sbpr_reg, in, pool, pool_idx); + + return mlx5_core_access_reg(mdev, in, sizeof(in), out, size_out, MLX5_REG_SBPR, 0, 0); +} + +int mlx5e_port_set_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir, + u8 pool_idx, u32 infi_size, u32 size) +{ + u32 out[MLX5_ST_SZ_DW(sbpr_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(sbpr_reg)] = {}; + + MLX5_SET(sbpr_reg, in, desc, desc); + MLX5_SET(sbpr_reg, in, dir, dir); + MLX5_SET(sbpr_reg, in, pool, pool_idx); + MLX5_SET(sbpr_reg, in, infi_size, infi_size); + MLX5_SET(sbpr_reg, in, size, size); + MLX5_SET(sbpr_reg, in, mode, 1); + + return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_SBPR, 0, 1); +} + +static int mlx5e_port_query_sbcm(struct mlx5_core_dev *mdev, u32 desc, + u8 pg_buff_idx, u8 dir, void *out, + int size_out) +{ + u32 in[MLX5_ST_SZ_DW(sbcm_reg)] = {}; + + MLX5_SET(sbcm_reg, in, desc, desc); + MLX5_SET(sbcm_reg, in, local_port, 1); + MLX5_SET(sbcm_reg, in, pg_buff, pg_buff_idx); + MLX5_SET(sbcm_reg, in, dir, dir); + + return mlx5_core_access_reg(mdev, in, sizeof(in), out, size_out, MLX5_REG_SBCM, 0, 0); +} + +int mlx5e_port_set_sbcm(struct mlx5_core_dev *mdev, u32 desc, u8 pg_buff_idx, + u8 dir, u8 infi_size, u32 max_buff, u8 pool_idx) +{ + u32 out[MLX5_ST_SZ_DW(sbcm_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(sbcm_reg)] = {}; + u32 min_buff; + int err; + u8 exc; + + err = mlx5e_port_query_sbcm(mdev, desc, pg_buff_idx, dir, out, + sizeof(out)); + if (err) + return err; + + exc = MLX5_GET(sbcm_reg, out, exc); + min_buff = MLX5_GET(sbcm_reg, out, min_buff); + + MLX5_SET(sbcm_reg, in, desc, desc); + MLX5_SET(sbcm_reg, in, local_port, 1); + MLX5_SET(sbcm_reg, in, pg_buff, pg_buff_idx); + MLX5_SET(sbcm_reg, in, dir, dir); + MLX5_SET(sbcm_reg, in, exc, exc); + MLX5_SET(sbcm_reg, in, min_buff, min_buff); + MLX5_SET(sbcm_reg, in, infi_max, infi_size); + MLX5_SET(sbcm_reg, in, max_buff, max_buff); + MLX5_SET(sbcm_reg, in, pool, pool_idx); + + return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_SBCM, 0, 1); +} + /* buffer[i]: buffer that priority i mapped to */ int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h index 7a7defe60792..3f474e370828 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h @@ -57,6 +57,12 @@ u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed, bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev); int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); +int mlx5e_port_query_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir, + u8 pool_idx, void *out, int size_out); +int mlx5e_port_set_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir, + u8 pool_idx, u32 infi_size, u32 size); +int mlx5e_port_set_sbcm(struct 
mlx5_core_dev *mdev, u32 desc, u8 pg_buff_idx, + u8 dir, u8 infi_size, u32 max_buff, u8 pool_idx); int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index c9d5d8d93994..57f4b1b50421 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -73,6 +73,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, port_buffer->buffer[i].lossy); } + port_buffer->headroom_size = total_used; port_buffer->port_buffer_size = MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz; port_buffer->spare_buffer_size = @@ -86,16 +87,204 @@ out: return err; } +struct mlx5e_buffer_pool { + u32 infi_size; + u32 size; + u32 buff_occupancy; +}; + +static int mlx5e_port_query_pool(struct mlx5_core_dev *mdev, + struct mlx5e_buffer_pool *buffer_pool, + u32 desc, u8 dir, u8 pool_idx) +{ + u32 out[MLX5_ST_SZ_DW(sbpr_reg)] = {}; + int err; + + err = mlx5e_port_query_sbpr(mdev, desc, dir, pool_idx, out, + sizeof(out)); + if (err) + return err; + + buffer_pool->size = MLX5_GET(sbpr_reg, out, size); + buffer_pool->infi_size = MLX5_GET(sbpr_reg, out, infi_size); + buffer_pool->buff_occupancy = MLX5_GET(sbpr_reg, out, buff_occupancy); + + return err; +} + +enum { + MLX5_INGRESS_DIR = 0, + MLX5_EGRESS_DIR = 1, +}; + +enum { + MLX5_LOSSY_POOL = 0, + MLX5_LOSSLESS_POOL = 1, +}; + +/* No limit on usage of shared buffer pool (max_buff=0) */ +#define MLX5_SB_POOL_NO_THRESHOLD 0 +/* Shared buffer pool usage threshold when calculated + * dynamically in alpha units. alpha=13 is equivalent to + * HW_alpha of [(1/128) * 2 ^ (alpha-1)] = 32, where HW_alpha + * equates to the following portion of the shared buffer pool: + * [32 / (1 + n * 32)] While *n* is the number of buffers + * that are using the shared buffer pool. + */ +#define MLX5_SB_POOL_THRESHOLD 13 + +/* Shared buffer class management parameters */ +struct mlx5_sbcm_params { + u8 pool_idx; + u8 max_buff; + u8 infi_size; +}; + +static const struct mlx5_sbcm_params sbcm_default = { + .pool_idx = MLX5_LOSSY_POOL, + .max_buff = MLX5_SB_POOL_NO_THRESHOLD, + .infi_size = 0, +}; + +static const struct mlx5_sbcm_params sbcm_lossy = { + .pool_idx = MLX5_LOSSY_POOL, + .max_buff = MLX5_SB_POOL_NO_THRESHOLD, + .infi_size = 1, +}; + +static const struct mlx5_sbcm_params sbcm_lossless = { + .pool_idx = MLX5_LOSSLESS_POOL, + .max_buff = MLX5_SB_POOL_THRESHOLD, + .infi_size = 0, +}; + +static const struct mlx5_sbcm_params sbcm_lossless_no_threshold = { + .pool_idx = MLX5_LOSSLESS_POOL, + .max_buff = MLX5_SB_POOL_NO_THRESHOLD, + .infi_size = 1, +}; + +/** + * select_sbcm_params() - selects the shared buffer pool configuration + * + * @buffer: <input> port buffer to retrieve params of + * @lossless_buff_count: <input> number of lossless buffers in total + * + * The selection is based on the following rules: + * 1. If buffer size is 0, no shared buffer pool is used. + * 2. If buffer is lossy, use lossy shared buffer pool. + * 3. If there are more than 1 lossless buffers, use lossless shared buffer pool + * with threshold. + * 4. If there is only 1 lossless buffer, use lossless shared buffer pool + * without threshold. 
+ * + * @return const struct mlx5_sbcm_params* selected values + */ +static const struct mlx5_sbcm_params * +select_sbcm_params(struct mlx5e_bufferx_reg *buffer, u8 lossless_buff_count) +{ + if (buffer->size == 0) + return &sbcm_default; + + if (buffer->lossy) + return &sbcm_lossy; + + if (lossless_buff_count > 1) + return &sbcm_lossless; + + return &sbcm_lossless_no_threshold; +} + +static int port_update_pool_cfg(struct mlx5_core_dev *mdev, + struct mlx5e_port_buffer *port_buffer) +{ + const struct mlx5_sbcm_params *p; + u8 lossless_buff_count = 0; + int err; + int i; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return 0; + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) + lossless_buff_count += ((port_buffer->buffer[i].size) && + (!(port_buffer->buffer[i].lossy))); + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + p = select_sbcm_params(&port_buffer->buffer[i], lossless_buff_count); + err = mlx5e_port_set_sbcm(mdev, 0, i, + MLX5_INGRESS_DIR, + p->infi_size, + p->max_buff, + p->pool_idx); + if (err) + return err; + } + + return 0; +} + +static int port_update_shared_buffer(struct mlx5_core_dev *mdev, + u32 current_headroom_size, + u32 new_headroom_size) +{ + struct mlx5e_buffer_pool lossless_ipool; + struct mlx5e_buffer_pool lossy_epool; + u32 lossless_ipool_size; + u32 shared_buffer_size; + u32 total_buffer_size; + u32 lossy_epool_size; + int err; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return 0; + + err = mlx5e_port_query_pool(mdev, &lossy_epool, 0, MLX5_EGRESS_DIR, + MLX5_LOSSY_POOL); + if (err) + return err; + + err = mlx5e_port_query_pool(mdev, &lossless_ipool, 0, MLX5_INGRESS_DIR, + MLX5_LOSSLESS_POOL); + if (err) + return err; + + total_buffer_size = current_headroom_size + lossy_epool.size + + lossless_ipool.size; + shared_buffer_size = total_buffer_size - new_headroom_size; + + if (shared_buffer_size < 4) { + pr_err("Requested port buffer is too large, not enough space left for shared buffer\n"); + return -EINVAL; + } + + /* Total shared buffer size is split in a ratio of 3:1 between + * lossy and lossless pools respectively. 
+ */ + lossy_epool_size = (shared_buffer_size / 4) * 3; + lossless_ipool_size = shared_buffer_size / 4; + + mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0, + lossy_epool_size); + mlx5e_port_set_sbpr(mdev, 0, MLX5_INGRESS_DIR, MLX5_LOSSLESS_POOL, 0, + lossless_ipool_size); + return 0; +} + static int port_set_buffer(struct mlx5e_priv *priv, struct mlx5e_port_buffer *port_buffer) { u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; struct mlx5_core_dev *mdev = priv->mdev; int sz = MLX5_ST_SZ_BYTES(pbmc_reg); + u32 new_headroom_size = 0; + u32 current_headroom_size; void *in; int err; int i; + current_headroom_size = port_buffer->headroom_size; + in = kzalloc(sz, GFP_KERNEL); if (!in) return -ENOMEM; @@ -110,6 +299,7 @@ static int port_set_buffer(struct mlx5e_priv *priv, u64 xoff = port_buffer->buffer[i].xoff; u64 xon = port_buffer->buffer[i].xon; + new_headroom_size += size; do_div(size, port_buff_cell_sz); do_div(xoff, port_buff_cell_sz); do_div(xon, port_buff_cell_sz); @@ -119,6 +309,17 @@ static int port_set_buffer(struct mlx5e_priv *priv, MLX5_SET(bufferx_reg, buffer, xon_threshold, xon); } + new_headroom_size /= port_buff_cell_sz; + current_headroom_size /= port_buff_cell_sz; + err = port_update_shared_buffer(priv->mdev, current_headroom_size, + new_headroom_size); + if (err) + return err; + + err = port_update_pool_cfg(priv->mdev, port_buffer); + if (err) + return err; + err = mlx5e_port_set_pbmc(mdev, in); out: kfree(in); @@ -174,6 +375,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, /** * update_buffer_lossy - Update buffer configuration based on pfc + * @mdev: port function core device * @max_mtu: netdev's max_mtu * @pfc_en: <input> current pfc configuration * @buffer: <input> current prio to buffer mapping @@ -192,7 +394,8 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, * @return: 0 if no error, * sets change to true if buffer configuration was modified. 
*/ -static int update_buffer_lossy(unsigned int max_mtu, +static int update_buffer_lossy(struct mlx5_core_dev *mdev, + unsigned int max_mtu, u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz, struct mlx5e_port_buffer *port_buffer, bool *change) @@ -229,6 +432,10 @@ static int update_buffer_lossy(unsigned int max_mtu, } if (changed) { + err = port_update_pool_cfg(mdev, port_buffer); + if (err) + return err; + err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; @@ -293,23 +500,30 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, } if (change & MLX5E_PORT_BUFFER_PFC) { + mlx5e_dbg(HW, priv, "%s: requested PFC per priority bitmask: 0x%x\n", + __func__, pfc->pfc_en); err = mlx5e_port_query_priority2buffer(priv->mdev, buffer); if (err) return err; - err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz, - &port_buffer, &update_buffer); + err = update_buffer_lossy(priv->mdev, max_mtu, pfc->pfc_en, buffer, xoff, + port_buff_cell_sz, &port_buffer, + &update_buffer); if (err) return err; } if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) { update_prio2buffer = true; + for (i = 0; i < MLX5E_MAX_BUFFER; i++) + mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n", + __func__, i, prio2buffer[i]); + err = fill_pfc_en(priv->mdev, &curr_pfc_en); if (err) return err; - err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff, + err = update_buffer_lossy(priv->mdev, max_mtu, curr_pfc_en, prio2buffer, xoff, port_buff_cell_sz, &port_buffer, &update_buffer); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h index 80af7a5ac604..a6ef118de758 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h @@ -60,6 +60,7 @@ struct mlx5e_bufferx_reg { struct mlx5e_port_buffer { u32 port_buffer_size; u32 spare_buffer_size; + u32 headroom_size; struct mlx5e_bufferx_reg buffer[MLX5E_MAX_BUFFER]; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 1ae15b8536a8..95edab4a1732 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -754,6 +754,6 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv) if (!priv->rx_reporter) return; - devlink_port_health_reporter_destroy(priv->rx_reporter); + devlink_health_reporter_destroy(priv->rx_reporter); priv->rx_reporter = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 60bc5b577ab9..b195dbbf6c90 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -609,6 +609,6 @@ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv) if (!priv->tx_reporter) return; - devlink_port_health_reporter_destroy(priv->tx_reporter); + devlink_health_reporter_destroy(priv->tx_reporter); priv->tx_reporter = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c index 78af8a3175bf..7758a425bfa8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c @@ -28,7 +28,7 @@ struct mlx5e_flow_meter_aso_obj { int base_id; int total_meters; - unsigned long 
meters_map[0]; /* must be at the end of this struct */ + unsigned long meters_map[]; /* must be at the end of this struct */ }; struct mlx5e_flow_meters { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index 2b7fd1c0e643..f575646d2f50 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -95,8 +95,6 @@ struct mlx5e_tc_flow { */ struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS]; struct mlx5e_tc_flow *peer_flow; - struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */ - struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */ struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */ struct list_head hairpin; /* flows sharing the same hairpin */ struct list_head peer; /* flows with peer flow */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index e6f64d890fb3..83bb0811e774 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -745,8 +745,6 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, if (err) goto out; - esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value, - misc_parameters.vxlan_vni); esw_attr->rx_tun_attr->decap_vport = vport_num; } else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) { int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 2aaf8ab857b8..780224fd67a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -1349,7 +1349,8 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv, mlx5e_tc_unoffload_from_slow_path(esw, flow); else mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr); - mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr); + + mlx5e_tc_detach_mod_hdr(priv, flow, attr); attr->modify_hdr = NULL; esw_attr->dests[flow->tmp_entry_index].flags &= @@ -1405,7 +1406,7 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv, continue; } - err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr); + err = mlx5e_tc_attach_mod_hdr(priv, flow, attr); if (err) { mlx5_core_warn(priv->mdev, "Failed to update flow mod_hdr err=%d", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 853f312cd757..5578f92f7e0f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -445,7 +445,7 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size) { - WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev)); + WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < (u16)mlx5e_get_max_sq_wqebbs(mdev)); /* A WQE must not cross the page boundary, hence two conditions: * 1. Its size must not exceed the page size. 
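Editor's note (illustrative, not part of the patch): two hunks above, mlx5e_rx_wqe_cyc in en.h (DECLARE_FLEX_ARRAY) and mlx5e_flow_meter_aso_obj in en/tc/meter.c (meters_map[0] becoming meters_map[]), replace zero-length trailing arrays with C99 flexible array members. A hedged userspace sketch of how such a trailing member is allocated; the struct and field names here are invented for the example, and calloc() stands in for the kernel's kzalloc(struct_size(...)):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical object for illustration; only the trailing flexible
 * array mirrors the driver's layout rule ("must be at the end").
 */
struct example_obj {
	int base_id;
	int total_meters;
	unsigned long bitmap[];	/* flexible array member, last field */
};

int main(void)
{
	size_t nwords = 4;
	struct example_obj *obj;

	/* Kernel code would use kzalloc(struct_size(obj, bitmap, nwords), GFP_KERNEL);
	 * here the struct is simply oversized by hand.
	 */
	obj = calloc(1, sizeof(*obj) + nwords * sizeof(obj->bitmap[0]));
	if (!obj)
		return 1;

	obj->total_meters = (int)(nwords * 8 * sizeof(unsigned long));
	printf("object with %zu bitmap words allocated\n", nwords);
	free(obj);
	return 0;
}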
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index ff03c43833bb..81a567e17264 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -7,6 +7,18 @@ #include "en/health.h" #include <net/xdp_sock_drv.h> +static int mlx5e_legacy_rq_validate_xsk(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk) +{ + if (!mlx5e_rx_is_linear_skb(mdev, params, xsk)) { + mlx5_core_err(mdev, "Legacy RQ linear mode for XSK can't be activated with current params\n"); + return -EINVAL; + } + + return 0; +} + /* The limitation of 2048 can be altered, but shouldn't go beyond the minimal * stride size of striding RQ. */ @@ -17,8 +29,11 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params, struct mlx5_core_dev *mdev) { /* AF_XDP doesn't support frames larger than PAGE_SIZE. */ - if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) + if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) { + mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size, + MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE); return false; + } /* frag_sz is different for regular and XSK RQs, so ensure that linear * SKB mode is possible. @@ -27,7 +42,7 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params, case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk); default: /* MLX5_WQ_TYPE_CYCLIC */ - return mlx5e_rx_is_linear_skb(mdev, params, xsk); + return !mlx5e_legacy_rq_validate_xsk(mdev, params, xsk); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c index d7c020f72401..88a5aed9d678 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c @@ -365,7 +365,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) accel_fs_tcp_destroy_table(fs, i); - kvfree(accel_tcp); + kfree(accel_tcp); mlx5e_fs_set_accel_tcp(fs, NULL); } @@ -377,7 +377,7 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) if (!MLX5_CAP_FLOWTABLE_NIC_RX(mlx5e_fs_get_mdev(fs), ft_field_support.outer_ip_version)) return -EOPNOTSUPP; - accel_tcp = kvzalloc(sizeof(*accel_tcp), GFP_KERNEL); + accel_tcp = kzalloc(sizeof(*accel_tcp), GFP_KERNEL); if (!accel_tcp) return -ENOMEM; mlx5e_fs_set_accel_tcp(fs, accel_tcp); @@ -397,7 +397,7 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) err_destroy_tables: while (--i >= 0) accel_fs_tcp_destroy_table(fs, i); - kvfree(accel_tcp); + kfree(accel_tcp); mlx5e_fs_set_accel_tcp(fs, NULL); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index da2184c94203..eb5b09f81dec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB // Copyright (c) 2019 Mellanox Technologies. 
+#include <linux/debugfs.h> #include "en.h" #include "lib/mlx5.h" #include "en_accel/ktls.h" @@ -177,6 +178,15 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv) destroy_workqueue(priv->tls->rx_wq); } +static void mlx5e_tls_debugfs_init(struct mlx5e_tls *tls, + struct dentry *dfs_root) +{ + if (IS_ERR_OR_NULL(dfs_root)) + return; + + tls->debugfs.dfs = debugfs_create_dir("tls", dfs_root); +} + int mlx5e_ktls_init(struct mlx5e_priv *priv) { struct mlx5e_tls *tls; @@ -189,11 +199,23 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv) return -ENOMEM; priv->tls = tls; + priv->tls->mdev = priv->mdev; + + mlx5e_tls_debugfs_init(tls, priv->dfs_root); + return 0; } void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { + struct mlx5e_tls *tls = priv->tls; + + if (!mlx5e_is_ktls_device(priv->mdev)) + return; + + debugfs_remove_recursive(tls->debugfs.dfs); + tls->debugfs.dfs = NULL; + kfree(priv->tls); priv->tls = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index 1c35045e41fb..fccf995ee16d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -4,6 +4,7 @@ #ifndef __MLX5E_KTLS_H__ #define __MLX5E_KTLS_H__ +#include <linux/debugfs.h> #include <linux/tls.h> #include <net/tls.h> #include "en.h" @@ -72,10 +73,17 @@ struct mlx5e_tls_sw_stats { atomic64_t rx_tls_del; }; +struct mlx5e_tls_debugfs { + struct dentry *dfs; + struct dentry *dfs_tx; +}; + struct mlx5e_tls { + struct mlx5_core_dev *mdev; struct mlx5e_tls_sw_stats sw_stats; struct workqueue_struct *rx_wq; struct mlx5e_tls_tx_pool *tx_pool; + struct mlx5e_tls_debugfs debugfs; }; int mlx5e_ktls_init(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 78072bf93f3f..6db27062b765 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB // Copyright (c) 2019 Mellanox Technologies. +#include <linux/debugfs.h> #include "en_accel/ktls.h" #include "en_accel/ktls_txrx.h" #include "en_accel/ktls_utils.h" @@ -886,8 +887,24 @@ err_out: return false; } +static void mlx5e_tls_tx_debugfs_init(struct mlx5e_tls *tls, + struct dentry *dfs_root) +{ + if (IS_ERR_OR_NULL(dfs_root)) + return; + + tls->debugfs.dfs_tx = debugfs_create_dir("tx", dfs_root); + if (!tls->debugfs.dfs_tx) + return; + + debugfs_create_size_t("pool_size", 0400, tls->debugfs.dfs_tx, + &tls->tx_pool->size); +} + int mlx5e_ktls_init_tx(struct mlx5e_priv *priv) { + struct mlx5e_tls *tls = priv->tls; + if (!mlx5e_is_ktls_tx(priv->mdev)) return 0; @@ -895,6 +912,8 @@ int mlx5e_ktls_init_tx(struct mlx5e_priv *priv) if (!priv->tls->tx_pool) return -ENOMEM; + mlx5e_tls_tx_debugfs_init(tls, tls->debugfs.dfs); + return 0; } @@ -903,6 +922,9 @@ void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv) if (!mlx5e_is_ktls_tx(priv->mdev)) return; + debugfs_remove_recursive(priv->tls->debugfs.dfs_tx); + priv->tls->debugfs.dfs_tx = NULL; + mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool); priv->tls->tx_pool = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 1892ccb889b3..7298fe782e9e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -30,6 +30,7 @@ * SOFTWARE. 
*/ +#include <linux/debugfs.h> #include <linux/list.h> #include <linux/ip.h> #include <linux/ipv6.h> @@ -67,6 +68,7 @@ struct mlx5e_flow_steering { struct mlx5e_fs_udp *udp; struct mlx5e_fs_any *any; struct mlx5e_ptp_fs *ptp_fs; + struct dentry *dfs_root; }; static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs, @@ -104,6 +106,11 @@ static inline int mlx5e_hash_l2(const u8 *addr) return addr[5]; } +struct dentry *mlx5e_fs_get_debugfs_root(struct mlx5e_flow_steering *fs) +{ + return fs->dfs_root; +} + static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr) { struct mlx5e_l2_hash_node *hn; @@ -1429,9 +1436,19 @@ static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs) static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs) { } #endif +static void mlx5e_fs_debugfs_init(struct mlx5e_flow_steering *fs, + struct dentry *dfs_root) +{ + if (IS_ERR_OR_NULL(dfs_root)) + return; + + fs->dfs_root = debugfs_create_dir("fs", dfs_root); +} + struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile, struct mlx5_core_dev *mdev, - bool state_destroy) + bool state_destroy, + struct dentry *dfs_root) { struct mlx5e_flow_steering *fs; int err; @@ -1458,6 +1475,8 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile, if (err) goto err_free_tc; + mlx5e_fs_debugfs_init(fs, dfs_root); + return fs; err_free_tc: mlx5e_fs_tc_free(fs); @@ -1471,6 +1490,7 @@ err: void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs) { + debugfs_remove_recursive(fs->dfs_root); mlx5e_fs_ethtool_free(fs); mlx5e_fs_tc_free(fs); mlx5e_fs_vlan_free(fs); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index abcc614b6191..1e0afaa31dd0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -35,6 +35,7 @@ #include <net/vxlan.h> #include <net/geneve.h> #include <linux/bpf.h> +#include <linux/debugfs.h> #include <linux/if_bridge.h> #include <linux/filter.h> #include <net/page_pool.h> @@ -5233,7 +5234,8 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, mlx5e_timestamp_init(priv); fs = mlx5e_fs_init(priv->profile, mdev, - !test_bit(MLX5E_STATE_DESTROYING, &priv->state)); + !test_bit(MLX5E_STATE_DESTROYING, &priv->state), + priv->dfs_root); if (!fs) { err = -ENOMEM; mlx5_core_err(mdev, "FS initialization failed, %d\n", err); @@ -5874,7 +5876,8 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv) static int mlx5e_resume(struct auxiliary_device *adev) { struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); - struct mlx5e_priv *priv = auxiliary_get_drvdata(adev); + struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev); + struct mlx5e_priv *priv = mlx5e_dev->priv; struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = edev->mdev; int err; @@ -5897,7 +5900,8 @@ static int mlx5e_resume(struct auxiliary_device *adev) static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) { - struct mlx5e_priv *priv = auxiliary_get_drvdata(adev); + struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev); + struct mlx5e_priv *priv = mlx5e_dev->priv; struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; @@ -5915,26 +5919,36 @@ static int mlx5e_probe(struct auxiliary_device *adev, struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); const struct mlx5e_profile *profile = &mlx5e_nic_profile; struct mlx5_core_dev *mdev = edev->mdev; + struct 
mlx5e_dev *mlx5e_dev; struct net_device *netdev; pm_message_t state = {}; struct mlx5e_priv *priv; int err; + mlx5e_dev = mlx5e_create_devlink(&adev->dev); + if (IS_ERR(mlx5e_dev)) + return PTR_ERR(mlx5e_dev); + auxiliary_set_drvdata(adev, mlx5e_dev); + netdev = mlx5e_create_netdev(mdev, profile); if (!netdev) { mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); - return -ENOMEM; + err = -ENOMEM; + goto err_devlink_unregister; } mlx5e_build_nic_netdev(netdev); priv = netdev_priv(netdev); - auxiliary_set_drvdata(adev, priv); + mlx5e_dev->priv = priv; priv->profile = profile; priv->ppriv = NULL; - err = mlx5e_devlink_port_register(priv); + priv->dfs_root = debugfs_create_dir("nic", + mlx5_debugfs_get_dev_root(priv->mdev)); + + err = mlx5e_devlink_port_register(mlx5e_dev, priv); if (err) { mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err); goto err_destroy_netdev; @@ -5971,13 +5985,17 @@ err_profile_cleanup: err_devlink_cleanup: mlx5e_devlink_port_unregister(priv); err_destroy_netdev: + debugfs_remove_recursive(priv->dfs_root); mlx5e_destroy_netdev(priv); +err_devlink_unregister: + mlx5e_destroy_devlink(mlx5e_dev); return err; } static void mlx5e_remove(struct auxiliary_device *adev) { - struct mlx5e_priv *priv = auxiliary_get_drvdata(adev); + struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev); + struct mlx5e_priv *priv = mlx5e_dev->priv; pm_message_t state = {}; mlx5e_dcbnl_delete_app(priv); @@ -5985,7 +6003,9 @@ static void mlx5e_remove(struct auxiliary_device *adev) mlx5e_suspend(adev, state); priv->profile->cleanup(priv); mlx5e_devlink_port_unregister(priv); + debugfs_remove_recursive(priv->dfs_root); mlx5e_destroy_netdev(priv); + mlx5e_destroy_devlink(mlx5e_dev); } static const struct auxiliary_device_id mlx5e_id_table[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 7d90e5b72854..8d29310c7e48 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -789,8 +789,10 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev, { struct mlx5e_priv *priv = netdev_priv(netdev); - priv->fs = mlx5e_fs_init(priv->profile, mdev, - !test_bit(MLX5E_STATE_DESTROYING, &priv->state)); + priv->fs = + mlx5e_fs_init(priv->profile, mdev, + !test_bit(MLX5E_STATE_DESTROYING, &priv->state), + priv->dfs_root); if (!priv->fs) { netdev_err(priv->netdev, "FS allocation failed\n"); return -ENOMEM; @@ -808,7 +810,8 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv = netdev_priv(netdev); priv->fs = mlx5e_fs_init(priv->profile, mdev, - !test_bit(MLX5E_STATE_DESTROYING, &priv->state)); + !test_bit(MLX5E_STATE_DESTROYING, &priv->state), + priv->dfs_root); if (!priv->fs) { netdev_err(priv->netdev, "FS allocation failed\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 243d5d7750be..4e6f5caf8ab6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -71,6 +71,12 @@ #define MLX5E_TC_TABLE_NUM_GROUPS 4 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18) +struct mlx5e_hairpin_params { + struct mlx5_core_dev *mdev; + u32 num_queues; + u32 queue_size; +}; + struct mlx5e_tc_table { /* Protects the dynamic assignment of the t parameter * which is the nic tc root table. 
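The hunks above wire up a debugfs hierarchy for the netdev: the probe path creates a "nic" directory under the device's debugfs root, flow steering and kTLS create "fs" and "tls" subdirectories beneath it, and each owner removes its subtree with debugfs_remove_recursive() on cleanup. A minimal, self-contained sketch of that create/teardown idiom follows; the struct and function names are illustrative, not the driver's.

#include <linux/debugfs.h>
#include <linux/err.h>

/* Illustrative private struct owning one debugfs directory. */
struct example_priv {
        struct dentry *dfs_root;
        size_t pool_size;
};

static void example_debugfs_init(struct example_priv *priv, struct dentry *parent)
{
        /* A NULL or ERR parent means debugfs is unavailable; skip quietly. */
        if (IS_ERR_OR_NULL(parent))
                return;

        priv->dfs_root = debugfs_create_dir("example", parent);
        debugfs_create_size_t("pool_size", 0400, priv->dfs_root,
                              &priv->pool_size);
}

static void example_debugfs_cleanup(struct example_priv *priv)
{
        /* Removes the directory and every file created under it. */
        debugfs_remove_recursive(priv->dfs_root);
        priv->dfs_root = NULL;
}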
@@ -93,6 +99,8 @@ struct mlx5e_tc_table { struct mlx5_tc_ct_priv *ct; struct mapping_ctx *mapping; + struct mlx5e_hairpin_params hairpin_params; + struct dentry *dfs_root; }; struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { @@ -639,36 +647,36 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) &tc->mod_hdr; } -static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct mlx5e_tc_flow_parse_attr *parse_attr) +int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr) { - struct mlx5_modify_hdr *modify_hdr; struct mlx5e_mod_hdr_handle *mh; mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow), mlx5e_get_flow_namespace(flow), - &parse_attr->mod_hdr_acts); + &attr->parse_attr->mod_hdr_acts); if (IS_ERR(mh)) return PTR_ERR(mh); - modify_hdr = mlx5e_mod_hdr_get(mh); - flow->attr->modify_hdr = modify_hdr; - flow->mh = mh; + WARN_ON(attr->modify_hdr); + attr->modify_hdr = mlx5e_mod_hdr_get(mh); + attr->mh = mh; return 0; } -static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow) +void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr) { /* flow wasn't fully initialized */ - if (!flow->mh) + if (!attr->mh) return; mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow), - flow->mh); - flow->mh = NULL; + attr->mh); + attr->mh = NULL; } static @@ -1017,6 +1025,138 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, return 0; } +static int debugfs_hairpin_queues_set(void *data, u64 val) +{ + struct mlx5e_hairpin_params *hp = data; + + if (!val) { + mlx5_core_err(hp->mdev, + "Number of hairpin queues must be > 0\n"); + return -EINVAL; + } + + hp->num_queues = val; + + return 0; +} + +static int debugfs_hairpin_queues_get(void *data, u64 *val) +{ + struct mlx5e_hairpin_params *hp = data; + + *val = hp->num_queues; + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_queues, debugfs_hairpin_queues_get, + debugfs_hairpin_queues_set, "%llu\n"); + +static int debugfs_hairpin_queue_size_set(void *data, u64 val) +{ + struct mlx5e_hairpin_params *hp = data; + + if (val > BIT(MLX5_CAP_GEN(hp->mdev, log_max_hairpin_num_packets))) { + mlx5_core_err(hp->mdev, + "Invalid hairpin queue size, must be <= %lu\n", + BIT(MLX5_CAP_GEN(hp->mdev, + log_max_hairpin_num_packets))); + return -EINVAL; + } + + hp->queue_size = roundup_pow_of_two(val); + + return 0; +} + +static int debugfs_hairpin_queue_size_get(void *data, u64 *val) +{ + struct mlx5e_hairpin_params *hp = data; + + *val = hp->queue_size; + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_queue_size, + debugfs_hairpin_queue_size_get, + debugfs_hairpin_queue_size_set, "%llu\n"); + +static int debugfs_hairpin_num_active_get(void *data, u64 *val) +{ + struct mlx5e_tc_table *tc = data; + struct mlx5e_hairpin_entry *hpe; + u32 cnt = 0; + u32 bkt; + + mutex_lock(&tc->hairpin_tbl_lock); + hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist) + cnt++; + mutex_unlock(&tc->hairpin_tbl_lock); + + *val = cnt; + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_num_active, + debugfs_hairpin_num_active_get, NULL, "%llu\n"); + +static int debugfs_hairpin_table_dump_show(struct seq_file *file, void *priv) + +{ + struct mlx5e_tc_table *tc = file->private; + struct mlx5e_hairpin_entry *hpe; + u32 bkt; + + mutex_lock(&tc->hairpin_tbl_lock); + hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist) + seq_printf(file, 
"Hairpin peer_vhca_id %u prio %u refcnt %u\n", + hpe->peer_vhca_id, hpe->prio, + refcount_read(&hpe->refcnt)); + mutex_unlock(&tc->hairpin_tbl_lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_hairpin_table_dump); + +static void mlx5e_tc_debugfs_init(struct mlx5e_tc_table *tc, + struct dentry *dfs_root) +{ + if (IS_ERR_OR_NULL(dfs_root)) + return; + + tc->dfs_root = debugfs_create_dir("tc", dfs_root); + if (!tc->dfs_root) + return; + + debugfs_create_file("hairpin_num_queues", 0644, tc->dfs_root, + &tc->hairpin_params, &fops_hairpin_queues); + debugfs_create_file("hairpin_queue_size", 0644, tc->dfs_root, + &tc->hairpin_params, &fops_hairpin_queue_size); + debugfs_create_file("hairpin_num_active", 0444, tc->dfs_root, tc, + &fops_hairpin_num_active); + debugfs_create_file("hairpin_table_dump", 0444, tc->dfs_root, tc, + &debugfs_hairpin_table_dump_fops); +} + +static void +mlx5e_hairpin_params_init(struct mlx5e_hairpin_params *hairpin_params, + struct mlx5_core_dev *mdev) +{ + u64 link_speed64; + u32 link_speed; + + hairpin_params->mdev = mdev; + /* set hairpin pair per each 50Gbs share of the link */ + mlx5e_port_max_linkspeed(mdev, &link_speed); + link_speed = max_t(u32, link_speed, 50000); + link_speed64 = link_speed; + do_div(link_speed64, 50000); + hairpin_params->num_queues = link_speed64; + + hairpin_params->queue_size = + BIT(min_t(u32, 16 - MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), + MLX5_CAP_GEN(mdev, log_max_hairpin_num_packets))); +} + static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct mlx5e_tc_flow_parse_attr *parse_attr, @@ -1028,8 +1168,6 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, struct mlx5_core_dev *peer_mdev; struct mlx5e_hairpin_entry *hpe; struct mlx5e_hairpin *hp; - u64 link_speed64; - u32 link_speed; u8 match_prio; u16 peer_id; int err; @@ -1082,21 +1220,16 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, hash_hairpin_info(peer_id, match_prio)); mutex_unlock(&tc->hairpin_tbl_lock); - params.log_data_size = clamp_t(u8, 16, - MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz), - MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz)); - params.log_num_packets = params.log_data_size - - MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev); - params.log_num_packets = min_t(u8, params.log_num_packets, - MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets)); + params.log_num_packets = ilog2(tc->hairpin_params.queue_size); + params.log_data_size = + clamp_t(u32, + params.log_num_packets + + MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev), + MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz), + MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz)); params.q_counter = priv->q_counter; - /* set hairpin pair per each 50Gbs share of the link */ - mlx5e_port_max_linkspeed(priv->mdev, &link_speed); - link_speed = max_t(u32, link_speed, 50000); - link_speed64 = link_speed; - do_div(link_speed64, 50000); - params.num_channels = link_speed64; + params.num_channels = tc->hairpin_params.num_queues; hp = mlx5e_hairpin_create(priv, ¶ms, peer_ifindex); hpe->hp = hp; @@ -1301,7 +1434,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { - err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); + err = mlx5e_tc_attach_mod_hdr(priv, flow, attr); if (err) return err; } @@ -1361,7 +1494,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts); - mlx5e_detach_mod_hdr(priv, 
flow); + mlx5e_tc_detach_mod_hdr(priv, flow, attr); } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) @@ -1472,7 +1605,7 @@ skip_restore: goto err_offload; } - flow->slow_mh = mh; + flow->attr->slow_mh = mh; flow->chain_mapping = chain_mapping; flow_flag_set(flow, SLOW); @@ -1497,6 +1630,7 @@ err_get_chain: void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, struct mlx5e_tc_flow *flow) { + struct mlx5e_mod_hdr_handle *slow_mh = flow->attr->slow_mh; struct mlx5_flow_attr *slow_attr; slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB); @@ -1509,16 +1643,16 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; slow_attr->esw_attr->split_count = 0; slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH; - if (flow->slow_mh) { + if (slow_mh) { slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh); + slow_attr->modify_hdr = mlx5e_mod_hdr_get(slow_mh); } mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); - if (flow->slow_mh) { - mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh); + if (slow_mh) { + mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), slow_mh); mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping); flow->chain_mapping = 0; - flow->slow_mh = NULL; + flow->attr->slow_mh = NULL; } flow_flag_clear(flow, SLOW); kfree(slow_attr); @@ -1629,26 +1763,6 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro return err; } -int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct mlx5_flow_attr *attr) -{ - struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts; - struct mlx5_modify_hdr *mod_hdr; - - mod_hdr = mlx5_modify_header_alloc(priv->mdev, - mlx5e_get_flow_namespace(flow), - mod_hdr_acts->num_actions, - mod_hdr_acts->actions); - if (IS_ERR(mod_hdr)) - return PTR_ERR(mod_hdr); - - WARN_ON(attr->modify_hdr); - attr->modify_hdr = mod_hdr; - - return 0; -} - static int set_encap_dests(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, @@ -1768,7 +1882,6 @@ verify_attr_actions(u32 actions, struct netlink_ext_ack *extack) static int post_process_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr, - bool is_post_act_attr, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch; @@ -1790,27 +1903,21 @@ post_process_attr(struct mlx5e_tc_flow *flow, } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { - if (vf_tun || is_post_act_attr) { - err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr); - if (err) - goto err_out; - } else { - err = mlx5e_attach_mod_hdr(flow->priv, flow, attr->parse_attr); - if (err) - goto err_out; - } + err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr); + if (err) + goto err_out; } if (attr->branch_true && attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { - err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_true); + err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_true); if (err) goto err_out; } if (attr->branch_false && attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { - err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_false); + err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_false); if (err) goto err_out; } @@ -1924,7 +2031,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, esw_attr->int_port = int_port; } - err = post_process_attr(flow, attr, 
false, extack); + err = post_process_attr(flow, attr, extack); if (err) goto err_out; @@ -2009,10 +2116,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts); - if (vf_tun && attr->modify_hdr) - mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr); - else - mlx5e_detach_mod_hdr(priv, flow); + mlx5e_tc_detach_mod_hdr(priv, flow, attr); } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) @@ -2492,13 +2596,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, err = mlx5e_tc_set_attr_rx_tun(flow, spec); if (err) return err; - } else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { + } else if (tunnel) { struct mlx5_flow_spec *tmp_spec; tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL); if (!tmp_spec) { - NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec"); - netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec"); + NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for tunnel tmp spec"); + netdev_warn(priv->netdev, "Failed to allocate memory for tunnel tmp spec"); return -ENOMEM; } memcpy(tmp_spec, spec, sizeof(*tmp_spec)); @@ -3831,7 +3935,7 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) if (err) goto out_free; - err = post_process_attr(flow, attr, true, extack); + err = post_process_attr(flow, attr, extack); if (err) goto out_free; @@ -4398,8 +4502,7 @@ mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr) if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts); - if (attr->modify_hdr) - mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr); + mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr); } } @@ -4521,9 +4624,6 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, if (err) goto err_free; - /* always set IP version for indirect table handling */ - flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true); - err = parse_tc_fdb_actions(priv, &rule->action, flow, extack); if (err) goto err_free; @@ -5220,6 +5320,8 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr, MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act); + mlx5e_hairpin_params_init(&tc->hairpin_params, dev); + tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event; err = register_netdevice_notifier_dev_net(priv->netdev, &tc->netdevice_nb, @@ -5230,6 +5332,8 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) goto err_reg; } + mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs)); + return 0; err_reg: @@ -5258,6 +5362,8 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) { struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs); + debugfs_remove_recursive(tc->dfs_root); + if (tc->netdevice_nb.notifier_call) unregister_netdevice_notifier_dev_net(priv->netdev, &tc->netdevice_nb, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 50af70ef22f3..ce516dc7f3fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -71,6 +71,8 @@ struct mlx5_flow_attr { u32 action; struct mlx5_fc *counter; struct mlx5_modify_hdr *modify_hdr; + struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */ + struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */ struct mlx5_ct_attr ct_attr; struct mlx5e_sample_attr 
sample_attr; struct mlx5e_meter_attr meter_attr; @@ -82,7 +84,6 @@ struct mlx5_flow_attr { struct mlx5_flow_table *dest_ft; u8 inner_match_level; u8 outer_match_level; - u8 ip_version; u8 tun_ip_version; int tunnel_id; /* mapped tunnel id */ u32 flags; @@ -134,7 +135,6 @@ struct mlx5_rx_tun_attr { __be32 v4; struct in6_addr v6; } dst_ip; /* Valid if decap_vport is not zero */ - u32 vni; }; #define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16 @@ -285,9 +285,13 @@ int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev, enum mlx5e_tc_attr_to_reg type, u32 data); -int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct mlx5_flow_attr *attr); +int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr); + +void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr); void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev, struct flow_match_basic *match, bool outer, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c index c9a91158e99c..9959e9fd15a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c @@ -16,18 +16,12 @@ #include "lib/fs_chains.h" #include "en/mod_hdr.h" -#define MLX5_ESW_INDIR_TABLE_SIZE 128 -#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2) +#define MLX5_ESW_INDIR_TABLE_SIZE 2 +#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 2) #define MLX5_ESW_INDIR_TABLE_FWD_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 1) struct mlx5_esw_indir_table_rule { - struct list_head list; struct mlx5_flow_handle *handle; - union { - __be32 v4; - struct in6_addr v6; - } dst_ip; - u32 vni; struct mlx5_modify_hdr *mh; refcount_t refcnt; }; @@ -38,12 +32,10 @@ struct mlx5_esw_indir_table_entry { struct mlx5_flow_group *recirc_grp; struct mlx5_flow_group *fwd_grp; struct mlx5_flow_handle *fwd_rule; - struct list_head recirc_rules; - int recirc_cnt; + struct mlx5_esw_indir_table_rule *recirc_rule; int fwd_ref; u16 vport; - u8 ip_version; }; struct mlx5_esw_indir_table { @@ -89,7 +81,6 @@ mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw, return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK && vf_sf_vport && esw->dev == dest_mdev && - attr->ip_version && attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE; } @@ -101,27 +92,8 @@ mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr) return esw_attr->rx_tun_attr ? 
esw_attr->rx_tun_attr->decap_vport : 0; } -static struct mlx5_esw_indir_table_rule * -mlx5_esw_indir_table_rule_lookup(struct mlx5_esw_indir_table_entry *e, - struct mlx5_esw_flow_attr *attr) -{ - struct mlx5_esw_indir_table_rule *rule; - - list_for_each_entry(rule, &e->recirc_rules, list) - if (rule->vni == attr->rx_tun_attr->vni && - !memcmp(&rule->dst_ip, &attr->rx_tun_attr->dst_ip, - sizeof(attr->rx_tun_attr->dst_ip))) - goto found; - return NULL; - -found: - refcount_inc(&rule->refcnt); - return rule; -} - static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, - struct mlx5_flow_spec *spec, struct mlx5_esw_indir_table_entry *e) { struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; @@ -130,73 +102,18 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw, struct mlx5_flow_destination dest = {}; struct mlx5_esw_indir_table_rule *rule; struct mlx5_flow_act flow_act = {}; - struct mlx5_flow_spec *rule_spec; struct mlx5_flow_handle *handle; int err = 0; u32 data; - rule = mlx5_esw_indir_table_rule_lookup(e, esw_attr); - if (rule) + if (e->recirc_rule) { + refcount_inc(&e->recirc_rule->refcnt); return 0; - - if (e->recirc_cnt == MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX) - return -EINVAL; - - rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL); - if (!rule_spec) - return -ENOMEM; - - rule = kzalloc(sizeof(*rule), GFP_KERNEL); - if (!rule) { - err = -ENOMEM; - goto out; - } - - rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | - MLX5_MATCH_MISC_PARAMETERS | - MLX5_MATCH_MISC_PARAMETERS_2; - if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version)) { - MLX5_SET(fte_match_param, rule_spec->match_criteria, - outer_headers.ip_version, 0xf); - MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, - attr->ip_version); - } else if (attr->ip_version) { - MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, - outer_headers.ethertype); - MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ethertype, - (attr->ip_version == 4 ? 
ETH_P_IP : ETH_P_IPV6)); - } else { - err = -EOPNOTSUPP; - goto err_ethertype; } - if (attr->ip_version == 4) { - MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, - outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); - MLX5_SET(fte_match_param, rule_spec->match_value, - outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4, - ntohl(esw_attr->rx_tun_attr->dst_ip.v4)); - } else if (attr->ip_version == 6) { - int len = sizeof(struct in6_addr); - - memset(MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria, - outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - 0xff, len); - memcpy(MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, - outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &esw_attr->rx_tun_attr->dst_ip.v6, len); - } - - MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, - misc_parameters.vxlan_vni); - MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters.vxlan_vni, - MLX5_GET(fte_match_param, spec->match_value, misc_parameters.vxlan_vni)); - - MLX5_SET(fte_match_param, rule_spec->match_criteria, - misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask()); - MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0, - mlx5_eswitch_get_vport_metadata_for_match(esw_attr->in_mdev->priv.eswitch, - MLX5_VPORT_UPLINK)); + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; /* Modify flow source to recirculate packet */ data = mlx5_eswitch_get_vport_metadata_for_set(esw, esw_attr->rx_tun_attr->decap_vport); @@ -219,13 +136,14 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw, flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND; + flow_act.fg = e->recirc_grp; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = mlx5_chains_get_table(chains, 0, 1, 0); if (IS_ERR(dest.ft)) { err = PTR_ERR(dest.ft); goto err_table; } - handle = mlx5_add_flow_rules(e->ft, rule_spec, &flow_act, &dest, 1); + handle = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1); if (IS_ERR(handle)) { err = PTR_ERR(handle); goto err_handle; @@ -233,14 +151,10 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw, mlx5e_mod_hdr_dealloc(&mod_acts); rule->handle = handle; - rule->vni = esw_attr->rx_tun_attr->vni; rule->mh = flow_act.modify_hdr; - memcpy(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip, - sizeof(esw_attr->rx_tun_attr->dst_ip)); refcount_set(&rule->refcnt, 1); - list_add(&rule->list, &e->recirc_rules); - e->recirc_cnt++; - goto out; + e->recirc_rule = rule; + return 0; err_handle: mlx5_chains_put_table(chains, 0, 1, 0); @@ -250,89 +164,44 @@ err_mod_hdr_alloc: err_mod_hdr_regc1: mlx5e_mod_hdr_dealloc(&mod_acts); err_mod_hdr_regc0: -err_ethertype: kfree(rule); -out: - kvfree(rule_spec); return err; } static void mlx5_esw_indir_table_rule_put(struct mlx5_eswitch *esw, - struct mlx5_flow_attr *attr, struct mlx5_esw_indir_table_entry *e) { - struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5_esw_indir_table_rule *rule = e->recirc_rule; struct mlx5_fs_chains *chains = esw_chains(esw); - struct mlx5_esw_indir_table_rule *rule; - list_for_each_entry(rule, &e->recirc_rules, list) - if (rule->vni == esw_attr->rx_tun_attr->vni && - !memcmp(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip, - sizeof(esw_attr->rx_tun_attr->dst_ip))) - goto found; - - return; + if (!rule) + return; -found: if (!refcount_dec_and_test(&rule->refcnt)) return; 
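The indir_table.c rework above caches a single recirculation rule per table entry and guards its lifetime with a refcount_t: the get path takes an extra reference with refcount_inc() when the rule already exists, and the put path frees it only when refcount_dec_and_test() drops the last reference. A minimal sketch of that idiom, with illustrative names rather than the driver's, is:

#include <linux/refcount.h>
#include <linux/slab.h>

struct cached_rule_example {
        refcount_t refcnt;
        /* hardware handle, modify header, etc. would live here */
};

/* Reuse the cached object if present, otherwise create it with one reference. */
static struct cached_rule_example *cached_rule_get(struct cached_rule_example **slot)
{
        if (*slot) {
                refcount_inc(&(*slot)->refcnt);
                return *slot;
        }

        *slot = kzalloc(sizeof(**slot), GFP_KERNEL);
        if (!*slot)
                return NULL;

        refcount_set(&(*slot)->refcnt, 1);
        return *slot;
}

/* Tear down only when the last reference is dropped. */
static void cached_rule_put(struct cached_rule_example **slot)
{
        if (!*slot || !refcount_dec_and_test(&(*slot)->refcnt))
                return;

        kfree(*slot);
        *slot = NULL;
}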
mlx5_del_flow_rules(rule->handle); mlx5_chains_put_table(chains, 0, 1, 0); mlx5_modify_header_dealloc(esw->dev, rule->mh); - list_del(&rule->list); kfree(rule); - e->recirc_cnt--; + e->recirc_rule = NULL; } -static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw, - struct mlx5_flow_attr *attr, - struct mlx5_flow_spec *spec, - struct mlx5_esw_indir_table_entry *e) +static int mlx5_create_indir_recirc_group(struct mlx5_esw_indir_table_entry *e) { int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - u32 *in, *match; + u32 *in; in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; - MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS | - MLX5_MATCH_MISC_PARAMETERS | MLX5_MATCH_MISC_PARAMETERS_2); - match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - - if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version)) - MLX5_SET(fte_match_param, match, outer_headers.ip_version, 0xf); - else - MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ethertype); - - if (attr->ip_version == 4) { - MLX5_SET_TO_ONES(fte_match_param, match, - outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); - } else if (attr->ip_version == 6) { - memset(MLX5_ADDR_OF(fte_match_param, match, - outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - 0xff, sizeof(struct in6_addr)); - } else { - err = -EOPNOTSUPP; - goto out; - } - - MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters.vxlan_vni); - MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0, - mlx5_eswitch_get_vport_metadata_mask()); MLX5_SET(create_flow_group_in, in, start_flow_index, 0); - MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX); + MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX); e->recirc_grp = mlx5_create_flow_group(e->ft, in); - if (IS_ERR(e->recirc_grp)) { + if (IS_ERR(e->recirc_grp)) err = PTR_ERR(e->recirc_grp); - goto out; - } - INIT_LIST_HEAD(&e->recirc_rules); - e->recirc_cnt = 0; - -out: kvfree(in); return err; } @@ -343,19 +212,12 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw, int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_destination dest = {}; struct mlx5_flow_act flow_act = {}; - struct mlx5_flow_spec *spec; u32 *in; in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) { - kvfree(in); - return -ENOMEM; - } - /* Hold one entry */ MLX5_SET(create_flow_group_in, in, start_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX); MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX); @@ -366,25 +228,25 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw, } flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_act.fg = e->fwd_grp; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.vport.num = e->vport; dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID; - e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1); + e->fwd_rule = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1); if (IS_ERR(e->fwd_rule)) { mlx5_destroy_flow_group(e->fwd_grp); err = PTR_ERR(e->fwd_rule); } err_out: - kvfree(spec); kvfree(in); return err; } static struct mlx5_esw_indir_table_entry * mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, - struct mlx5_flow_spec *spec, u16 vport, bool decap) + u16 vport, bool decap) { struct mlx5_flow_table_attr 
ft_attr = {}; struct mlx5_flow_namespace *root_ns; @@ -412,15 +274,14 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att } e->ft = ft; e->vport = vport; - e->ip_version = attr->ip_version; e->fwd_ref = !decap; - err = mlx5_create_indir_recirc_group(esw, attr, spec, e); + err = mlx5_create_indir_recirc_group(e); if (err) goto recirc_grp_err; if (decap) { - err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e); + err = mlx5_esw_indir_table_rule_get(esw, attr, e); if (err) goto recirc_rule_err; } @@ -430,13 +291,13 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att goto fwd_grp_err; hash_add(esw->fdb_table.offloads.indir->table, &e->hlist, - vport << 16 | attr->ip_version); + vport << 16); return e; fwd_grp_err: if (decap) - mlx5_esw_indir_table_rule_put(esw, attr, e); + mlx5_esw_indir_table_rule_put(esw, e); recirc_rule_err: mlx5_destroy_flow_group(e->recirc_grp); recirc_grp_err: @@ -447,13 +308,13 @@ tbl_err: } static struct mlx5_esw_indir_table_entry * -mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_version) +mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport) { struct mlx5_esw_indir_table_entry *e; - u32 key = vport << 16 | ip_version; + u32 key = vport << 16; hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key) - if (e->vport == vport && e->ip_version == ip_version) + if (e->vport == vport) return e; return NULL; @@ -461,24 +322,23 @@ mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_ver struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, - struct mlx5_flow_spec *spec, u16 vport, bool decap) { struct mlx5_esw_indir_table_entry *e; int err; mutex_lock(&esw->fdb_table.offloads.indir->lock); - e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version); + e = mlx5_esw_indir_table_entry_lookup(esw, vport); if (e) { if (!decap) { e->fwd_ref++; } else { - err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e); + err = mlx5_esw_indir_table_rule_get(esw, attr, e); if (err) goto out_err; } } else { - e = mlx5_esw_indir_table_entry_create(esw, attr, spec, vport, decap); + e = mlx5_esw_indir_table_entry_create(esw, attr, vport, decap); if (IS_ERR(e)) { err = PTR_ERR(e); esw_warn(esw->dev, "Failed to create indirection table, err %d.\n", err); @@ -494,22 +354,21 @@ out_err: } void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw, - struct mlx5_flow_attr *attr, u16 vport, bool decap) { struct mlx5_esw_indir_table_entry *e; mutex_lock(&esw->fdb_table.offloads.indir->lock); - e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version); + e = mlx5_esw_indir_table_entry_lookup(esw, vport); if (!e) goto out; if (!decap) e->fwd_ref--; else - mlx5_esw_indir_table_rule_put(esw, attr, e); + mlx5_esw_indir_table_rule_put(esw, e); - if (e->fwd_ref || e->recirc_cnt) + if (e->fwd_ref || e->recirc_rule) goto out; hash_del(&e->hlist); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h index 21d56b49d14b..036f5b3a341b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h @@ -13,10 +13,8 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir); struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, - struct mlx5_flow_spec *spec, u16 vport, bool decap); void 
mlx5_esw_indir_table_put(struct mlx5_eswitch *esw, - struct mlx5_flow_attr *attr, u16 vport, bool decap); bool @@ -44,7 +42,6 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir) static inline struct mlx5_flow_table * mlx5_esw_indir_table_get(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, - struct mlx5_flow_spec *spec, u16 vport, bool decap) { return ERR_PTR(-EOPNOTSUPP); @@ -52,7 +49,6 @@ mlx5_esw_indir_table_get(struct mlx5_eswitch *esw, static inline void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw, - struct mlx5_flow_attr *attr, u16 vport, bool decap) { } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 9daf55e90367..d809c9192496 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1250,7 +1250,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw) if (err) return err; } else { - esw_warn(dev, "engress ACL is not supported by FW\n"); + esw_warn(dev, "egress ACL is not supported by FW\n"); } if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { @@ -1406,9 +1406,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); if (clear_vf) mlx5_eswitch_clear_vf_vports_info(esw); - /* If disabling sriov in switchdev mode, free meta rules here - * because it depends on num_vfs. - */ + if (esw->mode == MLX5_ESWITCH_OFFLOADS) { struct devlink *devlink = priv_to_devlink(esw->dev); @@ -1489,7 +1487,7 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 * void *hca_caps; int err; - if (!mlx5_core_is_ecpf(dev)) { + if (!mlx5_core_is_ecpf(dev) || mlx5_core_is_management_pf(dev)) { *max_sfs = 0; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index c981fa77f439..5fb9d5e99734 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -179,15 +179,14 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, static int esw_setup_decap_indir(struct mlx5_eswitch *esw, - struct mlx5_flow_attr *attr, - struct mlx5_flow_spec *spec) + struct mlx5_flow_attr *attr) { struct mlx5_flow_table *ft; if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE)) return -EOPNOTSUPP; - ft = mlx5_esw_indir_table_get(esw, attr, spec, + ft = mlx5_esw_indir_table_get(esw, attr, mlx5_esw_indir_table_decap_vport(attr), true); return PTR_ERR_OR_ZERO(ft); } @@ -197,7 +196,7 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr) { if (mlx5_esw_indir_table_decap_vport(attr)) - mlx5_esw_indir_table_put(esw, attr, + mlx5_esw_indir_table_put(esw, mlx5_esw_indir_table_decap_vport(attr), true); } @@ -235,7 +234,6 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, - struct mlx5_flow_spec *spec, int i) { flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; @@ -243,7 +241,7 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest, dest[i].ft = attr->dest_ft; if (mlx5_esw_indir_table_decap_vport(attr)) - return esw_setup_decap_indir(esw, attr, spec); + return esw_setup_decap_indir(esw, attr); return 0; } @@ -298,7 +296,7 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_ mlx5_chains_put_table(chains, 0, 1, 0); else if (mlx5_esw_indir_table_needed(esw, 
attr, esw_attr->dests[i].rep->vport, esw_attr->dests[i].mdev)) - mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport, + mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport, false); } @@ -384,7 +382,6 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, - struct mlx5_flow_spec *spec, bool ignore_flow_lvl, int *i) { @@ -399,7 +396,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest, flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec, + dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, esw_attr->dests[j].rep->vport, false); if (IS_ERR(dest[*i].ft)) { err = PTR_ERR(dest[*i].ft); @@ -408,7 +405,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest, } if (mlx5_esw_indir_table_decap_vport(attr)) { - err = esw_setup_decap_indir(esw, attr, spec); + err = esw_setup_decap_indir(esw, attr); if (err) goto err_indir_tbl_get; } @@ -511,14 +508,14 @@ esw_setup_dests(struct mlx5_flow_destination *dest, err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i); (*i)++; } else if (esw_is_indir_table(esw, attr)) { - err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i); + err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i); } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) { err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i); } else { *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i); if (attr->dest_ft) { - err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i); + err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i); (*i)++; } else if (attr->dest_chain) { err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, @@ -727,7 +724,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; for (i = 0; i < esw_attr->split_count; i++) { if (esw_is_indir_table(esw, attr)) - err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i); + err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i); else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr, &i); @@ -3575,9 +3572,9 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) if (IS_ERR(esw)) return PTR_ERR(esw); - down_write(&esw->mode_lock); + down_read(&esw->mode_lock); err = esw_mode_to_devlink(esw->mode, mode); - up_write(&esw->mode_lock); + up_read(&esw->mode_lock); return err; } @@ -3675,9 +3672,9 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) if (IS_ERR(esw)) return PTR_ERR(esw); - down_write(&esw->mode_lock); + down_read(&esw->mode_lock); err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); - up_write(&esw->mode_lock); + up_read(&esw->mode_lock); return err; } @@ -3749,9 +3746,9 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, if (IS_ERR(esw)) return PTR_ERR(esw); - down_write(&esw->mode_lock); + down_read(&esw->mode_lock); *encap = esw->offloads.encap; - up_write(&esw->mode_lock); + up_read(&esw->mode_lock); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 911cf4d23964..c2a4f86bc890 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -412,7 
+412,8 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) int err; priv->fs = mlx5e_fs_init(priv->profile, mdev, - !test_bit(MLX5E_STATE_DESTROYING, &priv->state)); + !test_bit(MLX5E_STATE_DESTROYING, &priv->state), + priv->dfs_root); if (!priv->fs) { netdev_err(priv->netdev, "FS allocation failed\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 69318b143268..75510a12ab02 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -69,6 +69,13 @@ enum { MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS = BIT(0xa), }; +enum { + MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN = S16_MIN, + MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX = S16_MAX, + MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN = -200000, + MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000, +}; + static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev) { return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev)); @@ -86,6 +93,22 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev) return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify); } +static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta) +{ + s64 min = MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN; + s64 max = MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX; + + if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range)) { + min = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN; + max = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX; + } + + if (delta < min || delta > max) + return false; + + return true; +} + static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size) { u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {}; @@ -288,8 +311,8 @@ static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta) if (!mlx5_modify_mtutc_allowed(mdev)) return 0; - /* HW time adjustment range is s16. If out of range, settime instead */ - if (delta < S16_MIN || delta > S16_MAX) { + /* HW time adjustment range is checked. 
If out of range, settime instead */ + if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) { struct timespec64 ts; s64 ns; @@ -326,6 +349,19 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } +static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta) +{ + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); + struct mlx5_core_dev *mdev; + + mdev = container_of(clock, struct mlx5_core_dev, clock); + + if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) + return -ERANGE; + + return mlx5_ptp_adjtime(ptp, delta); +} + static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq) { u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {}; @@ -688,6 +724,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = { .n_pins = 0, .pps = 0, .adjfine = mlx5_ptp_adjfine, + .adjphase = mlx5_ptp_adjphase, .adjtime = mlx5_ptp_adjtime, .gettimex64 = mlx5_ptp_gettimex, .settime64 = mlx5_ptp_settime, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index a4476cb4c3b3..fd2d31cdbcf9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -724,7 +724,6 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn, struct mlx5dr_action *action) { struct postsend_info send_info = {}; - int ret; send_info.write.addr = (uintptr_t)action->rewrite->data; send_info.write.length = action->rewrite->num_of_actions * @@ -734,9 +733,7 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn, mlx5dr_icm_pool_get_chunk_mr_addr(action->rewrite->chunk); send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(action->rewrite->chunk); - ret = dr_postsend_icm_data(dmn, &send_info); - - return ret; + return dr_postsend_icm_data(dmn, &send_info); } static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h index 5a1027b07215..a453b9cd9033 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h @@ -14,6 +14,7 @@ #include <linux/irqreturn.h> #include <linux/netdevice.h> #include <linux/irq.h> +#include <linux/phy.h> /* The silicon design supports a maximum RX ring size of * 32K entries. 
Based on current testing this maximum size @@ -67,6 +68,29 @@ struct mlxbf_gige_stats { u64 rx_filter_discard_pkts; }; +struct mlxbf_gige_reg_param { + u32 mask; + u32 shift; +}; + +struct mlxbf_gige_mdio_gw { + u32 gw_address; + u32 read_data_address; + struct mlxbf_gige_reg_param busy; + struct mlxbf_gige_reg_param write_data; + struct mlxbf_gige_reg_param read_data; + struct mlxbf_gige_reg_param devad; + struct mlxbf_gige_reg_param partad; + struct mlxbf_gige_reg_param opcode; + struct mlxbf_gige_reg_param st1; +}; + +struct mlxbf_gige_link_cfg { + void (*set_phy_link_mode)(struct phy_device *phydev); + void (*adjust_link)(struct net_device *netdev); + phy_interface_t phy_mode; +}; + struct mlxbf_gige { void __iomem *base; void __iomem *llu_base; @@ -102,6 +126,9 @@ struct mlxbf_gige { u8 valid_polarity; struct napi_struct napi; struct mlxbf_gige_stats stats; + u8 hw_version; + struct mlxbf_gige_mdio_gw *mdio_gw; + int prev_speed; }; /* Rx Work Queue Element definitions */ diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c index 41ebef25a930..253d7ad9b809 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c @@ -135,4 +135,5 @@ const struct ethtool_ops mlxbf_gige_ethtool_ops = { .nway_reset = phy_ethtool_nway_reset, .get_pauseparam = mlxbf_gige_get_pauseparam, .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index 2292d63a279c..694de9513b9f 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -205,7 +205,7 @@ static int mlxbf_gige_stop(struct net_device *netdev) } static int mlxbf_gige_eth_ioctl(struct net_device *netdev, - struct ifreq *ifr, int cmd) + struct ifreq *ifr, int cmd) { if (!(netif_running(netdev))) return -EINVAL; @@ -263,13 +263,99 @@ static const struct net_device_ops mlxbf_gige_netdev_ops = { .ndo_get_stats64 = mlxbf_gige_get_stats64, }; -static void mlxbf_gige_adjust_link(struct net_device *netdev) +static void mlxbf_gige_bf2_adjust_link(struct net_device *netdev) { struct phy_device *phydev = netdev->phydev; phy_print_status(phydev); } +static void mlxbf_gige_bf3_adjust_link(struct net_device *netdev) +{ + struct mlxbf_gige *priv = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + u8 sgmii_mode; + u16 ipg_size; + u32 val; + + if (phydev->link && phydev->speed != priv->prev_speed) { + switch (phydev->speed) { + case 1000: + ipg_size = MLXBF_GIGE_1G_IPG_SIZE; + sgmii_mode = MLXBF_GIGE_1G_SGMII_MODE; + break; + case 100: + ipg_size = MLXBF_GIGE_100M_IPG_SIZE; + sgmii_mode = MLXBF_GIGE_100M_SGMII_MODE; + break; + case 10: + ipg_size = MLXBF_GIGE_10M_IPG_SIZE; + sgmii_mode = MLXBF_GIGE_10M_SGMII_MODE; + break; + default: + return; + } + + val = readl(priv->plu_base + MLXBF_GIGE_PLU_TX_REG0); + val &= ~(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK | MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK); + val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK, ipg_size); + val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK, sgmii_mode); + writel(val, priv->plu_base + MLXBF_GIGE_PLU_TX_REG0); + + val = readl(priv->plu_base + MLXBF_GIGE_PLU_RX_REG0); + val &= ~MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK; + val |= 
FIELD_PREP(MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK, sgmii_mode); + writel(val, priv->plu_base + MLXBF_GIGE_PLU_RX_REG0); + + priv->prev_speed = phydev->speed; + } + + phy_print_status(phydev); +} + +static void mlxbf_gige_bf2_set_phy_link_mode(struct phy_device *phydev) +{ + /* MAC only supports 1000T full duplex mode */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + + /* Only symmetric pause with flow control enabled is supported so no + * need to negotiate pause. + */ + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising); +} + +static void mlxbf_gige_bf3_set_phy_link_mode(struct phy_device *phydev) +{ + /* MAC only supports full duplex mode */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + + /* Only symmetric pause with flow control enabled is supported so no + * need to negotiate pause. + */ + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising); +} + +static struct mlxbf_gige_link_cfg mlxbf_gige_link_cfgs[] = { + [MLXBF_GIGE_VERSION_BF2] = { + .set_phy_link_mode = mlxbf_gige_bf2_set_phy_link_mode, + .adjust_link = mlxbf_gige_bf2_adjust_link, + .phy_mode = PHY_INTERFACE_MODE_GMII + }, + [MLXBF_GIGE_VERSION_BF3] = { + .set_phy_link_mode = mlxbf_gige_bf3_set_phy_link_mode, + .adjust_link = mlxbf_gige_bf3_adjust_link, + .phy_mode = PHY_INTERFACE_MODE_SGMII + } +}; + static int mlxbf_gige_probe(struct platform_device *pdev) { struct phy_device *phydev; @@ -315,6 +401,8 @@ static int mlxbf_gige_probe(struct platform_device *pdev) spin_lock_init(&priv->lock); + priv->hw_version = readq(base + MLXBF_GIGE_VERSION); + /* Attach MDIO device */ err = mlxbf_gige_mdio_probe(pdev, priv); if (err) @@ -357,25 +445,14 @@ static int mlxbf_gige_probe(struct platform_device *pdev) phydev->irq = phy_irq; err = phy_connect_direct(netdev, phydev, - mlxbf_gige_adjust_link, - PHY_INTERFACE_MODE_GMII); + mlxbf_gige_link_cfgs[priv->hw_version].adjust_link, + mlxbf_gige_link_cfgs[priv->hw_version].phy_mode); if (err) { dev_err(&pdev->dev, "Could not attach to PHY\n"); goto out; } - /* MAC only supports 1000T full duplex mode */ - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); - - /* Only symmetric pause with flow control enabled is supported so no - * need to negotiate pause. 
- */ - linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising); + mlxbf_gige_link_cfgs[priv->hw_version].set_phy_link_mode(phydev); /* Display information about attached PHY device */ phy_attached_info(phydev); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c index aa780b1614a3..7ac06fd31011 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c @@ -23,9 +23,75 @@ #include "mlxbf_gige.h" #include "mlxbf_gige_regs.h" +#include "mlxbf_gige_mdio_bf2.h" +#include "mlxbf_gige_mdio_bf3.h" -#define MLXBF_GIGE_MDIO_GW_OFFSET 0x0 -#define MLXBF_GIGE_MDIO_CFG_OFFSET 0x4 +static struct mlxbf_gige_mdio_gw mlxbf_gige_mdio_gw_t[] = { + [MLXBF_GIGE_VERSION_BF2] = { + .gw_address = MLXBF2_GIGE_MDIO_GW_OFFSET, + .read_data_address = MLXBF2_GIGE_MDIO_GW_OFFSET, + .busy = { + .mask = MLXBF2_GIGE_MDIO_GW_BUSY_MASK, + .shift = MLXBF2_GIGE_MDIO_GW_BUSY_SHIFT, + }, + .read_data = { + .mask = MLXBF2_GIGE_MDIO_GW_AD_MASK, + .shift = MLXBF2_GIGE_MDIO_GW_AD_SHIFT, + }, + .write_data = { + .mask = MLXBF2_GIGE_MDIO_GW_AD_MASK, + .shift = MLXBF2_GIGE_MDIO_GW_AD_SHIFT, + }, + .devad = { + .mask = MLXBF2_GIGE_MDIO_GW_DEVAD_MASK, + .shift = MLXBF2_GIGE_MDIO_GW_DEVAD_SHIFT, + }, + .partad = { + .mask = MLXBF2_GIGE_MDIO_GW_PARTAD_MASK, + .shift = MLXBF2_GIGE_MDIO_GW_PARTAD_SHIFT, + }, + .opcode = { + .mask = MLXBF2_GIGE_MDIO_GW_OPCODE_MASK, + .shift = MLXBF2_GIGE_MDIO_GW_OPCODE_SHIFT, + }, + .st1 = { + .mask = MLXBF2_GIGE_MDIO_GW_ST1_MASK, + .shift = MLXBF2_GIGE_MDIO_GW_ST1_SHIFT, + }, + }, + [MLXBF_GIGE_VERSION_BF3] = { + .gw_address = MLXBF3_GIGE_MDIO_GW_OFFSET, + .read_data_address = MLXBF3_GIGE_MDIO_DATA_READ, + .busy = { + .mask = MLXBF3_GIGE_MDIO_GW_BUSY_MASK, + .shift = MLXBF3_GIGE_MDIO_GW_BUSY_SHIFT, + }, + .read_data = { + .mask = MLXBF3_GIGE_MDIO_GW_DATA_READ_MASK, + .shift = MLXBF3_GIGE_MDIO_GW_DATA_READ_SHIFT, + }, + .write_data = { + .mask = MLXBF3_GIGE_MDIO_GW_DATA_MASK, + .shift = MLXBF3_GIGE_MDIO_GW_DATA_SHIFT, + }, + .devad = { + .mask = MLXBF3_GIGE_MDIO_GW_DEVAD_MASK, + .shift = MLXBF3_GIGE_MDIO_GW_DEVAD_SHIFT, + }, + .partad = { + .mask = MLXBF3_GIGE_MDIO_GW_PARTAD_MASK, + .shift = MLXBF3_GIGE_MDIO_GW_PARTAD_SHIFT, + }, + .opcode = { + .mask = MLXBF3_GIGE_MDIO_GW_OPCODE_MASK, + .shift = MLXBF3_GIGE_MDIO_GW_OPCODE_SHIFT, + }, + .st1 = { + .mask = MLXBF3_GIGE_MDIO_GW_ST1_MASK, + .shift = MLXBF3_GIGE_MDIO_GW_ST1_SHIFT, + }, + }, +}; #define MLXBF_GIGE_MDIO_FREQ_REFERENCE 156250000ULL #define MLXBF_GIGE_MDIO_COREPLL_CONST 16384ULL @@ -47,30 +113,10 @@ /* Busy bit is set by software and cleared by hardware */ #define MLXBF_GIGE_MDIO_SET_BUSY 0x1 -/* MDIO GW register bits */ -#define MLXBF_GIGE_MDIO_GW_AD_MASK GENMASK(15, 0) -#define MLXBF_GIGE_MDIO_GW_DEVAD_MASK GENMASK(20, 16) -#define MLXBF_GIGE_MDIO_GW_PARTAD_MASK GENMASK(25, 21) -#define MLXBF_GIGE_MDIO_GW_OPCODE_MASK GENMASK(27, 26) -#define MLXBF_GIGE_MDIO_GW_ST1_MASK GENMASK(28, 28) -#define MLXBF_GIGE_MDIO_GW_BUSY_MASK GENMASK(30, 30) - -/* MDIO config register bits */ -#define MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK GENMASK(1, 0) -#define MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK GENMASK(2, 2) -#define MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK GENMASK(4, 4) -#define MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK GENMASK(15, 8) -#define MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16) -#define 
MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24) - -#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \ - FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \ - FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \ - FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \ - FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13)) - #define MLXBF_GIGE_BF2_COREPLL_ADDR 0x02800c30 #define MLXBF_GIGE_BF2_COREPLL_SIZE 0x0000000c +#define MLXBF_GIGE_BF3_COREPLL_ADDR 0x13409824 +#define MLXBF_GIGE_BF3_COREPLL_SIZE 0x00000010 static struct resource corepll_params[] = { [MLXBF_GIGE_VERSION_BF2] = { @@ -78,6 +124,11 @@ static struct resource corepll_params[] = { .end = MLXBF_GIGE_BF2_COREPLL_ADDR + MLXBF_GIGE_BF2_COREPLL_SIZE - 1, .name = "COREPLL_RES" }, + [MLXBF_GIGE_VERSION_BF3] = { + .start = MLXBF_GIGE_BF3_COREPLL_ADDR, + .end = MLXBF_GIGE_BF3_COREPLL_ADDR + MLXBF_GIGE_BF3_COREPLL_SIZE - 1, + .name = "COREPLL_RES" + } }; /* Returns core clock i1clk in Hz */ @@ -134,19 +185,23 @@ static u8 mdio_period_map(struct mlxbf_gige *priv) return mdio_period; } -static u32 mlxbf_gige_mdio_create_cmd(u16 data, int phy_add, +static u32 mlxbf_gige_mdio_create_cmd(struct mlxbf_gige_mdio_gw *mdio_gw, u16 data, int phy_add, int phy_reg, u32 opcode) { u32 gw_reg = 0; - gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_AD_MASK, data); - gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_DEVAD_MASK, phy_reg); - gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_PARTAD_MASK, phy_add); - gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_OPCODE_MASK, opcode); - gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_ST1_MASK, - MLXBF_GIGE_MDIO_CL22_ST1); - gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_BUSY_MASK, - MLXBF_GIGE_MDIO_SET_BUSY); + gw_reg |= ((data << mdio_gw->write_data.shift) & + mdio_gw->write_data.mask); + gw_reg |= ((phy_reg << mdio_gw->devad.shift) & + mdio_gw->devad.mask); + gw_reg |= ((phy_add << mdio_gw->partad.shift) & + mdio_gw->partad.mask); + gw_reg |= ((opcode << mdio_gw->opcode.shift) & + mdio_gw->opcode.mask); + gw_reg |= ((MLXBF_GIGE_MDIO_CL22_ST1 << mdio_gw->st1.shift) & + mdio_gw->st1.mask); + gw_reg |= ((MLXBF_GIGE_MDIO_SET_BUSY << mdio_gw->busy.shift) & + mdio_gw->busy.mask); return gw_reg; } @@ -162,25 +217,26 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg) return -EOPNOTSUPP; /* Send mdio read request */ - cmd = mlxbf_gige_mdio_create_cmd(0, phy_add, phy_reg, MLXBF_GIGE_MDIO_CL22_READ); + cmd = mlxbf_gige_mdio_create_cmd(priv->mdio_gw, 0, phy_add, phy_reg, + MLXBF_GIGE_MDIO_CL22_READ); - writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET); + writel(cmd, priv->mdio_io + priv->mdio_gw->gw_address); - ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET, - val, !(val & MLXBF_GIGE_MDIO_GW_BUSY_MASK), + ret = readl_poll_timeout_atomic(priv->mdio_io + priv->mdio_gw->gw_address, + val, !(val & priv->mdio_gw->busy.mask), 5, 1000000); if (ret) { - writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET); + writel(0, priv->mdio_io + priv->mdio_gw->gw_address); return ret; } - ret = readl(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET); + ret = readl(priv->mdio_io + priv->mdio_gw->read_data_address); /* Only return ad bits of the gw register */ - ret &= MLXBF_GIGE_MDIO_GW_AD_MASK; + ret &= priv->mdio_gw->read_data.mask; /* The MDIO lock is set on read. 
To release it, clear gw register */ - writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET); + writel(0, priv->mdio_io + priv->mdio_gw->gw_address); return ret; } @@ -197,17 +253,17 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add, return -EOPNOTSUPP; /* Send mdio write request */ - cmd = mlxbf_gige_mdio_create_cmd(val, phy_add, phy_reg, + cmd = mlxbf_gige_mdio_create_cmd(priv->mdio_gw, val, phy_add, phy_reg, MLXBF_GIGE_MDIO_CL22_WRITE); - writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET); + writel(cmd, priv->mdio_io + priv->mdio_gw->gw_address); /* If the poll timed out, drop the request */ - ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET, - temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK), + ret = readl_poll_timeout_atomic(priv->mdio_io + priv->mdio_gw->gw_address, + temp, !(temp & priv->mdio_gw->busy.mask), 5, 1000000); /* The MDIO lock is set on read. To release it, clear gw register */ - writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET); + writel(0, priv->mdio_io + priv->mdio_gw->gw_address); return ret; } @@ -219,9 +275,20 @@ static void mlxbf_gige_mdio_cfg(struct mlxbf_gige *priv) mdio_period = mdio_period_map(priv); - val = MLXBF_GIGE_MDIO_CFG_VAL; - val |= FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period); - writel(val, priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET); + if (priv->hw_version == MLXBF_GIGE_VERSION_BF2) { + val = MLXBF2_GIGE_MDIO_CFG_VAL; + val |= FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period); + writel(val, priv->mdio_io + MLXBF2_GIGE_MDIO_CFG_OFFSET); + } else { + val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | + FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1); + writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG0); + val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period); + writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG1); + val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | + FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13); + writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG2); + } } int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv) @@ -230,6 +297,9 @@ int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv) struct resource *res; int ret; + if (priv->hw_version > MLXBF_GIGE_VERSION_BF3) + return -ENODEV; + priv->mdio_io = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MDIO9); if (IS_ERR(priv->mdio_io)) return PTR_ERR(priv->mdio_io); @@ -242,13 +312,15 @@ int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv) /* For backward compatibility with older ACPI tables, also keep * CLK resource internal to the driver. */ - res = &corepll_params[MLXBF_GIGE_VERSION_BF2]; + res = &corepll_params[priv->hw_version]; } priv->clk_io = devm_ioremap(dev, res->start, resource_size(res)); if (!priv->clk_io) return -ENOMEM; + priv->mdio_gw = &mlxbf_gige_mdio_gw_t[priv->hw_version]; + mlxbf_gige_mdio_cfg(priv); priv->mdiobus = devm_mdiobus_alloc(dev); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h new file mode 100644 index 000000000000..7f1ff0ac7699 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */ + +/* MDIO support for Mellanox Gigabit Ethernet driver + * + * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, ALL RIGHTS RESERVED. 
+ * + * This software product is a proprietary product of NVIDIA CORPORATION & + * AFFILIATES (the "Company") and all right, title, and interest in and to the + * software product, including all associated intellectual property rights, are + * and shall remain exclusively with the Company. + * + * This software product is governed by the End User License Agreement + * provided with the software product. + */ + +#ifndef __MLXBF_GIGE_MDIO_BF2_H__ +#define __MLXBF_GIGE_MDIO_BF2_H__ + +#include <linux/bitfield.h> + +#define MLXBF2_GIGE_MDIO_GW_OFFSET 0x0 +#define MLXBF2_GIGE_MDIO_CFG_OFFSET 0x4 + +/* MDIO GW register bits */ +#define MLXBF2_GIGE_MDIO_GW_AD_MASK GENMASK(15, 0) +#define MLXBF2_GIGE_MDIO_GW_DEVAD_MASK GENMASK(20, 16) +#define MLXBF2_GIGE_MDIO_GW_PARTAD_MASK GENMASK(25, 21) +#define MLXBF2_GIGE_MDIO_GW_OPCODE_MASK GENMASK(27, 26) +#define MLXBF2_GIGE_MDIO_GW_ST1_MASK GENMASK(28, 28) +#define MLXBF2_GIGE_MDIO_GW_BUSY_MASK GENMASK(30, 30) + +#define MLXBF2_GIGE_MDIO_GW_AD_SHIFT 0 +#define MLXBF2_GIGE_MDIO_GW_DEVAD_SHIFT 16 +#define MLXBF2_GIGE_MDIO_GW_PARTAD_SHIFT 21 +#define MLXBF2_GIGE_MDIO_GW_OPCODE_SHIFT 26 +#define MLXBF2_GIGE_MDIO_GW_ST1_SHIFT 28 +#define MLXBF2_GIGE_MDIO_GW_BUSY_SHIFT 30 + +/* MDIO config register bits */ +#define MLXBF2_GIGE_MDIO_CFG_MDIO_MODE_MASK GENMASK(1, 0) +#define MLXBF2_GIGE_MDIO_CFG_MDIO3_3_MASK GENMASK(2, 2) +#define MLXBF2_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK GENMASK(4, 4) +#define MLXBF2_GIGE_MDIO_CFG_MDC_PERIOD_MASK GENMASK(15, 8) +#define MLXBF2_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16) +#define MLXBF2_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24) + +#define MLXBF2_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \ + FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \ + FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \ + FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \ + FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13)) + +#endif /* __MLXBF_GIGE_MDIO_BF2_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h new file mode 100644 index 000000000000..9dd9144b9173 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */ + +/* MDIO support for Mellanox Gigabit Ethernet driver + * + * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, ALL RIGHTS RESERVED. + * + * This software product is a proprietary product of NVIDIA CORPORATION & + * AFFILIATES (the "Company") and all right, title, and interest in and to the + * software product, including all associated intellectual property rights, are + * and shall remain exclusively with the Company. + * + * This software product is governed by the End User License Agreement + * provided with the software product. 
+ */ + +#ifndef __MLXBF_GIGE_MDIO_BF3_H__ +#define __MLXBF_GIGE_MDIO_BF3_H__ + +#include <linux/bitfield.h> + +#define MLXBF3_GIGE_MDIO_GW_OFFSET 0x80 +#define MLXBF3_GIGE_MDIO_DATA_READ 0x8c +#define MLXBF3_GIGE_MDIO_CFG_REG0 0x100 +#define MLXBF3_GIGE_MDIO_CFG_REG1 0x104 +#define MLXBF3_GIGE_MDIO_CFG_REG2 0x108 + +/* MDIO GW register bits */ +#define MLXBF3_GIGE_MDIO_GW_ST1_MASK GENMASK(1, 1) +#define MLXBF3_GIGE_MDIO_GW_OPCODE_MASK GENMASK(3, 2) +#define MLXBF3_GIGE_MDIO_GW_PARTAD_MASK GENMASK(8, 4) +#define MLXBF3_GIGE_MDIO_GW_DEVAD_MASK GENMASK(13, 9) +/* For BlueField-3, this field is only used for mdio write */ +#define MLXBF3_GIGE_MDIO_GW_DATA_MASK GENMASK(29, 14) +#define MLXBF3_GIGE_MDIO_GW_BUSY_MASK GENMASK(30, 30) + +#define MLXBF3_GIGE_MDIO_GW_DATA_READ_MASK GENMASK(15, 0) + +#define MLXBF3_GIGE_MDIO_GW_ST1_SHIFT 1 +#define MLXBF3_GIGE_MDIO_GW_OPCODE_SHIFT 2 +#define MLXBF3_GIGE_MDIO_GW_PARTAD_SHIFT 4 +#define MLXBF3_GIGE_MDIO_GW_DEVAD_SHIFT 9 +#define MLXBF3_GIGE_MDIO_GW_DATA_SHIFT 14 +#define MLXBF3_GIGE_MDIO_GW_BUSY_SHIFT 30 + +#define MLXBF3_GIGE_MDIO_GW_DATA_READ_SHIFT 0 + +/* MDIO config register bits */ +#define MLXBF3_GIGE_MDIO_CFG_MDIO_MODE_MASK GENMASK(1, 0) +#define MLXBF3_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK GENMASK(2, 2) +#define MLXBF3_GIGE_MDIO_CFG_MDC_PERIOD_MASK GENMASK(7, 0) +#define MLXBF3_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(7, 0) +#define MLXBF3_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(15, 8) + +#endif /* __MLXBF_GIGE_MDIO_BF3_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h index 7be3a793984d..cd0973229c9b 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h @@ -8,8 +8,11 @@ #ifndef __MLXBF_GIGE_REGS_H__ #define __MLXBF_GIGE_REGS_H__ +#include <linux/bitfield.h> + #define MLXBF_GIGE_VERSION 0x0000 #define MLXBF_GIGE_VERSION_BF2 0x0 +#define MLXBF_GIGE_VERSION_BF3 0x1 #define MLXBF_GIGE_STATUS 0x0010 #define MLXBF_GIGE_STATUS_READY BIT(0) #define MLXBF_GIGE_INT_STATUS 0x0028 @@ -77,4 +80,23 @@ */ #define MLXBF_GIGE_MMIO_REG_SZ (MLXBF_GIGE_MAC_CFG + 8) +#define MLXBF_GIGE_PLU_TX_REG0 0x80 +#define MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK GENMASK(11, 0) +#define MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK GENMASK(15, 14) + +#define MLXBF_GIGE_PLU_RX_REG0 0x10 +#define MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK GENMASK(25, 24) + +#define MLXBF_GIGE_1G_SGMII_MODE 0x0 +#define MLXBF_GIGE_10M_SGMII_MODE 0x1 +#define MLXBF_GIGE_100M_SGMII_MODE 0x2 + +/* ipg_size default value for 1G is fixed by HW to 11 + End = 12. 
+ * So for 100M it is 12 * 10 - 1 = 119 + * For 10M, it is 12 * 100 - 1 = 1199 + */ +#define MLXBF_GIGE_1G_IPG_SIZE 11 +#define MLXBF_GIGE_100M_IPG_SIZE 119 +#define MLXBF_GIGE_10M_IPG_SIZE 1199 + #endif /* !defined(__MLXBF_GIGE_REGS_H__) */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index a0a06e2eff82..33ef726e4d54 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -2051,8 +2051,8 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core) if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) return 0; - fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops, - 0, mlxsw_core); + fw_fatal = devl_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops, + 0, mlxsw_core); if (IS_ERR(fw_fatal)) { dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter"); return PTR_ERR(fw_fatal); @@ -2072,7 +2072,7 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core) err_fw_fatal_config: mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core); err_trap_register: - devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal); + devl_health_reporter_destroy(mlxsw_core->health.fw_fatal); return err; } @@ -2085,7 +2085,7 @@ static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core) mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core); /* Make sure there is no more event work scheduled */ mlxsw_core_flush_owq(); - devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal); + devl_health_reporter_destroy(mlxsw_core->health.fw_fatal); } static void mlxsw_core_irq_event_handler_init(struct mlxsw_core *mlxsw_core) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c index 83d2dc91ba2c..025e0db983fe 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -1259,9 +1259,9 @@ static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core, linecard->linecards = linecards; mutex_init(&linecard->lock); - devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core), - slot_index, &mlxsw_linecard_ops, - linecard); + devlink_linecard = devl_linecard_create(priv_to_devlink(mlxsw_core), + slot_index, &mlxsw_linecard_ops, + linecard); if (IS_ERR(devlink_linecard)) return PTR_ERR(devlink_linecard); @@ -1285,7 +1285,7 @@ static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core, if (linecard->active) mlxsw_linecard_active_clear(linecard); mlxsw_linecard_bdev_del(linecard); - devlink_linecard_destroy(linecard->devlink_linecard); + devl_linecard_destroy(linecard->devlink_linecard); mutex_destroy(&linecard->lock); } diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 534840f9a7ca..7e0871b631e4 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -792,7 +792,7 @@ static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter) !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000); } -static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index) +static int lan743x_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int index) { struct lan743x_adapter *adapter = bus->priv; u32 val, mii_access; @@ -814,8 +814,8 @@ static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index) return (int)(val & 
0xFFFF); } -static int lan743x_mdiobus_write(struct mii_bus *bus, - int phy_id, int index, u16 regval) +static int lan743x_mdiobus_write_c22(struct mii_bus *bus, + int phy_id, int index, u16 regval) { struct lan743x_adapter *adapter = bus->priv; u32 val, mii_access; @@ -835,12 +835,10 @@ static int lan743x_mdiobus_write(struct mii_bus *bus, return ret; } -static u32 lan743x_mac_mmd_access(int id, int index, int op) +static u32 lan743x_mac_mmd_access(int id, int dev_addr, int op) { - u16 dev_addr; u32 ret; - dev_addr = (index >> 16) & 0x1f; ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) & MAC_MII_ACC_PHY_ADDR_MASK_; ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) & @@ -858,7 +856,8 @@ static u32 lan743x_mac_mmd_access(int id, int index, int op) return ret; } -static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index) +static int lan743x_mdiobus_read_c45(struct mii_bus *bus, int phy_id, + int dev_addr, int index) { struct lan743x_adapter *adapter = bus->priv; u32 mmd_access; @@ -868,32 +867,30 @@ static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index) ret = lan743x_mac_mii_wait_till_not_busy(adapter); if (ret < 0) return ret; - if (index & MII_ADDR_C45) { - /* Load Register Address */ - lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff)); - mmd_access = lan743x_mac_mmd_access(phy_id, index, - MMD_ACCESS_ADDRESS); - lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); - ret = lan743x_mac_mii_wait_till_not_busy(adapter); - if (ret < 0) - return ret; - /* Read Data */ - mmd_access = lan743x_mac_mmd_access(phy_id, index, - MMD_ACCESS_READ); - lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); - ret = lan743x_mac_mii_wait_till_not_busy(adapter); - if (ret < 0) - return ret; - ret = lan743x_csr_read(adapter, MAC_MII_DATA); - return (int)(ret & 0xFFFF); - } - ret = lan743x_mdiobus_read(bus, phy_id, index); - return ret; + /* Load Register Address */ + lan743x_csr_write(adapter, MAC_MII_DATA, index); + mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr, + MMD_ACCESS_ADDRESS); + lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + + /* Read Data */ + mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr, + MMD_ACCESS_READ); + lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + + ret = lan743x_csr_read(adapter, MAC_MII_DATA); + return (int)(ret & 0xFFFF); } -static int lan743x_mdiobus_c45_write(struct mii_bus *bus, - int phy_id, int index, u16 regval) +static int lan743x_mdiobus_write_c45(struct mii_bus *bus, int phy_id, + int dev_addr, int index, u16 regval) { struct lan743x_adapter *adapter = bus->priv; u32 mmd_access; @@ -903,26 +900,23 @@ static int lan743x_mdiobus_c45_write(struct mii_bus *bus, ret = lan743x_mac_mii_wait_till_not_busy(adapter); if (ret < 0) return ret; - if (index & MII_ADDR_C45) { - /* Load Register Address */ - lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff)); - mmd_access = lan743x_mac_mmd_access(phy_id, index, - MMD_ACCESS_ADDRESS); - lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); - ret = lan743x_mac_mii_wait_till_not_busy(adapter); - if (ret < 0) - return ret; - /* Write Data */ - lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval); - mmd_access = lan743x_mac_mmd_access(phy_id, index, - MMD_ACCESS_WRITE); - lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); - ret = lan743x_mac_mii_wait_till_not_busy(adapter); - } else { - ret = 
lan743x_mdiobus_write(bus, phy_id, index, regval); - } - return ret; + /* Load Register Address */ + lan743x_csr_write(adapter, MAC_MII_DATA, (u32)index); + mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr, + MMD_ACCESS_ADDRESS); + lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + + /* Write Data */ + lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval); + mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr, + MMD_ACCESS_WRITE); + lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access); + + return lan743x_mac_mii_wait_till_not_busy(adapter); } static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter) @@ -1424,14 +1418,6 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) data = lan743x_csr_read(adapter, MAC_CR); - /* set interface mode */ - if (phy_interface_is_rgmii(phydev)) - /* RGMII */ - data &= ~MAC_CR_MII_EN_; - else - /* GMII */ - data |= MAC_CR_MII_EN_; - /* set duplex mode */ if (phydev->duplex) data |= MAC_CR_DPX_; @@ -1483,10 +1469,33 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter) netdev->phydev = NULL; } +static void lan743x_phy_interface_select(struct lan743x_adapter *adapter) +{ + u32 id_rev; + u32 data; + + data = lan743x_csr_read(adapter, MAC_CR); + id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_; + + if (adapter->is_pci11x1x && adapter->is_sgmii_en) + adapter->phy_interface = PHY_INTERFACE_MODE_SGMII; + else if (id_rev == ID_REV_ID_LAN7430_) + adapter->phy_interface = PHY_INTERFACE_MODE_GMII; + else if ((id_rev == ID_REV_ID_LAN7431_) && (data & MAC_CR_MII_EN_)) + adapter->phy_interface = PHY_INTERFACE_MODE_MII; + else + adapter->phy_interface = PHY_INTERFACE_MODE_RGMII; +} + static int lan743x_phy_open(struct lan743x_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct lan743x_phy *phy = &adapter->phy; + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + }; struct phy_device *phydev; int ret = -EIO; @@ -1497,17 +1506,25 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter) if (!phydev) { /* try internal phy */ phydev = phy_find_first(adapter->mdiobus); - if (!phydev) - goto return_error; + if (!phydev) { + if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == + ID_REV_ID_LAN7431_) { + phydev = fixed_phy_register(PHY_POLL, + &fphy_status, NULL); + if (IS_ERR(phydev)) { + netdev_err(netdev, "No PHY/fixed_PHY found\n"); + return -EIO; + } + } else { + goto return_error; + } + } - if (adapter->is_pci11x1x) - ret = phy_connect_direct(netdev, phydev, - lan743x_phy_link_status_change, - PHY_INTERFACE_MODE_RGMII); - else - ret = phy_connect_direct(netdev, phydev, - lan743x_phy_link_status_change, - PHY_INTERFACE_MODE_GMII); + lan743x_phy_interface_select(adapter); + + ret = phy_connect_direct(netdev, phydev, + lan743x_phy_link_status_change, + adapter->phy_interface); if (ret) goto return_error; } @@ -3285,9 +3302,10 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); netif_dbg(adapter, drv, adapter->netdev, "SGMII operation\n"); - adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45; - adapter->mdiobus->read = lan743x_mdiobus_c45_read; - adapter->mdiobus->write = lan743x_mdiobus_c45_write; + adapter->mdiobus->read = lan743x_mdiobus_read_c22; + adapter->mdiobus->write = lan743x_mdiobus_write_c22; + adapter->mdiobus->read_c45 = lan743x_mdiobus_read_c45; + adapter->mdiobus->write_c45 = 
lan743x_mdiobus_write_c45; adapter->mdiobus->name = "lan743x-mdiobus-c45"; netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus-c45\n"); @@ -3299,16 +3317,15 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) netif_dbg(adapter, drv, adapter->netdev, "RGMII operation\n"); // Only C22 support when RGMII I/F - adapter->mdiobus->probe_capabilities = MDIOBUS_C22; - adapter->mdiobus->read = lan743x_mdiobus_read; - adapter->mdiobus->write = lan743x_mdiobus_write; + adapter->mdiobus->read = lan743x_mdiobus_read_c22; + adapter->mdiobus->write = lan743x_mdiobus_write_c22; adapter->mdiobus->name = "lan743x-mdiobus"; netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n"); } } else { - adapter->mdiobus->read = lan743x_mdiobus_read; - adapter->mdiobus->write = lan743x_mdiobus_write; + adapter->mdiobus->read = lan743x_mdiobus_read_c22; + adapter->mdiobus->write = lan743x_mdiobus_write_c22; adapter->mdiobus->name = "lan743x-mdiobus"; netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n"); } diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 8438c3dbcf36..52609fc13ad9 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -1042,6 +1042,7 @@ struct lan743x_adapter { #define LAN743X_ADAPTER_FLAG_OTP BIT(0) u32 flags; u32 hw_cfg; + phy_interface_t phy_interface; }; #define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel)) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c b/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c index bf0cfe24a8fc..9b18156eea1a 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c @@ -4,7 +4,7 @@ #include "vcap_api_client.h" int lan966x_goto_port_add(struct lan966x_port *port, - struct flow_action_entry *act, + int from_cid, int to_cid, unsigned long goto_id, struct netlink_ext_ack *extack) { @@ -12,7 +12,7 @@ int lan966x_goto_port_add(struct lan966x_port *port, int err; err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev, - act->chain_index, goto_id, + from_cid, to_cid, goto_id, true); if (err == -EFAULT) { NL_SET_ERR_MSG_MOD(extack, "Unsupported goto chain"); @@ -29,8 +29,6 @@ int lan966x_goto_port_add(struct lan966x_port *port, return err; } - port->tc.goto_id = goto_id; - return 0; } @@ -41,14 +39,12 @@ int lan966x_goto_port_del(struct lan966x_port *port, struct lan966x *lan966x = port->lan966x; int err; - err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev, 0, + err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev, 0, 0, goto_id, false); if (err) { NL_SET_ERR_MSG_MOD(extack, "Could not disable VCAP lookups"); return err; } - port->tc.goto_id = 0; - return 0; } diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index 3491f1961835..0106f9487cbe 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -332,7 +332,6 @@ struct lan966x_port_tc { unsigned long police_id; unsigned long ingress_mirror_id; unsigned long egress_mirror_id; - unsigned long goto_id; struct flow_stats police_stat; struct flow_stats mirror_stat; }; @@ -607,7 +606,7 @@ int lan966x_tc_flower(struct lan966x_port *port, struct flow_cls_offload *f); int lan966x_goto_port_add(struct lan966x_port *port, - struct flow_action_entry *act, + int from_cid, int to_cid, unsigned long goto_id, struct netlink_ext_ack 
*extack); int lan966x_goto_port_del(struct lan966x_port *port, diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c index ba3fa917d6b7..b66a8725a071 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c @@ -82,8 +82,8 @@ static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f, } static int lan966x_tc_flower_action_check(struct vcap_control *vctrl, - struct flow_cls_offload *fco, - struct vcap_admin *admin) + struct net_device *dev, + struct flow_cls_offload *fco) { struct flow_rule *rule = flow_cls_offload_flow_rule(fco); struct flow_action_entry *actent, *last_actent = NULL; @@ -109,21 +109,23 @@ static int lan966x_tc_flower_action_check(struct vcap_control *vctrl, last_actent = actent; /* Save last action for later check */ } - /* Check that last action is a goto */ - if (last_actent->id != FLOW_ACTION_GOTO) { + /* Check that last action is a goto + * The last chain/lookup does not need to have goto action + */ + if (last_actent->id == FLOW_ACTION_GOTO) { + /* Check if the destination chain is in one of the VCAPs */ + if (!vcap_is_next_lookup(vctrl, fco->common.chain_index, + last_actent->chain_index)) { + NL_SET_ERR_MSG_MOD(fco->common.extack, + "Invalid goto chain"); + return -EINVAL; + } + } else if (!vcap_is_last_chain(vctrl, fco->common.chain_index)) { NL_SET_ERR_MSG_MOD(fco->common.extack, "Last action must be 'goto'"); return -EINVAL; } - /* Check if the goto chain is in the next lookup */ - if (!vcap_is_next_lookup(vctrl, fco->common.chain_index, - last_actent->chain_index)) { - NL_SET_ERR_MSG_MOD(fco->common.extack, - "Invalid goto chain"); - return -EINVAL; - } - /* Catch unsupported combinations of actions */ if (action_mask & BIT(FLOW_ACTION_TRAP) && action_mask & BIT(FLOW_ACTION_ACCEPT)) { @@ -145,8 +147,8 @@ static int lan966x_tc_flower_add(struct lan966x_port *port, struct vcap_rule *vrule; int err, idx; - err = lan966x_tc_flower_action_check(port->lan966x->vcap_ctrl, f, - admin); + err = lan966x_tc_flower_action_check(port->lan966x->vcap_ctrl, + port->dev, f); if (err) return err; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c index a539abaad9b6..20627323d656 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c @@ -24,7 +24,8 @@ static int lan966x_tc_matchall_add(struct lan966x_port *port, return lan966x_mirror_port_add(port, act, f->cookie, ingress, f->common.extack); case FLOW_ACTION_GOTO: - return lan966x_goto_port_add(port, act, f->cookie, + return lan966x_goto_port_add(port, f->common.chain_index, + act->chain_index, f->cookie, f->common.extack); default: NL_SET_ERR_MSG_MOD(f->common.extack, @@ -46,13 +47,8 @@ static int lan966x_tc_matchall_del(struct lan966x_port *port, f->cookie == port->tc.egress_mirror_id) { return lan966x_mirror_port_del(port, ingress, f->common.extack); - } else if (f->cookie == port->tc.goto_id) { - return lan966x_goto_port_del(port, f->cookie, - f->common.extack); } else { - NL_SET_ERR_MSG_MOD(f->common.extack, - "Unsupported action"); - return -EOPNOTSUPP; + return lan966x_goto_port_del(port, f->cookie, f->common.extack); } return 0; @@ -80,12 +76,6 @@ int lan966x_tc_matchall(struct lan966x_port *port, struct tc_cls_matchall_offload *f, bool ingress) { - if 
(!tc_cls_can_offload_and_chain0(port->dev, &f->common)) { - NL_SET_ERR_MSG_MOD(f->common.extack, - "Only chain zero is supported"); - return -EOPNOTSUPP; - } - switch (f->command) { case TC_CLSMATCHALL_REPLACE: return lan966x_tc_matchall_add(port, f, ingress); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c index a54c0426a35f..76a9fb113f50 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c @@ -390,20 +390,6 @@ static int lan966x_vcap_port_info(struct net_device *dev, return 0; } -static int lan966x_vcap_enable(struct net_device *dev, - struct vcap_admin *admin, - bool enable) -{ - struct lan966x_port *port = netdev_priv(dev); - struct lan966x *lan966x = port->lan966x; - - lan_rmw(ANA_VCAP_S2_CFG_ENA_SET(enable), - ANA_VCAP_S2_CFG_ENA, - lan966x, ANA_VCAP_S2_CFG(port->chip_port)); - - return 0; -} - static struct vcap_operations lan966x_vcap_ops = { .validate_keyset = lan966x_vcap_validate_keyset, .add_default_fields = lan966x_vcap_add_default_fields, @@ -414,7 +400,6 @@ static struct vcap_operations lan966x_vcap_ops = { .update = lan966x_vcap_update, .move = lan966x_vcap_move, .port_info = lan966x_vcap_port_info, - .enable = lan966x_vcap_enable, }; static void lan966x_vcap_admin_free(struct vcap_admin *admin) @@ -521,6 +506,12 @@ int lan966x_vcap_init(struct lan966x *lan966x) list_add_tail(&admin->list, &ctrl->list); } + for (int p = 0; p < lan966x->num_phys_ports; ++p) + if (lan966x->ports[p]) + lan_rmw(ANA_VCAP_S2_CFG_ENA_SET(true), + ANA_VCAP_S2_CFG_ENA, lan966x, + ANA_VCAP_S2_CFG(lan966x->ports[p]->chip_port)); + lan966x->vcap_ctrl = ctrl; return 0; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c index 74abb946b2a3..871a3e62f852 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c @@ -133,12 +133,17 @@ static bool sparx5_dcb_apptrust_contains(int portno, u8 selector) static int sparx5_dcb_app_update(struct net_device *dev) { + struct dcb_ieee_app_prio_map dscp_rewr_map = {0}; + struct dcb_rewr_prio_pcp_map pcp_rewr_map = {0}; struct sparx5_port *port = netdev_priv(dev); struct sparx5_port_qos_dscp_map *dscp_map; struct sparx5_port_qos_pcp_map *pcp_map; struct sparx5_port_qos qos = {0}; struct dcb_app app_itr = {0}; int portno = port->portno; + bool dscp_rewr = false; + bool pcp_rewr = false; + u16 dscp; int i; dscp_map = &qos.dscp.map; @@ -163,31 +168,72 @@ static int sparx5_dcb_app_update(struct net_device *dev) pcp_map->map[i] = dcb_getapp(dev, &app_itr); } + /* Get pcp rewrite mapping */ + dcb_getrewr_prio_pcp_mask_map(dev, &pcp_rewr_map); + for (i = 0; i < ARRAY_SIZE(pcp_rewr_map.map); i++) { + if (!pcp_rewr_map.map[i]) + continue; + pcp_rewr = true; + qos.pcp_rewr.map.map[i] = fls(pcp_rewr_map.map[i]) - 1; + } + + /* Get dscp rewrite mapping */ + dcb_getrewr_prio_dscp_mask_map(dev, &dscp_rewr_map); + for (i = 0; i < ARRAY_SIZE(dscp_rewr_map.map); i++) { + if (!dscp_rewr_map.map[i]) + continue; + + /* The rewrite table of the switch has 32 entries; one for each + * priority for each DP level. Currently, the rewrite map does + * not indicate DP level, so we map classified QoS class to + * classified DSCP, for each classified DP level. Rewrite of + * DSCP is only enabled, if we have active mappings. 
+ */ + dscp_rewr = true; + dscp = fls64(dscp_rewr_map.map[i]) - 1; + qos.dscp_rewr.map.map[i] = dscp; /* DP 0 */ + qos.dscp_rewr.map.map[i + 8] = dscp; /* DP 1 */ + qos.dscp_rewr.map.map[i + 16] = dscp; /* DP 2 */ + qos.dscp_rewr.map.map[i + 24] = dscp; /* DP 3 */ + } + /* Enable use of pcp for queue classification ? */ if (sparx5_dcb_apptrust_contains(portno, DCB_APP_SEL_PCP)) { qos.pcp.qos_enable = true; qos.pcp.dp_enable = qos.pcp.qos_enable; + /* Enable rewrite of PCP and DEI if PCP is trusted *and* rewrite + * table is not empty. + */ + if (pcp_rewr) + qos.pcp_rewr.enable = true; } /* Enable use of dscp for queue classification ? */ if (sparx5_dcb_apptrust_contains(portno, IEEE_8021QAZ_APP_SEL_DSCP)) { qos.dscp.qos_enable = true; qos.dscp.dp_enable = qos.dscp.qos_enable; + if (dscp_rewr) + /* Do not enable rewrite if no mappings are active, as + * classified DSCP will then be zero for all classified + * QoS class and DP combinations. + */ + qos.dscp_rewr.enable = true; } return sparx5_port_qos_set(port, &qos); } -/* Set or delete dscp app entry. +/* Set or delete DSCP app entry. * - * Dscp mapping is global for all ports, so set and delete app entries are + * DSCP mapping is global for all ports, so set and delete app entries are * replicated for each port. */ -static int sparx5_dcb_ieee_dscp_setdel_app(struct net_device *dev, - struct dcb_app *app, bool del) +static int sparx5_dcb_ieee_dscp_setdel(struct net_device *dev, + struct dcb_app *app, + int (*setdel)(struct net_device *, + struct dcb_app *)) { struct sparx5_port *port = netdev_priv(dev); - struct dcb_app apps[SPX5_PORTS]; struct sparx5_port *port_itr; int err, i; @@ -195,11 +241,7 @@ static int sparx5_dcb_ieee_dscp_setdel_app(struct net_device *dev, port_itr = port->sparx5->ports[i]; if (!port_itr) continue; - memcpy(&apps[i], app, sizeof(struct dcb_app)); - if (del) - err = dcb_ieee_delapp(port_itr->ndev, &apps[i]); - else - err = dcb_ieee_setapp(port_itr->ndev, &apps[i]); + err = setdel(port_itr->ndev, app); if (err) return err; } @@ -226,7 +268,7 @@ static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app) } if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) - err = sparx5_dcb_ieee_dscp_setdel_app(dev, app, false); + err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_setapp); else err = dcb_ieee_setapp(dev, app); @@ -244,7 +286,7 @@ static int sparx5_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app) int err; if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) - err = sparx5_dcb_ieee_dscp_setdel_app(dev, app, true); + err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_delapp); else err = dcb_ieee_delapp(dev, app); @@ -283,11 +325,60 @@ static int sparx5_dcb_getapptrust(struct net_device *dev, u8 *selectors, return 0; } +static int sparx5_dcb_delrewr(struct net_device *dev, struct dcb_app *app) +{ + int err; + + if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) + err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_delrewr); + else + err = dcb_delrewr(dev, app); + + if (err < 0) + return err; + + return sparx5_dcb_app_update(dev); +} + +static int sparx5_dcb_setrewr(struct net_device *dev, struct dcb_app *app) +{ + struct dcb_app app_itr; + int err = 0; + u16 proto; + + err = sparx5_dcb_app_validate(dev, app); + if (err) + goto out; + + /* Delete current mapping, if it exists. 
*/ + proto = dcb_getrewr(dev, app); + if (proto) { + app_itr = *app; + app_itr.protocol = proto; + sparx5_dcb_delrewr(dev, &app_itr); + } + + if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) + err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_setrewr); + else + err = dcb_setrewr(dev, app); + + if (err) + goto out; + + sparx5_dcb_app_update(dev); + +out: + return err; +} + const struct dcbnl_rtnl_ops sparx5_dcbnl_ops = { .ieee_setapp = sparx5_dcb_ieee_setapp, .ieee_delapp = sparx5_dcb_ieee_delapp, .dcbnl_setapptrust = sparx5_dcb_setapptrust, .dcbnl_getapptrust = sparx5_dcb_getapptrust, + .dcbnl_setrewr = sparx5_dcb_setrewr, + .dcbnl_delrewr = sparx5_dcb_delrewr, }; int sparx5_dcb_init(struct sparx5 *sparx5) @@ -304,6 +395,12 @@ int sparx5_dcb_init(struct sparx5 *sparx5) sparx5_port_apptrust[port->portno] = &sparx5_dcb_apptrust_policies [SPARX5_DCB_APPTRUST_DSCP_PCP]; + + /* Enable DSCP classification based on classified QoS class and + * DP, for all DSCP values, for all ports. + */ + sparx5_port_qos_dscp_rewr_mode_set(port, + SPARX5_PORT_REW_DSCP_ALL); } return 0; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h index 6c93dd6b01b0..a4a4d893dcb2 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h @@ -4,8 +4,8 @@ * Copyright (c) 2021 Microchip Technology Inc. */ -/* This file is autogenerated by cml-utils 2022-09-28 11:17:02 +0200. - * Commit ID: 385c8a11d71a9f6a60368d3a3cb648fa257b479a +/* This file is autogenerated by cml-utils 2022-11-04 11:22:22 +0100. + * Commit ID: 498242727be5db9b423cc0923bc966fc7b40607e */ #ifndef _SPARX5_MAIN_REGS_H_ @@ -885,6 +885,16 @@ enum sparx5_target { #define ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_GET(x)\ FIELD_GET(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, x) +/* ANA_CL:COMMON:QOS_MAP_CFG */ +#define ANA_CL_QOS_MAP_CFG(r) \ + __REG(TARGET_ANA_CL, 0, 1, 166912, 0, 1, 756, 512, r, 32, 4) + +#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL GENMASK(9, 4) +#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(x)\ + FIELD_PREP(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, x) +#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_GET(x)\ + FIELD_GET(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, x) + /* ANA_L2:COMMON:AUTO_LRN_CFG */ #define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4) @@ -5345,6 +5355,62 @@ enum sparx5_target { #define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\ FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x) +/* REW:PORT:PCP_MAP_DE0 */ +#define REW_PCP_MAP_DE0(g, r) \ + __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 4, r, 8, 4) + +#define REW_PCP_MAP_DE0_PCP_DE0 GENMASK(2, 0) +#define REW_PCP_MAP_DE0_PCP_DE0_SET(x)\ + FIELD_PREP(REW_PCP_MAP_DE0_PCP_DE0, x) +#define REW_PCP_MAP_DE0_PCP_DE0_GET(x)\ + FIELD_GET(REW_PCP_MAP_DE0_PCP_DE0, x) + +/* REW:PORT:PCP_MAP_DE1 */ +#define REW_PCP_MAP_DE1(g, r) \ + __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 36, r, 8, 4) + +#define REW_PCP_MAP_DE1_PCP_DE1 GENMASK(2, 0) +#define REW_PCP_MAP_DE1_PCP_DE1_SET(x)\ + FIELD_PREP(REW_PCP_MAP_DE1_PCP_DE1, x) +#define REW_PCP_MAP_DE1_PCP_DE1_GET(x)\ + FIELD_GET(REW_PCP_MAP_DE1_PCP_DE1, x) + +/* REW:PORT:DEI_MAP_DE0 */ +#define REW_DEI_MAP_DE0(g, r) \ + __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 68, r, 8, 4) + +#define REW_DEI_MAP_DE0_DEI_DE0 BIT(0) +#define REW_DEI_MAP_DE0_DEI_DE0_SET(x)\ + FIELD_PREP(REW_DEI_MAP_DE0_DEI_DE0, x) +#define REW_DEI_MAP_DE0_DEI_DE0_GET(x)\ + FIELD_GET(REW_DEI_MAP_DE0_DEI_DE0, x) + +/* REW:PORT:DEI_MAP_DE1 */ +#define REW_DEI_MAP_DE1(g, r) \ + 
__REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 100, r, 8, 4) + +#define REW_DEI_MAP_DE1_DEI_DE1 BIT(0) +#define REW_DEI_MAP_DE1_DEI_DE1_SET(x)\ + FIELD_PREP(REW_DEI_MAP_DE1_DEI_DE1, x) +#define REW_DEI_MAP_DE1_DEI_DE1_GET(x)\ + FIELD_GET(REW_DEI_MAP_DE1_DEI_DE1, x) + +/* REW:PORT:DSCP_MAP */ +#define REW_DSCP_MAP(g) \ + __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 136, 0, 1, 4) + +#define REW_DSCP_MAP_DSCP_UPDATE_ENA BIT(1) +#define REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(x)\ + FIELD_PREP(REW_DSCP_MAP_DSCP_UPDATE_ENA, x) +#define REW_DSCP_MAP_DSCP_UPDATE_ENA_GET(x)\ + FIELD_GET(REW_DSCP_MAP_DSCP_UPDATE_ENA, x) + +#define REW_DSCP_MAP_DSCP_REMAP_ENA BIT(0) +#define REW_DSCP_MAP_DSCP_REMAP_ENA_SET(x)\ + FIELD_PREP(REW_DSCP_MAP_DSCP_REMAP_ENA, x) +#define REW_DSCP_MAP_DSCP_REMAP_ENA_GET(x)\ + FIELD_GET(REW_DSCP_MAP_DSCP_REMAP_ENA, x) + /* REW:PORT:TAG_CTRL */ #define REW_TAG_CTRL(g) __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 132, 0, 1, 4) diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c index 107b9cd931c0..246259b2ae94 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c @@ -1151,11 +1151,69 @@ int sparx5_port_qos_set(struct sparx5_port *port, { sparx5_port_qos_dscp_set(port, &qos->dscp); sparx5_port_qos_pcp_set(port, &qos->pcp); + sparx5_port_qos_pcp_rewr_set(port, &qos->pcp_rewr); + sparx5_port_qos_dscp_rewr_set(port, &qos->dscp_rewr); sparx5_port_qos_default_set(port, qos); return 0; } +int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port, + struct sparx5_port_qos_pcp_rewr *qos) +{ + int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED; + struct sparx5 *sparx5 = port->sparx5; + u8 pcp, dei; + + /* Use mapping table, with classified QoS as index, to map QoS and DP + * to tagged PCP and DEI, if PCP is trusted. Otherwise use classified + * PCP. Classified PCP equals frame PCP. + */ + if (qos->enable) + mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED; + + spx5_rmw(REW_TAG_CTRL_TAG_PCP_CFG_SET(mode) | + REW_TAG_CTRL_TAG_DEI_CFG_SET(mode), + REW_TAG_CTRL_TAG_PCP_CFG | REW_TAG_CTRL_TAG_DEI_CFG, + port->sparx5, REW_TAG_CTRL(port->portno)); + + for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) { + /* Extract PCP and DEI */ + pcp = qos->map.map[i]; + if (pcp > SPARX5_PORT_QOS_PCP_COUNT) + dei = 1; + else + dei = 0; + + /* Rewrite PCP and DEI, for each classified QoS class and DP + * level. This table is only used if tag ctrl mode is set to + * 'mapped'. 
+ * + * 0:0nd - prio=0 and dp:0 => pcp=0 and dei=0 + * 0:0de - prio=0 and dp:1 => pcp=0 and dei=1 + */ + if (dei) { + spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp), + REW_PCP_MAP_DE1_PCP_DE1, sparx5, + REW_PCP_MAP_DE1(port->portno, i)); + + spx5_rmw(REW_DEI_MAP_DE1_DEI_DE1_SET(dei), + REW_DEI_MAP_DE1_DEI_DE1, port->sparx5, + REW_DEI_MAP_DE1(port->portno, i)); + } else { + spx5_rmw(REW_PCP_MAP_DE0_PCP_DE0_SET(pcp), + REW_PCP_MAP_DE0_PCP_DE0, sparx5, + REW_PCP_MAP_DE0(port->portno, i)); + + spx5_rmw(REW_DEI_MAP_DE0_DEI_DE0_SET(dei), + REW_DEI_MAP_DE0_DEI_DE0, port->sparx5, + REW_DEI_MAP_DE0(port->portno, i)); + } + } + + return 0; +} + int sparx5_port_qos_pcp_set(const struct sparx5_port *port, struct sparx5_port_qos_pcp *qos) { @@ -1184,6 +1242,45 @@ int sparx5_port_qos_pcp_set(const struct sparx5_port *port, return 0; } +void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port, + int mode) +{ + spx5_rmw(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(mode), + ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, port->sparx5, + ANA_CL_QOS_CFG(port->portno)); +} + +int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port, + struct sparx5_port_qos_dscp_rewr *qos) +{ + struct sparx5 *sparx5 = port->sparx5; + bool rewr = false; + u16 dscp; + int i; + + /* On egress, rewrite DSCP value to either classified DSCP or frame + * DSCP. If enabled; classified DSCP, if disabled; frame DSCP. + */ + if (qos->enable) + rewr = true; + + spx5_rmw(REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(rewr), + REW_DSCP_MAP_DSCP_UPDATE_ENA, sparx5, + REW_DSCP_MAP(port->portno)); + + /* On ingress, map each classified QoS class and DP to classified DSCP + * value. This mapping table is global for all ports. + */ + for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) { + dscp = qos->map.map[i]; + spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp), + ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5, + ANA_CL_QOS_MAP_CFG(i)); + } + + return 0; +} + int sparx5_port_qos_dscp_set(const struct sparx5_port *port, struct sparx5_port_qos_dscp *qos) { diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h index fbafe22e25cc..607c4ff1df6b 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h @@ -9,6 +9,17 @@ #include "sparx5_main.h" +/* Port PCP rewrite mode */ +#define SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED 0 +#define SPARX5_PORT_REW_TAG_CTRL_DEFAULT 1 +#define SPARX5_PORT_REW_TAG_CTRL_MAPPED 2 + +/* Port DSCP rewrite mode */ +#define SPARX5_PORT_REW_DSCP_NONE 0 +#define SPARX5_PORT_REW_DSCP_IF_ZERO 1 +#define SPARX5_PORT_REW_DSCP_SELECTED 2 +#define SPARX5_PORT_REW_DSCP_ALL 3 + static inline bool sparx5_port_is_2g5(int portno) { return portno >= 16 && portno <= 47; @@ -99,6 +110,15 @@ struct sparx5_port_qos_pcp_map { u8 map[SPARX5_PORT_QOS_PCP_DEI_COUNT]; }; +struct sparx5_port_qos_pcp_rewr_map { + u16 map[SPX5_PRIOS]; +}; + +#define SPARX5_PORT_QOS_DP_NUM 4 +struct sparx5_port_qos_dscp_rewr_map { + u16 map[SPX5_PRIOS * SPARX5_PORT_QOS_DP_NUM]; +}; + #define SPARX5_PORT_QOS_DSCP_COUNT 64 struct sparx5_port_qos_dscp_map { u8 map[SPARX5_PORT_QOS_DSCP_COUNT]; @@ -110,15 +130,27 @@ struct sparx5_port_qos_pcp { bool dp_enable; }; +struct sparx5_port_qos_pcp_rewr { + struct sparx5_port_qos_pcp_rewr_map map; + bool enable; +}; + struct sparx5_port_qos_dscp { struct sparx5_port_qos_dscp_map map; bool qos_enable; bool dp_enable; }; +struct sparx5_port_qos_dscp_rewr { + struct sparx5_port_qos_dscp_rewr_map map; + bool enable; +}; + struct 
sparx5_port_qos { struct sparx5_port_qos_pcp pcp; + struct sparx5_port_qos_pcp_rewr pcp_rewr; struct sparx5_port_qos_dscp dscp; + struct sparx5_port_qos_dscp_rewr dscp_rewr; u8 default_prio; }; @@ -127,9 +159,18 @@ int sparx5_port_qos_set(struct sparx5_port *port, struct sparx5_port_qos *qos); int sparx5_port_qos_pcp_set(const struct sparx5_port *port, struct sparx5_port_qos_pcp *qos); +int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port, + struct sparx5_port_qos_pcp_rewr *qos); + int sparx5_port_qos_dscp_set(const struct sparx5_port *port, struct sparx5_port_qos_dscp *qos); +void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port, + int mode); + +int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port, + struct sparx5_port_qos_dscp_rewr *qos); + int sparx5_port_qos_default_set(const struct sparx5_port *port, const struct sparx5_port_qos *qos); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c index 1ed304a816cc..affaa1656710 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c @@ -35,11 +35,6 @@ struct sparx5_tc_flower_parse_usage { unsigned int used_keys; }; -struct sparx5_tc_rule_pkt_cnt { - u64 cookie; - u32 pkts; -}; - /* These protocols have dedicated keysets in IS2 and a TC dissector * ETH_P_ARP does not have a TC dissector */ @@ -573,8 +568,8 @@ static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco, } static int sparx5_tc_flower_action_check(struct vcap_control *vctrl, - struct flow_cls_offload *fco, - struct vcap_admin *admin) + struct net_device *ndev, + struct flow_cls_offload *fco) { struct flow_rule *rule = flow_cls_offload_flow_rule(fco); struct flow_action_entry *actent, *last_actent = NULL; @@ -600,21 +595,23 @@ static int sparx5_tc_flower_action_check(struct vcap_control *vctrl, last_actent = actent; /* Save last action for later check */ } - /* Check that last action is a goto */ - if (last_actent->id != FLOW_ACTION_GOTO) { + /* Check if last action is a goto + * The last chain/lookup does not need to have a goto action + */ + if (last_actent->id == FLOW_ACTION_GOTO) { + /* Check if the destination chain is in one of the VCAPs */ + if (!vcap_is_next_lookup(vctrl, fco->common.chain_index, + last_actent->chain_index)) { + NL_SET_ERR_MSG_MOD(fco->common.extack, + "Invalid goto chain"); + return -EINVAL; + } + } else if (!vcap_is_last_chain(vctrl, fco->common.chain_index)) { NL_SET_ERR_MSG_MOD(fco->common.extack, "Last action must be 'goto'"); return -EINVAL; } - /* Check if the goto chain is in the next lookup */ - if (!vcap_is_next_lookup(vctrl, fco->common.chain_index, - last_actent->chain_index)) { - NL_SET_ERR_MSG_MOD(fco->common.extack, - "Invalid goto chain"); - return -EINVAL; - } - /* Catch unsupported combinations of actions */ if (action_mask & BIT(FLOW_ACTION_TRAP) && action_mask & BIT(FLOW_ACTION_ACCEPT)) { @@ -833,7 +830,7 @@ static int sparx5_tc_flower_replace(struct net_device *ndev, vctrl = port->sparx5->vcap_ctrl; - err = sparx5_tc_flower_action_check(vctrl, fco, admin); + err = sparx5_tc_flower_action_check(vctrl, ndev, fco); if (err) return err; @@ -945,44 +942,21 @@ static int sparx5_tc_flower_destroy(struct net_device *ndev, return err; } -/* Collect packet counts from all rules with the same cookie */ -static int sparx5_tc_rule_counter_cb(void *arg, struct vcap_rule *rule) -{ - struct sparx5_tc_rule_pkt_cnt *rinfo = arg; - struct vcap_counter counter; 
- int err = 0; - - if (rule->cookie == rinfo->cookie) { - err = vcap_rule_get_counter(rule, &counter); - if (err) - return err; - rinfo->pkts += counter.value; - /* Reset the rule counter */ - counter.value = 0; - vcap_rule_set_counter(rule, &counter); - } - return err; -} - static int sparx5_tc_flower_stats(struct net_device *ndev, struct flow_cls_offload *fco, struct vcap_admin *admin) { struct sparx5_port *port = netdev_priv(ndev); - struct sparx5_tc_rule_pkt_cnt rinfo = {}; + struct vcap_counter ctr = {}; struct vcap_control *vctrl; ulong lastused = 0; - u64 drops = 0; - u32 pkts = 0; int err; - rinfo.cookie = fco->cookie; vctrl = port->sparx5->vcap_ctrl; - err = vcap_rule_iter(vctrl, sparx5_tc_rule_counter_cb, &rinfo); + err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie); if (err) return err; - pkts = rinfo.pkts; - flow_stats_update(&fco->stats, 0x0, pkts, drops, lastused, + flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused, FLOW_ACTION_HW_STATS_IMMEDIATE); return err; } diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c index 30dd61e5d150..d88a93f22606 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c @@ -31,6 +31,7 @@ static int sparx5_tc_matchall_replace(struct net_device *ndev, switch (action->id) { case FLOW_ACTION_GOTO: err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev, + tmo->common.chain_index, action->chain_index, tmo->cookie, true); if (err == -EFAULT) { @@ -43,6 +44,11 @@ static int sparx5_tc_matchall_replace(struct net_device *ndev, "VCAP already enabled"); return -EOPNOTSUPP; } + if (err == -EADDRNOTAVAIL) { + NL_SET_ERR_MSG_MOD(tmo->common.extack, + "Already matching this chain"); + return -EOPNOTSUPP; + } if (err) { NL_SET_ERR_MSG_MOD(tmo->common.extack, "Could not enable VCAP lookups"); @@ -66,8 +72,8 @@ static int sparx5_tc_matchall_destroy(struct net_device *ndev, sparx5 = port->sparx5; if (!tmo->rule && tmo->cookie) { - err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev, 0, - tmo->cookie, false); + err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev, + 0, 0, tmo->cookie, false); if (err) return err; return 0; @@ -80,12 +86,6 @@ int sparx5_tc_matchall(struct net_device *ndev, struct tc_cls_matchall_offload *tmo, bool ingress) { - if (!tc_cls_can_offload_and_chain0(ndev, &tmo->common)) { - NL_SET_ERR_MSG_MOD(tmo->common.extack, - "Only chain zero is supported"); - return -EOPNOTSUPP; - } - switch (tmo->command) { case TC_CLSMATCHALL_REPLACE: return sparx5_tc_matchall_replace(ndev, tmo, ingress); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c index b91e05ffe2f4..c9423adc92ce 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c @@ -29,7 +29,7 @@ static void sparx5_vcap_port_keys(struct sparx5 *sparx5, /* Get lookup state */ value = spx5_rd(sparx5, ANA_ACL_VCAP_S2_CFG(port->portno)); out->prf(out->dst, "\n state: "); - if (ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(value)) + if (ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(value) & BIT(lookup)) out->prf(out->dst, "on"); else out->prf(out->dst, "off"); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c index a0c126ba9a87..0d4b40997bb4 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c +++ 
b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c @@ -510,28 +510,6 @@ static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin, sparx5_vcap_wait_super_update(sparx5); } -/* Enable all lookups in the VCAP instance */ -static int sparx5_vcap_enable(struct net_device *ndev, - struct vcap_admin *admin, - bool enable) -{ - struct sparx5_port *port = netdev_priv(ndev); - struct sparx5 *sparx5; - int portno; - - sparx5 = port->sparx5; - portno = port->portno; - - /* For now we only consider IS2 */ - if (enable) - spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf), sparx5, - ANA_ACL_VCAP_S2_CFG(portno)); - else - spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0), sparx5, - ANA_ACL_VCAP_S2_CFG(portno)); - return 0; -} - /* API callback operations: only IS2 is supported for now */ static struct vcap_operations sparx5_vcap_ops = { .validate_keyset = sparx5_vcap_validate_keyset, @@ -543,7 +521,6 @@ static struct vcap_operations sparx5_vcap_ops = { .update = sparx5_vcap_update, .move = sparx5_vcap_move, .port_info = sparx5_port_info, - .enable = sparx5_vcap_enable, }; /* Enable lookups per port and set the keyset generation: only IS2 for now */ @@ -568,6 +545,12 @@ static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5, ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup)); } } + /* IS2 lookups are in bit 0:3 */ + for (portno = 0; portno < SPX5_PORTS; ++portno) + spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf), + ANA_ACL_VCAP_S2_CFG_SEC_ENA, + sparx5, + ANA_ACL_VCAP_S2_CFG(portno)); } /* Disable lookups per port and set the keyset generation: only IS2 for now */ diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c index 664aae3e2acd..71f787a78295 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c @@ -37,11 +37,13 @@ struct vcap_rule_move { int count; /* blocksize of addresses to move */ }; -/* Stores the filter cookie that enabled the port */ +/* Stores the filter cookie and chain id that enabled the port */ struct vcap_enabled_port { struct list_head list; /* for insertion in enabled ports list */ struct net_device *ndev; /* the enabled port */ unsigned long cookie; /* filter that enabled the port */ + int src_cid; /* source chain id */ + int dst_cid; /* destination chain id */ }; void vcap_iter_set(struct vcap_stream_iter *itr, int sw_width, @@ -508,10 +510,133 @@ static void vcap_encode_keyfield_typegroups(struct vcap_control *vctrl, vcap_encode_typegroups(cache->maskstream, sw_width, tgt, true); } +/* Copy data from src to dst but reverse the data in chunks of 32bits. + * For example if src is 00:11:22:33:44:55 where 55 is LSB the dst will + * have the value 22:33:44:55:00:11. 
+ */ +static void vcap_copy_to_w32be(u8 *dst, const u8 *src, int size) +{ + for (int idx = 0; idx < size; ++idx) { + int first_byte_index = 0; + int nidx; + + first_byte_index = size - (((idx >> 2) + 1) << 2); + if (first_byte_index < 0) + first_byte_index = 0; + nidx = idx + first_byte_index - (idx & ~0x3); + dst[nidx] = src[idx]; + } +} + +static void +vcap_copy_from_client_keyfield(struct vcap_rule *rule, + struct vcap_client_keyfield *dst, + const struct vcap_client_keyfield *src) +{ + struct vcap_rule_internal *ri = to_intrule(rule); + const struct vcap_client_keyfield_data *sdata; + struct vcap_client_keyfield_data *ddata; + int size; + + dst->ctrl.type = src->ctrl.type; + dst->ctrl.key = src->ctrl.key; + INIT_LIST_HEAD(&dst->ctrl.list); + sdata = &src->data; + ddata = &dst->data; + + if (!ri->admin->w32be) { + memcpy(ddata, sdata, sizeof(dst->data)); + return; + } + + size = keyfield_size_table[dst->ctrl.type] / 2; + + switch (dst->ctrl.type) { + case VCAP_FIELD_BIT: + case VCAP_FIELD_U32: + memcpy(ddata, sdata, sizeof(dst->data)); + break; + case VCAP_FIELD_U48: + vcap_copy_to_w32be(ddata->u48.value, src->data.u48.value, size); + vcap_copy_to_w32be(ddata->u48.mask, src->data.u48.mask, size); + break; + case VCAP_FIELD_U56: + vcap_copy_to_w32be(ddata->u56.value, sdata->u56.value, size); + vcap_copy_to_w32be(ddata->u56.mask, sdata->u56.mask, size); + break; + case VCAP_FIELD_U64: + vcap_copy_to_w32be(ddata->u64.value, sdata->u64.value, size); + vcap_copy_to_w32be(ddata->u64.mask, sdata->u64.mask, size); + break; + case VCAP_FIELD_U72: + vcap_copy_to_w32be(ddata->u72.value, sdata->u72.value, size); + vcap_copy_to_w32be(ddata->u72.mask, sdata->u72.mask, size); + break; + case VCAP_FIELD_U112: + vcap_copy_to_w32be(ddata->u112.value, sdata->u112.value, size); + vcap_copy_to_w32be(ddata->u112.mask, sdata->u112.mask, size); + break; + case VCAP_FIELD_U128: + vcap_copy_to_w32be(ddata->u128.value, sdata->u128.value, size); + vcap_copy_to_w32be(ddata->u128.mask, sdata->u128.mask, size); + break; + } +} + +static void +vcap_copy_from_client_actionfield(struct vcap_rule *rule, + struct vcap_client_actionfield *dst, + const struct vcap_client_actionfield *src) +{ + struct vcap_rule_internal *ri = to_intrule(rule); + const struct vcap_client_actionfield_data *sdata; + struct vcap_client_actionfield_data *ddata; + int size; + + dst->ctrl.type = src->ctrl.type; + dst->ctrl.action = src->ctrl.action; + INIT_LIST_HEAD(&dst->ctrl.list); + sdata = &src->data; + ddata = &dst->data; + + if (!ri->admin->w32be) { + memcpy(ddata, sdata, sizeof(dst->data)); + return; + } + + size = actionfield_size_table[dst->ctrl.type]; + + switch (dst->ctrl.type) { + case VCAP_FIELD_BIT: + case VCAP_FIELD_U32: + memcpy(ddata, sdata, sizeof(dst->data)); + break; + case VCAP_FIELD_U48: + vcap_copy_to_w32be(ddata->u48.value, sdata->u48.value, size); + break; + case VCAP_FIELD_U56: + vcap_copy_to_w32be(ddata->u56.value, sdata->u56.value, size); + break; + case VCAP_FIELD_U64: + vcap_copy_to_w32be(ddata->u64.value, sdata->u64.value, size); + break; + case VCAP_FIELD_U72: + vcap_copy_to_w32be(ddata->u72.value, sdata->u72.value, size); + break; + case VCAP_FIELD_U112: + vcap_copy_to_w32be(ddata->u112.value, sdata->u112.value, size); + break; + case VCAP_FIELD_U128: + vcap_copy_to_w32be(ddata->u128.value, sdata->u128.value, size); + break; + } +} + static int vcap_encode_rule_keyset(struct vcap_rule_internal *ri) { const struct vcap_client_keyfield *ckf; const struct vcap_typegroup *tg_table; + struct vcap_client_keyfield tempkf; 
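Illustrative sketch (not from the patch): vcap_copy_to_w32be() above reorders a byte array in 32-bit chunks so that the trailing partial chunk ends up at the lowest indices. Reading the comment's byte strings most-significant-byte-first and the arrays least-significant-byte-first (an assumption for this illustration) reproduces its 00:11:22:33:44:55 -> 22:33:44:55:00:11 example; the standalone sketch below, with names local to the sketch, shows the resulting index mapping for a 6-byte field.

#include <stdio.h>

/* Same index arithmetic as vcap_copy_to_w32be() in the patch */
static void w32be_sketch(unsigned char *dst, const unsigned char *src, int size)
{
	for (int idx = 0; idx < size; ++idx) {
		int first_byte_index = size - (((idx >> 2) + 1) << 2);
		int nidx;

		if (first_byte_index < 0)
			first_byte_index = 0;
		nidx = idx + first_byte_index - (idx & ~0x3);
		dst[nidx] = src[idx];
	}
}

int main(void)
{
	/* 48-bit value 00:11:22:33:44:55 stored least-significant byte first */
	const unsigned char src[6] = { 0x55, 0x44, 0x33, 0x22, 0x11, 0x00 };
	unsigned char dst[6] = { 0 };

	w32be_sketch(dst, src, sizeof(src));

	/* Prints "11 00 55 44 33 22": the low 32-bit word 22:33:44:55 moved to
	 * the high end and the 2-byte remainder 00:11 to the low end, so the
	 * value now reads 22:33:44:55:00:11.
	 */
	for (int i = 0; i < 6; i++)
		printf("%02x ", dst[i]);
	printf("\n");
	return 0;
}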
const struct vcap_field *kf_table; int keyset_size; @@ -552,7 +677,9 @@ static int vcap_encode_rule_keyset(struct vcap_rule_internal *ri) __func__, __LINE__, ckf->ctrl.key); return -EINVAL; } - vcap_encode_keyfield(ri, ckf, &kf_table[ckf->ctrl.key], tg_table); + vcap_copy_from_client_keyfield(&ri->data, &tempkf, ckf); + vcap_encode_keyfield(ri, &tempkf, &kf_table[ckf->ctrl.key], + tg_table); } /* Add typegroup bits to the key/mask bitstreams */ vcap_encode_keyfield_typegroups(ri->vctrl, ri, tg_table); @@ -667,6 +794,7 @@ static int vcap_encode_rule_actionset(struct vcap_rule_internal *ri) { const struct vcap_client_actionfield *caf; const struct vcap_typegroup *tg_table; + struct vcap_client_actionfield tempaf; const struct vcap_field *af_table; int actionset_size; @@ -707,8 +835,9 @@ static int vcap_encode_rule_actionset(struct vcap_rule_internal *ri) __func__, __LINE__, caf->ctrl.action); return -EINVAL; } - vcap_encode_actionfield(ri, caf, &af_table[caf->ctrl.action], - tg_table); + vcap_copy_from_client_actionfield(&ri->data, &tempaf, caf); + vcap_encode_actionfield(ri, &tempaf, + &af_table[caf->ctrl.action], tg_table); } /* Add typegroup bits to the entry bitstreams */ vcap_encode_actionfield_typegroups(ri, tg_table); @@ -738,7 +867,7 @@ int vcap_api_check(struct vcap_control *ctrl) !ctrl->ops->add_default_fields || !ctrl->ops->cache_erase || !ctrl->ops->cache_write || !ctrl->ops->cache_read || !ctrl->ops->init || !ctrl->ops->update || !ctrl->ops->move || - !ctrl->ops->port_info || !ctrl->ops->enable) { + !ctrl->ops->port_info) { pr_err("%s:%d: client operations are missing\n", __func__, __LINE__); return -ENOENT; @@ -791,9 +920,8 @@ int vcap_set_rule_set_actionset(struct vcap_rule *rule, } EXPORT_SYMBOL_GPL(vcap_set_rule_set_actionset); -/* Find a rule with a provided rule id */ -static struct vcap_rule_internal *vcap_lookup_rule(struct vcap_control *vctrl, - u32 id) +/* Check if a rule with this id exists */ +static bool vcap_rule_exists(struct vcap_control *vctrl, u32 id) { struct vcap_rule_internal *ri; struct vcap_admin *admin; @@ -802,7 +930,25 @@ static struct vcap_rule_internal *vcap_lookup_rule(struct vcap_control *vctrl, list_for_each_entry(admin, &vctrl->list, list) list_for_each_entry(ri, &admin->rules, list) if (ri->data.id == id) + return true; + return false; +} + +/* Find a rule with a provided rule id return a locked vcap */ +static struct vcap_rule_internal * +vcap_get_locked_rule(struct vcap_control *vctrl, u32 id) +{ + struct vcap_rule_internal *ri; + struct vcap_admin *admin; + + /* Look for the rule id in all vcaps */ + list_for_each_entry(admin, &vctrl->list, list) { + mutex_lock(&admin->lock); + list_for_each_entry(ri, &admin->rules, list) + if (ri->data.id == id) return ri; + mutex_unlock(&admin->lock); + } return NULL; } @@ -811,19 +957,31 @@ int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie) { struct vcap_rule_internal *ri; struct vcap_admin *admin; + int id = 0; /* Look for the rule id in all vcaps */ - list_for_each_entry(admin, &vctrl->list, list) - list_for_each_entry(ri, &admin->rules, list) - if (ri->data.cookie == cookie) - return ri->data.id; + list_for_each_entry(admin, &vctrl->list, list) { + mutex_lock(&admin->lock); + list_for_each_entry(ri, &admin->rules, list) { + if (ri->data.cookie == cookie) { + id = ri->data.id; + break; + } + } + mutex_unlock(&admin->lock); + if (id) + return id; + } return -ENOENT; } EXPORT_SYMBOL_GPL(vcap_lookup_rule_by_cookie); -/* Make a shallow copy of the rule without the fields */ -struct 
vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri) +/* Make a copy of the rule, shallow or full */ +static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri, + bool full) { + struct vcap_client_actionfield *caf, *newcaf; + struct vcap_client_keyfield *ckf, *newckf; struct vcap_rule_internal *duprule; /* Allocate the client part */ @@ -836,6 +994,27 @@ struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri) /* No elements in these lists */ INIT_LIST_HEAD(&duprule->data.keyfields); INIT_LIST_HEAD(&duprule->data.actionfields); + + /* A full rule copy includes keys and actions */ + if (!full) + return duprule; + + list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) { + newckf = kzalloc(sizeof(*newckf), GFP_KERNEL); + if (!newckf) + return ERR_PTR(-ENOMEM); + memcpy(newckf, ckf, sizeof(*newckf)); + list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields); + } + + list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) { + newcaf = kzalloc(sizeof(*newcaf), GFP_KERNEL); + if (!newcaf) + return ERR_PTR(-ENOMEM); + memcpy(newcaf, caf, sizeof(*newcaf)); + list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields); + } + return duprule; } @@ -1424,39 +1603,31 @@ struct vcap_admin *vcap_find_admin(struct vcap_control *vctrl, int cid) } EXPORT_SYMBOL_GPL(vcap_find_admin); -/* Is the next chain id in the following lookup, possible in another VCAP */ -bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid) +/* Is the next chain id in one of the following lookups + * For now this does not support filters linked to other filters using + * keys and actions. That will be added later. + */ +bool vcap_is_next_lookup(struct vcap_control *vctrl, int src_cid, int dst_cid) { - struct vcap_admin *admin, *next_admin; - int lookup, next_lookup; + struct vcap_admin *admin; + int next_cid; - /* The offset must be at least one lookup */ - if (next_cid < cur_cid + VCAP_CID_LOOKUP_SIZE) + if (vcap_api_check(vctrl)) return false; - if (vcap_api_check(vctrl)) + /* The offset must be at least one lookup, round up */ + next_cid = src_cid + VCAP_CID_LOOKUP_SIZE; + next_cid /= VCAP_CID_LOOKUP_SIZE; + next_cid *= VCAP_CID_LOOKUP_SIZE; + + if (dst_cid < next_cid) return false; - admin = vcap_find_admin(vctrl, cur_cid); + admin = vcap_find_admin(vctrl, dst_cid); if (!admin) return false; - /* If no VCAP contains the next chain, the next chain must be beyond - * the last chain in the current VCAP - */ - next_admin = vcap_find_admin(vctrl, next_cid); - if (!next_admin) - return next_cid > admin->last_cid; - - lookup = vcap_chain_id_to_lookup(admin, cur_cid); - next_lookup = vcap_chain_id_to_lookup(next_admin, next_cid); - - /* Next lookup must be the following lookup */ - if (admin == next_admin || admin->vtype == next_admin->vtype) - return next_lookup == lookup + 1; - - /* Must be the first lookup in the next VCAP instance */ - return next_lookup == 0; + return true; } EXPORT_SYMBOL_GPL(vcap_is_next_lookup); @@ -1721,7 +1892,7 @@ static u32 vcap_set_rule_id(struct vcap_rule_internal *ri) return ri->data.id; for (u32 next_id = 1; next_id < ~0; ++next_id) { - if (!vcap_lookup_rule(ri->vctrl, next_id)) { + if (!vcap_rule_exists(ri->vctrl, next_id)) { ri->data.id = next_id; break; } @@ -1756,8 +1927,8 @@ static int vcap_insert_rule(struct vcap_rule_internal *ri, ri->addr = vcap_next_rule_addr(admin->last_used_addr, ri); admin->last_used_addr = ri->addr; - /* Add a shallow copy of the rule to the VCAP list */ - duprule = vcap_dup_rule(ri); + /* 
Add a copy of the rule to the VCAP list */ + duprule = vcap_dup_rule(ri, ri->state == VCAP_RS_DISABLED); if (IS_ERR(duprule)) return PTR_ERR(duprule); @@ -1770,8 +1941,8 @@ static int vcap_insert_rule(struct vcap_rule_internal *ri, ri->addr = vcap_next_rule_addr(addr, ri); addr = ri->addr; - /* Add a shallow copy of the rule to the VCAP list */ - duprule = vcap_dup_rule(ri); + /* Add a copy of the rule to the VCAP list */ + duprule = vcap_dup_rule(ri, ri->state == VCAP_RS_DISABLED); if (IS_ERR(duprule)) return PTR_ERR(duprule); @@ -1803,11 +1974,94 @@ static void vcap_move_rules(struct vcap_rule_internal *ri, move->offset, move->count); } +/* Check if the chain is already used to enable a VCAP lookup for this port */ +static bool vcap_is_chain_used(struct vcap_control *vctrl, + struct net_device *ndev, int src_cid) +{ + struct vcap_enabled_port *eport; + struct vcap_admin *admin; + + list_for_each_entry(admin, &vctrl->list, list) + list_for_each_entry(eport, &admin->enabled, list) + if (eport->src_cid == src_cid && eport->ndev == ndev) + return true; + + return false; +} + +/* Fetch the next chain in the enabled list for the port */ +static int vcap_get_next_chain(struct vcap_control *vctrl, + struct net_device *ndev, + int dst_cid) +{ + struct vcap_enabled_port *eport; + struct vcap_admin *admin; + + list_for_each_entry(admin, &vctrl->list, list) { + list_for_each_entry(eport, &admin->enabled, list) { + if (eport->ndev != ndev) + continue; + if (eport->src_cid == dst_cid) + return eport->dst_cid; + } + } + + return 0; +} + +static bool vcap_path_exist(struct vcap_control *vctrl, struct net_device *ndev, + int dst_cid) +{ + struct vcap_enabled_port *eport = NULL, *elem; + struct vcap_admin *admin; + int tmp; + + if (dst_cid == 0) /* Chain zero is always available */ + return true; + + /* Find first entry that starts from chain 0 */ + list_for_each_entry(admin, &vctrl->list, list) { + list_for_each_entry(elem, &admin->enabled, list) { + if (elem->src_cid == 0 && elem->ndev == ndev) { + eport = elem; + break; + } + } + if (eport) + break; + } + + if (!eport) + return false; + + tmp = eport->dst_cid; + while (tmp != dst_cid && tmp != 0) + tmp = vcap_get_next_chain(vctrl, ndev, tmp); + + return !!tmp; +} + +/* Internal clients can always store their rules in HW + * External clients can store their rules if the chain is enabled all + * the way from chain 0, otherwise the rule will be cached until + * the chain is enabled.
+ */ +static void vcap_rule_set_state(struct vcap_rule_internal *ri) +{ + if (ri->data.user <= VCAP_USER_QOS) + ri->state = VCAP_RS_PERMANENT; + else if (vcap_path_exist(ri->vctrl, ri->ndev, ri->data.vcap_chain_id)) + ri->state = VCAP_RS_ENABLED; + else + ri->state = VCAP_RS_DISABLED; +} + /* Encode and write a validated rule to the VCAP */ int vcap_add_rule(struct vcap_rule *rule) { struct vcap_rule_internal *ri = to_intrule(rule); struct vcap_rule_move move = {0}; + struct vcap_counter ctr = {0}; int ret; ret = vcap_api_check(ri->vctrl); @@ -1815,6 +2069,8 @@ int vcap_add_rule(struct vcap_rule *rule) return ret; /* Insert the new rule in the list of vcap rules */ mutex_lock(&ri->admin->lock); + + vcap_rule_set_state(ri); ret = vcap_insert_rule(ri, &move); if (ret < 0) { pr_err("%s:%d: could not insert rule in vcap list: %d\n", @@ -1823,6 +2079,14 @@ int vcap_add_rule(struct vcap_rule *rule) } if (move.count > 0) vcap_move_rules(ri, &move); + + if (ri->state == VCAP_RS_DISABLED) { + /* Erase the rule area */ + ri->vctrl->ops->init(ri->ndev, ri->admin, ri->addr, ri->size); + goto out; + } + + vcap_erase_cache(ri); ret = vcap_encode_rule(ri); if (ret) { pr_err("%s:%d: rule encoding error: %d\n", __func__, __LINE__, ret); @@ -1830,8 +2094,12 @@ int vcap_add_rule(struct vcap_rule *rule) } ret = vcap_write_rule(ri); - if (ret) + if (ret) { pr_err("%s:%d: rule write error: %d\n", __func__, __LINE__, ret); + goto out; + } + /* Set the counter to zero */ + ret = vcap_write_counter(ri, &ctr); out: mutex_unlock(&ri->admin->lock); return ret; @@ -1860,17 +2128,28 @@ struct vcap_rule *vcap_alloc_rule(struct vcap_control *vctrl, /* Sanity check that this VCAP is supported on this platform */ if (vctrl->vcaps[admin->vtype].rows == 0) return ERR_PTR(-EINVAL); + + mutex_lock(&admin->lock); /* Check if a rule with this id already exists */ - if (vcap_lookup_rule(vctrl, id)) - return ERR_PTR(-EEXIST); + if (vcap_rule_exists(vctrl, id)) { + err = -EINVAL; + goto out_unlock; + } + /* Check if there is room for the rule in the block(s) of the VCAP */ maxsize = vctrl->vcaps[admin->vtype].sw_count; /* worst case rule size */ - if (vcap_rule_space(admin, maxsize)) - return ERR_PTR(-ENOSPC); + if (vcap_rule_space(admin, maxsize)) { + err = -ENOSPC; + goto out_unlock; + } + /* Create a container for the rule and return it */ ri = kzalloc(sizeof(*ri), GFP_KERNEL); - if (!ri) - return ERR_PTR(-ENOMEM); + if (!ri) { + err = -ENOMEM; + goto out_unlock; + } + ri->data.vcap_chain_id = vcap_chain_id; ri->data.user = user; ri->data.priority = priority; @@ -1883,14 +2162,21 @@ struct vcap_rule *vcap_alloc_rule(struct vcap_control *vctrl, ri->ndev = ndev; ri->admin = admin; /* refer to the vcap instance */ ri->vctrl = vctrl; /* refer to the client */ - if (vcap_set_rule_id(ri) == 0) + + if (vcap_set_rule_id(ri) == 0) { + err = -EINVAL; goto out_free; - vcap_erase_cache(ri); + } + + mutex_unlock(&admin->lock); return (struct vcap_rule *)ri; out_free: kfree(ri); - return ERR_PTR(-EINVAL); +out_unlock: + mutex_unlock(&admin->lock); + return ERR_PTR(err); + } EXPORT_SYMBOL_GPL(vcap_alloc_rule); @@ -1915,43 +2201,52 @@ void vcap_free_rule(struct vcap_rule *rule) } EXPORT_SYMBOL_GPL(vcap_free_rule); -struct vcap_rule *vcap_get_rule(struct vcap_control *vctrl, u32 id) +/* Decode a rule from the VCAP cache and return a copy */ +struct vcap_rule *vcap_decode_rule(struct vcap_rule_internal *elem) { - struct vcap_rule_internal *elem; struct vcap_rule_internal *ri; int err; - ri = NULL; + ri = vcap_dup_rule(elem, elem->state == 
VCAP_RS_DISABLED); + if (IS_ERR(ri)) + return ERR_PTR(PTR_ERR(ri)); + + if (ri->state == VCAP_RS_DISABLED) + goto out; + + err = vcap_read_rule(ri); + if (err) + return ERR_PTR(err); + + err = vcap_decode_keyset(ri); + if (err) + return ERR_PTR(err); + + err = vcap_decode_actionset(ri); + if (err) + return ERR_PTR(err); + +out: + return &ri->data; +} + +struct vcap_rule *vcap_get_rule(struct vcap_control *vctrl, u32 id) +{ + struct vcap_rule_internal *elem; + struct vcap_rule *rule; + int err; err = vcap_api_check(vctrl); if (err) return ERR_PTR(err); - elem = vcap_lookup_rule(vctrl, id); + + elem = vcap_get_locked_rule(vctrl, id); if (!elem) return NULL; - mutex_lock(&elem->admin->lock); - ri = vcap_dup_rule(elem); - if (IS_ERR(ri)) - goto unlock; - err = vcap_read_rule(ri); - if (err) { - ri = ERR_PTR(err); - goto unlock; - } - err = vcap_decode_keyset(ri); - if (err) { - ri = ERR_PTR(err); - goto unlock; - } - err = vcap_decode_actionset(ri); - if (err) { - ri = ERR_PTR(err); - goto unlock; - } -unlock: + rule = vcap_decode_rule(elem); mutex_unlock(&elem->admin->lock); - return (struct vcap_rule *)ri; + return rule; } EXPORT_SYMBOL_GPL(vcap_get_rule); @@ -1966,10 +2261,13 @@ int vcap_mod_rule(struct vcap_rule *rule) if (err) return err; - if (!vcap_lookup_rule(ri->vctrl, ri->data.id)) + if (!vcap_get_locked_rule(ri->vctrl, ri->data.id)) return -ENOENT; - mutex_lock(&ri->admin->lock); + vcap_rule_set_state(ri); + if (ri->state == VCAP_RS_DISABLED) + goto out; + /* Encode the bitstreams to the VCAP cache */ vcap_erase_cache(ri); err = vcap_encode_rule(ri); @@ -1982,8 +2280,6 @@ int vcap_mod_rule(struct vcap_rule *rule) memset(&ctr, 0, sizeof(ctr)); err = vcap_write_counter(ri, &ctr); - if (err) - goto out; out: mutex_unlock(&ri->admin->lock); @@ -2050,20 +2346,19 @@ int vcap_del_rule(struct vcap_control *vctrl, struct net_device *ndev, u32 id) if (err) return err; /* Look for the rule id in all vcaps */ - ri = vcap_lookup_rule(vctrl, id); + ri = vcap_get_locked_rule(vctrl, id); if (!ri) - return -EINVAL; + return -ENOENT; + admin = ri->admin; if (ri->addr > admin->last_used_addr) gap = vcap_fill_rule_gap(ri); /* Delete the rule from the list of rules and the cache */ - mutex_lock(&admin->lock); list_del(&ri->list); vctrl->ops->init(ndev, admin, admin->last_used_addr, ri->size + gap); - kfree(ri); - mutex_unlock(&admin->lock); + vcap_free_rule(&ri->data); /* Update the last used address, set to default when no rules */ if (list_empty(&admin->rules)) { @@ -2073,7 +2368,9 @@ int vcap_del_rule(struct vcap_control *vctrl, struct net_device *ndev, u32 id) list); admin->last_used_addr = elem->addr; } - return 0; + + mutex_unlock(&admin->lock); + return err; } EXPORT_SYMBOL_GPL(vcap_del_rule); @@ -2091,7 +2388,7 @@ int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin) list_for_each_entry_safe(ri, next_ri, &admin->rules, list) { vctrl->ops->init(ri->ndev, admin, ri->addr, ri->size); list_del(&ri->list); - kfree(ri); + vcap_free_rule(&ri->data); } admin->last_used_addr = admin->last_valid_addr; @@ -2137,69 +2434,6 @@ const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule, } EXPORT_SYMBOL_GPL(vcap_lookup_keyfield); -/* Copy data from src to dst but reverse the data in chunks of 32bits. - * For example if src is 00:11:22:33:44:55 where 55 is LSB the dst will - * have the value 22:33:44:55:00:11. 
- */ -static void vcap_copy_to_w32be(u8 *dst, u8 *src, int size) -{ - for (int idx = 0; idx < size; ++idx) { - int first_byte_index = 0; - int nidx; - - first_byte_index = size - (((idx >> 2) + 1) << 2); - if (first_byte_index < 0) - first_byte_index = 0; - nidx = idx + first_byte_index - (idx & ~0x3); - dst[nidx] = src[idx]; - } -} - -static void vcap_copy_from_client_keyfield(struct vcap_rule *rule, - struct vcap_client_keyfield *field, - struct vcap_client_keyfield_data *data) -{ - struct vcap_rule_internal *ri = to_intrule(rule); - int size; - - if (!ri->admin->w32be) { - memcpy(&field->data, data, sizeof(field->data)); - return; - } - - size = keyfield_size_table[field->ctrl.type] / 2; - switch (field->ctrl.type) { - case VCAP_FIELD_BIT: - case VCAP_FIELD_U32: - memcpy(&field->data, data, sizeof(field->data)); - break; - case VCAP_FIELD_U48: - vcap_copy_to_w32be(field->data.u48.value, data->u48.value, size); - vcap_copy_to_w32be(field->data.u48.mask, data->u48.mask, size); - break; - case VCAP_FIELD_U56: - vcap_copy_to_w32be(field->data.u56.value, data->u56.value, size); - vcap_copy_to_w32be(field->data.u56.mask, data->u56.mask, size); - break; - case VCAP_FIELD_U64: - vcap_copy_to_w32be(field->data.u64.value, data->u64.value, size); - vcap_copy_to_w32be(field->data.u64.mask, data->u64.mask, size); - break; - case VCAP_FIELD_U72: - vcap_copy_to_w32be(field->data.u72.value, data->u72.value, size); - vcap_copy_to_w32be(field->data.u72.mask, data->u72.mask, size); - break; - case VCAP_FIELD_U112: - vcap_copy_to_w32be(field->data.u112.value, data->u112.value, size); - vcap_copy_to_w32be(field->data.u112.mask, data->u112.mask, size); - break; - case VCAP_FIELD_U128: - vcap_copy_to_w32be(field->data.u128.value, data->u128.value, size); - vcap_copy_to_w32be(field->data.u128.mask, data->u128.mask, size); - break; - } -} - /* Check if the keyfield is already in the rule */ static bool vcap_keyfield_unique(struct vcap_rule *rule, enum vcap_key_field key) @@ -2257,9 +2491,9 @@ static int vcap_rule_add_key(struct vcap_rule *rule, field = kzalloc(sizeof(*field), GFP_KERNEL); if (!field) return -ENOMEM; + memcpy(&field->data, data, sizeof(field->data)); field->ctrl.key = key; field->ctrl.type = ftype; - vcap_copy_from_client_keyfield(rule, field, data); list_add_tail(&field->ctrl.list, &rule->keyfields); return 0; } @@ -2367,45 +2601,6 @@ vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act) return NULL; } -static void vcap_copy_from_client_actionfield(struct vcap_rule *rule, - struct vcap_client_actionfield *field, - struct vcap_client_actionfield_data *data) -{ - struct vcap_rule_internal *ri = to_intrule(rule); - int size; - - if (!ri->admin->w32be) { - memcpy(&field->data, data, sizeof(field->data)); - return; - } - - size = actionfield_size_table[field->ctrl.type]; - switch (field->ctrl.type) { - case VCAP_FIELD_BIT: - case VCAP_FIELD_U32: - memcpy(&field->data, data, sizeof(field->data)); - break; - case VCAP_FIELD_U48: - vcap_copy_to_w32be(field->data.u48.value, data->u48.value, size); - break; - case VCAP_FIELD_U56: - vcap_copy_to_w32be(field->data.u56.value, data->u56.value, size); - break; - case VCAP_FIELD_U64: - vcap_copy_to_w32be(field->data.u64.value, data->u64.value, size); - break; - case VCAP_FIELD_U72: - vcap_copy_to_w32be(field->data.u72.value, data->u72.value, size); - break; - case VCAP_FIELD_U112: - vcap_copy_to_w32be(field->data.u112.value, data->u112.value, size); - break; - case VCAP_FIELD_U128: - vcap_copy_to_w32be(field->data.u128.value, 
data->u128.value, size); - break; - } -} - /* Check if the actionfield is already in the rule */ static bool vcap_actionfield_unique(struct vcap_rule *rule, enum vcap_action_field act) @@ -2463,9 +2658,9 @@ static int vcap_rule_add_action(struct vcap_rule *rule, field = kzalloc(sizeof(*field), GFP_KERNEL); if (!field) return -ENOMEM; + memcpy(&field->data, data, sizeof(field->data)); field->ctrl.action = action; field->ctrl.type = ftype; - vcap_copy_from_client_actionfield(rule, field, data); list_add_tail(&field->ctrl.list, &rule->actionfields); return 0; } @@ -2564,24 +2759,153 @@ void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule) } EXPORT_SYMBOL_GPL(vcap_set_tc_exterr); +/* Write a rule to VCAP HW to enable it */ +static int vcap_enable_rule(struct vcap_rule_internal *ri) +{ + struct vcap_client_actionfield *af, *naf; + struct vcap_client_keyfield *kf, *nkf; + int err; + + vcap_erase_cache(ri); + err = vcap_encode_rule(ri); + if (err) + goto out; + err = vcap_write_rule(ri); + if (err) + goto out; + + /* Deallocate the list of keys and actions */ + list_for_each_entry_safe(kf, nkf, &ri->data.keyfields, ctrl.list) { + list_del(&kf->ctrl.list); + kfree(kf); + } + list_for_each_entry_safe(af, naf, &ri->data.actionfields, ctrl.list) { + list_del(&af->ctrl.list); + kfree(af); + } + ri->state = VCAP_RS_ENABLED; +out: + return err; +} + +/* Enable all disabled rules for a specific chain/port in the VCAP HW */ +static int vcap_enable_rules(struct vcap_control *vctrl, + struct net_device *ndev, int chain) +{ + struct vcap_rule_internal *ri; + struct vcap_admin *admin; + int err = 0; + + list_for_each_entry(admin, &vctrl->list, list) { + if (!(chain >= admin->first_cid && chain <= admin->last_cid)) + continue; + + /* Found the admin, now find the offloadable rules */ + mutex_lock(&admin->lock); + list_for_each_entry(ri, &admin->rules, list) { + if (ri->data.vcap_chain_id != chain) + continue; + + if (ri->ndev != ndev) + continue; + + if (ri->state != VCAP_RS_DISABLED) + continue; + + err = vcap_enable_rule(ri); + if (err) + break; + } + mutex_unlock(&admin->lock); + if (err) + break; + } + return err; +} + +/* Read and erase a rule from VCAP HW to disable it */ +static int vcap_disable_rule(struct vcap_rule_internal *ri) +{ + int err; + + err = vcap_read_rule(ri); + if (err) + return err; + err = vcap_decode_keyset(ri); + if (err) + return err; + err = vcap_decode_actionset(ri); + if (err) + return err; + + ri->state = VCAP_RS_DISABLED; + ri->vctrl->ops->init(ri->ndev, ri->admin, ri->addr, ri->size); + return 0; +} + +/* Disable all enabled rules for a specific chain/port in the VCAP HW */ +static int vcap_disable_rules(struct vcap_control *vctrl, + struct net_device *ndev, int chain) +{ + struct vcap_rule_internal *ri; + struct vcap_admin *admin; + int err = 0; + + list_for_each_entry(admin, &vctrl->list, list) { + if (!(chain >= admin->first_cid && chain <= admin->last_cid)) + continue; + + /* Found the admin, now find the rules on the chain */ + mutex_lock(&admin->lock); + list_for_each_entry(ri, &admin->rules, list) { + if (ri->data.vcap_chain_id != chain) + continue; + + if (ri->ndev != ndev) + continue; + + if (ri->state != VCAP_RS_ENABLED) + continue; + + err = vcap_disable_rule(ri); + if (err) + break; + } + mutex_unlock(&admin->lock); + if (err) + break; + } + return err; +} + /* Check if this port is already enabled for this VCAP instance */ -static bool vcap_is_enabled(struct vcap_admin *admin, struct net_device *ndev, - unsigned long cookie) +static bool 
vcap_is_enabled(struct vcap_control *vctrl, struct net_device *ndev, + int dst_cid) { struct vcap_enabled_port *eport; + struct vcap_admin *admin; - list_for_each_entry(eport, &admin->enabled, list) - if (eport->cookie == cookie || eport->ndev == ndev) - return true; + list_for_each_entry(admin, &vctrl->list, list) + list_for_each_entry(eport, &admin->enabled, list) + if (eport->dst_cid == dst_cid && eport->ndev == ndev) + return true; return false; } -/* Enable this port for this VCAP instance */ -static int vcap_enable(struct vcap_admin *admin, struct net_device *ndev, - unsigned long cookie) +/* Enable this port and chain id in a VCAP instance */ +static int vcap_enable(struct vcap_control *vctrl, struct net_device *ndev, + unsigned long cookie, int src_cid, int dst_cid) { struct vcap_enabled_port *eport; + struct vcap_admin *admin; + + if (src_cid >= dst_cid) + return -EFAULT; + + admin = vcap_find_admin(vctrl, dst_cid); + if (!admin) + return -ENOENT; eport = kzalloc(sizeof(*eport), GFP_KERNEL); if (!eport) @@ -2589,48 +2913,72 @@ static int vcap_enable(struct vcap_admin *admin, struct net_device *ndev, eport->ndev = ndev; eport->cookie = cookie; + eport->src_cid = src_cid; + eport->dst_cid = dst_cid; + mutex_lock(&admin->lock); list_add_tail(&eport->list, &admin->enabled); + mutex_unlock(&admin->lock); + if (vcap_path_exist(vctrl, ndev, src_cid)) { + /* Enable chained lookups */ + while (dst_cid) { + admin = vcap_find_admin(vctrl, dst_cid); + if (!admin) + return -ENOENT; + + vcap_enable_rules(vctrl, ndev, dst_cid); + dst_cid = vcap_get_next_chain(vctrl, ndev, dst_cid); + } + } return 0; } -/* Disable this port for this VCAP instance */ -static int vcap_disable(struct vcap_admin *admin, struct net_device *ndev, +/* Disable this port and chain id for a VCAP instance */ +static int vcap_disable(struct vcap_control *vctrl, struct net_device *ndev, unsigned long cookie) { - struct vcap_enabled_port *eport; + struct vcap_enabled_port *elem, *eport = NULL; + struct vcap_admin *found = NULL, *admin; + int dst_cid; - list_for_each_entry(eport, &admin->enabled, list) { - if (eport->cookie == cookie && eport->ndev == ndev) { - list_del(&eport->list); - kfree(eport); - return 0; + list_for_each_entry(admin, &vctrl->list, list) { + list_for_each_entry(elem, &admin->enabled, list) { + if (elem->cookie == cookie && elem->ndev == ndev) { + eport = elem; + found = admin; + break; + } } + if (eport) + break; } - return -ENOENT; -} + if (!eport) + return -ENOENT; -/* Find the VCAP instance that enabled the port using a specific filter */ -static struct vcap_admin *vcap_find_admin_by_cookie(struct vcap_control *vctrl, - unsigned long cookie) -{ - struct vcap_enabled_port *eport; - struct vcap_admin *admin; + /* Disable chained lookups */ + dst_cid = eport->dst_cid; + while (dst_cid) { + admin = vcap_find_admin(vctrl, dst_cid); + if (!admin) + return -ENOENT; - list_for_each_entry(admin, &vctrl->list, list) - list_for_each_entry(eport, &admin->enabled, list) - if (eport->cookie == cookie) - return admin; + vcap_disable_rules(vctrl, ndev, dst_cid); + dst_cid = vcap_get_next_chain(vctrl, ndev, dst_cid); + } - return NULL; + mutex_lock(&found->lock); + list_del(&eport->list); + mutex_unlock(&found->lock); + kfree(eport); + return 0; } -/* Enable/Disable the VCAP instance lookups. 
Chain id 0 means disable */ +/* Enable/Disable the VCAP instance lookups */ int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev, - int chain_id, unsigned long cookie, bool enable) + int src_cid, int dst_cid, unsigned long cookie, + bool enable) { - struct vcap_admin *admin; int err; err = vcap_api_check(vctrl); @@ -2640,36 +2988,45 @@ int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev, if (!ndev) return -ENODEV; - if (chain_id) - admin = vcap_find_admin(vctrl, chain_id); - else - admin = vcap_find_admin_by_cookie(vctrl, cookie); - if (!admin) - return -ENOENT; - - /* first instance and first chain */ - if (admin->vinst || chain_id > admin->first_cid) + /* Source and destination must be the first chain in a lookup */ + if (src_cid % VCAP_CID_LOOKUP_SIZE) + return -EFAULT; + if (dst_cid % VCAP_CID_LOOKUP_SIZE) return -EFAULT; - err = vctrl->ops->enable(ndev, admin, enable); - if (err) - return err; - - if (chain_id) { - if (vcap_is_enabled(admin, ndev, cookie)) + if (enable) { + if (vcap_is_enabled(vctrl, ndev, dst_cid)) return -EADDRINUSE; - mutex_lock(&admin->lock); - vcap_enable(admin, ndev, cookie); + if (vcap_is_chain_used(vctrl, ndev, src_cid)) + return -EADDRNOTAVAIL; + err = vcap_enable(vctrl, ndev, cookie, src_cid, dst_cid); } else { - mutex_lock(&admin->lock); - vcap_disable(admin, ndev, cookie); + err = vcap_disable(vctrl, ndev, cookie); } - mutex_unlock(&admin->lock); - return 0; + return err; } EXPORT_SYMBOL_GPL(vcap_enable_lookups); +/* Is this chain id the last lookup of all VCAPs */ +bool vcap_is_last_chain(struct vcap_control *vctrl, int cid) +{ + struct vcap_admin *admin; + int lookup; + + if (vcap_api_check(vctrl)) + return false; + + admin = vcap_find_admin(vctrl, cid); + if (!admin) + return false; + + /* This must be the last lookup in this VCAP type */ + lookup = vcap_chain_id_to_lookup(admin, cid); + return lookup == admin->lookups - 1; +} +EXPORT_SYMBOL_GPL(vcap_is_last_chain); + /* Set a rule counter id (for certain vcaps only) */ void vcap_rule_set_counter_id(struct vcap_rule *rule, u32 counter_id) { @@ -2679,31 +3036,6 @@ void vcap_rule_set_counter_id(struct vcap_rule *rule, u32 counter_id) } EXPORT_SYMBOL_GPL(vcap_rule_set_counter_id); -/* Provide all rules via a callback interface */ -int vcap_rule_iter(struct vcap_control *vctrl, - int (*callback)(void *, struct vcap_rule *), void *arg) -{ - struct vcap_rule_internal *ri; - struct vcap_admin *admin; - int ret; - - ret = vcap_api_check(vctrl); - if (ret) - return ret; - - /* Iterate all rules in each VCAP instance */ - list_for_each_entry(admin, &vctrl->list, list) { - list_for_each_entry(ri, &admin->rules, list) { - ret = callback(arg, &ri->data); - if (ret) - return ret; - } - } - - return 0; -} -EXPORT_SYMBOL_GPL(vcap_rule_iter); - int vcap_rule_set_counter(struct vcap_rule *rule, struct vcap_counter *ctr) { struct vcap_rule_internal *ri = to_intrule(rule); @@ -2716,7 +3048,12 @@ int vcap_rule_set_counter(struct vcap_rule *rule, struct vcap_counter *ctr) pr_err("%s:%d: counter is missing\n", __func__, __LINE__); return -EINVAL; } - return vcap_write_counter(ri, ctr); + + mutex_lock(&ri->admin->lock); + err = vcap_write_counter(ri, ctr); + mutex_unlock(&ri->admin->lock); + + return err; } EXPORT_SYMBOL_GPL(vcap_rule_set_counter); @@ -2732,10 +3069,116 @@ int vcap_rule_get_counter(struct vcap_rule *rule, struct vcap_counter *ctr) pr_err("%s:%d: counter is missing\n", __func__, __LINE__); return -EINVAL; } - return vcap_read_counter(ri, ctr); + + 
mutex_lock(&ri->admin->lock); + err = vcap_read_counter(ri, ctr); + mutex_unlock(&ri->admin->lock); + + return err; } EXPORT_SYMBOL_GPL(vcap_rule_get_counter); +/* Get a copy of a client key field */ +static int vcap_rule_get_key(struct vcap_rule *rule, + enum vcap_key_field key, + struct vcap_client_keyfield *ckf) +{ + struct vcap_client_keyfield *field; + + field = vcap_find_keyfield(rule, key); + if (!field) + return -EINVAL; + memcpy(ckf, field, sizeof(*ckf)); + INIT_LIST_HEAD(&ckf->ctrl.list); + return 0; +} + +/* Get the keysets that matches the rule key type/mask */ +int vcap_rule_get_keysets(struct vcap_rule_internal *ri, + struct vcap_keyset_list *matches) +{ + struct vcap_control *vctrl = ri->vctrl; + enum vcap_type vt = ri->admin->vtype; + const struct vcap_set *keyfield_set; + struct vcap_client_keyfield kf = {}; + u32 value, mask; + int err, idx; + + err = vcap_rule_get_key(&ri->data, VCAP_KF_TYPE, &kf); + if (err) + return err; + + if (kf.ctrl.type == VCAP_FIELD_BIT) { + value = kf.data.u1.value; + mask = kf.data.u1.mask; + } else if (kf.ctrl.type == VCAP_FIELD_U32) { + value = kf.data.u32.value; + mask = kf.data.u32.mask; + } else { + return -EINVAL; + } + + keyfield_set = vctrl->vcaps[vt].keyfield_set; + for (idx = 0; idx < vctrl->vcaps[vt].keyfield_set_size; ++idx) { + if (keyfield_set[idx].sw_per_item != ri->keyset_sw) + continue; + + if (keyfield_set[idx].type_id == (u8)-1) { + vcap_keyset_list_add(matches, idx); + continue; + } + + if ((keyfield_set[idx].type_id & mask) == value) + vcap_keyset_list_add(matches, idx); + } + if (matches->cnt > 0) + return 0; + + return -EINVAL; +} + +/* Collect packet counts from all rules with the same cookie */ +int vcap_get_rule_count_by_cookie(struct vcap_control *vctrl, + struct vcap_counter *ctr, u64 cookie) +{ + struct vcap_rule_internal *ri; + struct vcap_counter temp = {}; + struct vcap_admin *admin; + int err; + + err = vcap_api_check(vctrl); + if (err) + return err; + + /* Iterate all rules in each VCAP instance */ + list_for_each_entry(admin, &vctrl->list, list) { + mutex_lock(&admin->lock); + list_for_each_entry(ri, &admin->rules, list) { + if (ri->data.cookie != cookie) + continue; + + err = vcap_read_counter(ri, &temp); + if (err) + goto unlock; + ctr->value += temp.value; + + /* Reset the rule counter */ + temp.value = 0; + temp.sticky = 0; + err = vcap_write_counter(ri, &temp); + if (err) + goto unlock; + } + mutex_unlock(&admin->lock); + } + return err; + +unlock: + mutex_unlock(&admin->lock); + return err; +} +EXPORT_SYMBOL_GPL(vcap_get_rule_count_by_cookie); + static int vcap_rule_mod_key(struct vcap_rule *rule, enum vcap_key_field key, enum vcap_field_type ftype, @@ -2746,7 +3189,7 @@ static int vcap_rule_mod_key(struct vcap_rule *rule, field = vcap_find_keyfield(rule, key); if (!field) return vcap_rule_add_key(rule, key, ftype, data); - vcap_copy_from_client_keyfield(rule, field, data); + memcpy(&field->data, data, sizeof(field->data)); return 0; } @@ -2772,7 +3215,7 @@ static int vcap_rule_mod_action(struct vcap_rule *rule, field = vcap_find_actionfield(rule, action); if (!field) return vcap_rule_add_action(rule, action, ftype, data); - vcap_copy_from_client_actionfield(rule, field, data); + memcpy(&field->data, data, sizeof(field->data)); return 0; } diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h index 689c7270f2a8..c61f13a65030 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api.h +++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h @@ -259,11 
+259,6 @@ struct vcap_operations { (struct net_device *ndev, struct vcap_admin *admin, struct vcap_output_print *out); - /* enable/disable the lookups in a vcap instance */ - int (*enable) - (struct net_device *ndev, - struct vcap_admin *admin, - bool enable); }; /* VCAP API Client control interface */ diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h index 0319866f9c94..2cdcd3b56b30 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h @@ -148,9 +148,10 @@ struct vcap_counter { bool sticky; }; -/* Enable/Disable the VCAP instance lookups. Chain id 0 means disable */ +/* Enable/Disable the VCAP instance lookups */ int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev, - int chain_id, unsigned long cookie, bool enable); + int from_cid, int to_cid, unsigned long cookie, + bool enable); /* VCAP rule operations */ /* Allocate a rule and fill in the basic information */ @@ -201,6 +202,8 @@ int vcap_rule_add_action_u32(struct vcap_rule *rule, enum vcap_action_field action, u32 value); /* VCAP rule counter operations */ +int vcap_get_rule_count_by_cookie(struct vcap_control *vctrl, + struct vcap_counter *ctr, u64 cookie); int vcap_rule_set_counter(struct vcap_rule *rule, struct vcap_counter *ctr); int vcap_rule_get_counter(struct vcap_rule *rule, struct vcap_counter *ctr); @@ -216,6 +219,8 @@ const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule, int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie); /* Is the next chain id in the following lookup, possible in another VCAP */ bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid); +/* Is this chain id the last lookup of all VCAPs */ +bool vcap_is_last_chain(struct vcap_control *vctrl, int cid); /* Provide all rules via a callback interface */ int vcap_rule_iter(struct vcap_control *vctrl, int (*callback)(void *, struct vcap_rule *), void *arg); @@ -261,5 +266,4 @@ int vcap_rule_mod_action_u32(struct vcap_rule *rule, /* Get a 32 bit key field value and mask from the rule */ int vcap_rule_get_key_u32(struct vcap_rule *rule, enum vcap_key_field key, u32 *value, u32 *mask); - #endif /* __VCAP_API_CLIENT__ */ diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c index e0b206247f2e..d49b1cf7712f 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c @@ -152,37 +152,48 @@ vcap_debugfs_show_rule_actionfield(struct vcap_control *vctrl, out->prf(out->dst, "\n"); } -static int vcap_debugfs_show_rule_keyset(struct vcap_rule_internal *ri, - struct vcap_output_print *out) +static int vcap_debugfs_show_keysets(struct vcap_rule_internal *ri, + struct vcap_output_print *out) { - struct vcap_control *vctrl = ri->vctrl; struct vcap_admin *admin = ri->admin; enum vcap_keyfield_set keysets[10]; - const struct vcap_field *keyfield; - enum vcap_type vt = admin->vtype; - struct vcap_client_keyfield *ckf; struct vcap_keyset_list matches; - u32 *maskstream; - u32 *keystream; - int res; + int err; - keystream = admin->cache.keystream; - maskstream = admin->cache.maskstream; matches.keysets = keysets; matches.cnt = 0; matches.max = ARRAY_SIZE(keysets); - res = vcap_find_keystream_keysets(vctrl, vt, keystream, maskstream, - false, 0, &matches); - if (res < 0) { + + if (ri->state == VCAP_RS_DISABLED) + err = 
vcap_rule_get_keysets(ri, &matches); + else + err = vcap_find_keystream_keysets(ri->vctrl, admin->vtype, + admin->cache.keystream, + admin->cache.maskstream, + false, 0, &matches); + if (err) { pr_err("%s:%d: could not find valid keysets: %d\n", - __func__, __LINE__, res); - return -EINVAL; + __func__, __LINE__, err); + return err; } + out->prf(out->dst, " keysets:"); for (int idx = 0; idx < matches.cnt; ++idx) out->prf(out->dst, " %s", - vcap_keyset_name(vctrl, matches.keysets[idx])); + vcap_keyset_name(ri->vctrl, matches.keysets[idx])); out->prf(out->dst, "\n"); + return 0; +} + +static int vcap_debugfs_show_rule_keyset(struct vcap_rule_internal *ri, + struct vcap_output_print *out) +{ + struct vcap_control *vctrl = ri->vctrl; + struct vcap_admin *admin = ri->admin; + const struct vcap_field *keyfield; + struct vcap_client_keyfield *ckf; + + vcap_debugfs_show_keysets(ri, out); out->prf(out->dst, " keyset_sw: %d\n", ri->keyset_sw); out->prf(out->dst, " keyset_sw_regs: %d\n", ri->keyset_sw_regs); @@ -233,6 +244,18 @@ static void vcap_show_admin_rule(struct vcap_control *vctrl, out->prf(out->dst, " chain_id: %d\n", ri->data.vcap_chain_id); out->prf(out->dst, " user: %d\n", ri->data.user); out->prf(out->dst, " priority: %d\n", ri->data.priority); + out->prf(out->dst, " state: "); + switch (ri->state) { + case VCAP_RS_PERMANENT: + out->prf(out->dst, "permanent\n"); + break; + case VCAP_RS_DISABLED: + out->prf(out->dst, "disabled\n"); + break; + case VCAP_RS_ENABLED: + out->prf(out->dst, "enabled\n"); + break; + } vcap_debugfs_show_rule_keyset(ri, out); vcap_debugfs_show_rule_actionset(ri, out); } @@ -272,7 +295,7 @@ static int vcap_show_admin(struct vcap_control *vctrl, vcap_show_admin_info(vctrl, admin, out); list_for_each_entry(elem, &admin->rules, list) { - vrule = vcap_get_rule(vctrl, elem->data.id); + vrule = vcap_decode_rule(elem); if (IS_ERR_OR_NULL(vrule)) { ret = PTR_ERR(vrule); break; @@ -381,8 +404,12 @@ static int vcap_debugfs_show(struct seq_file *m, void *unused) .prf = (void *)seq_printf, .dst = m, }; + int ret; - return vcap_show_admin(info->vctrl, info->admin, &out); + mutex_lock(&info->admin->lock); + ret = vcap_show_admin(info->vctrl, info->admin, &out); + mutex_unlock(&info->admin->lock); + return ret; } DEFINE_SHOW_ATTRIBUTE(vcap_debugfs); @@ -394,8 +421,12 @@ static int vcap_raw_debugfs_show(struct seq_file *m, void *unused) .prf = (void *)seq_printf, .dst = m, }; + int ret; - return vcap_show_admin_raw(info->vctrl, info->admin, &out); + mutex_lock(&info->admin->lock); + ret = vcap_show_admin_raw(info->vctrl, info->admin, &out); + mutex_unlock(&info->admin->lock); + return ret; } DEFINE_SHOW_ATTRIBUTE(vcap_raw_debugfs); diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c index cf594668d5d9..cbf7e0f110b8 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c @@ -221,13 +221,6 @@ static int vcap_test_port_info(struct net_device *ndev, return 0; } -static int vcap_test_enable(struct net_device *ndev, - struct vcap_admin *admin, - bool enable) -{ - return 0; -} - static struct vcap_operations test_callbacks = { .validate_keyset = test_val_keyset, .add_default_fields = test_add_def_fields, @@ -238,7 +231,6 @@ static struct vcap_operations test_callbacks = { .update = test_cache_update, .move = test_cache_move, .port_info = vcap_test_port_info, - .enable = vcap_test_enable, }; static struct vcap_control 
test_vctrl = { @@ -253,6 +245,8 @@ static void vcap_test_api_init(struct vcap_admin *admin) INIT_LIST_HEAD(&test_vctrl.list); INIT_LIST_HEAD(&admin->list); INIT_LIST_HEAD(&admin->rules); + INIT_LIST_HEAD(&admin->enabled); + mutex_init(&admin->lock); list_add_tail(&admin->list, &test_vctrl.list); memset(test_updateaddr, 0, sizeof(test_updateaddr)); test_updateaddridx = 0; @@ -452,6 +446,7 @@ static const char * const test_admin_expect[] = { " chain_id: 0\n", " user: 0\n", " priority: 0\n", + " state: permanent\n", " keysets: VCAP_KFS_MAC_ETYPE\n", " keyset_sw: 6\n", " keyset_sw_regs: 2\n", diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c index 76a31215ebfb..82981176218c 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c @@ -211,13 +211,6 @@ static int vcap_test_port_info(struct net_device *ndev, return 0; } -static int vcap_test_enable(struct net_device *ndev, - struct vcap_admin *admin, - bool enable) -{ - return 0; -} - static struct vcap_operations test_callbacks = { .validate_keyset = test_val_keyset, .add_default_fields = test_add_def_fields, @@ -228,7 +221,6 @@ static struct vcap_operations test_callbacks = { .update = test_cache_update, .move = test_cache_move, .port_info = vcap_test_port_info, - .enable = vcap_test_enable, }; static struct vcap_control test_vctrl = { @@ -243,6 +235,8 @@ static void vcap_test_api_init(struct vcap_admin *admin) INIT_LIST_HEAD(&test_vctrl.list); INIT_LIST_HEAD(&admin->list); INIT_LIST_HEAD(&admin->rules); + INIT_LIST_HEAD(&admin->enabled); + mutex_init(&admin->lock); list_add_tail(&admin->list, &test_vctrl.list); memset(test_updateaddr, 0, sizeof(test_updateaddr)); test_updateaddridx = 0; @@ -1312,8 +1306,8 @@ static void vcap_api_encode_rule_test(struct kunit *test) struct vcap_admin is2_admin = { .vtype = VCAP_TYPE_IS2, - .first_cid = 10000, - .last_cid = 19999, + .first_cid = 8000000, + .last_cid = 8099999, .lookups = 4, .last_valid_addr = 3071, .first_valid_addr = 0, @@ -1326,7 +1320,7 @@ static void vcap_api_encode_rule_test(struct kunit *test) }; struct vcap_rule *rule; struct vcap_rule_internal *ri; - int vcap_chain_id = 10005; + int vcap_chain_id = 8000000; enum vcap_user user = VCAP_USER_VCAP_UTIL; u16 priority = 10; int id = 100; @@ -1343,8 +1337,8 @@ static void vcap_api_encode_rule_test(struct kunit *test) u32 port_mask_rng_mask = 0x0f; u32 igr_port_mask_value = 0xffabcd01; u32 igr_port_mask_mask = ~0; - /* counter is not written yet, so it is not in expwriteaddr */ - u32 expwriteaddr[] = {792, 793, 794, 795, 796, 797, 0}; + /* counter is written as the last operation */ + u32 expwriteaddr[] = {792, 793, 794, 795, 796, 797, 792}; int idx; vcap_test_api_init(&is2_admin); @@ -1398,6 +1392,11 @@ static void vcap_api_encode_rule_test(struct kunit *test) KUNIT_EXPECT_EQ(test, 2, ri->keyset_sw_regs); KUNIT_EXPECT_EQ(test, 4, ri->actionset_sw_regs); + /* Enable lookup, so the rule will be written */ + ret = vcap_enable_lookups(&test_vctrl, &test_netdev, 0, + rule->vcap_chain_id, rule->cookie, true); + KUNIT_EXPECT_EQ(test, 0, ret); + /* Add rule with write callback */ ret = vcap_add_rule(rule); KUNIT_EXPECT_EQ(test, 0, ret); @@ -1872,7 +1871,7 @@ static void vcap_api_next_lookup_basic_test(struct kunit *test) ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8301000); KUNIT_EXPECT_EQ(test, false, ret); ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8401000); - KUNIT_EXPECT_EQ(test, true, ret); + 
KUNIT_EXPECT_EQ(test, false, ret); } static void vcap_api_next_lookup_advanced_test(struct kunit *test) @@ -1933,9 +1932,9 @@ static void vcap_api_next_lookup_advanced_test(struct kunit *test) ret = vcap_is_next_lookup(&test_vctrl, 1100000, 1201000); KUNIT_EXPECT_EQ(test, true, ret); ret = vcap_is_next_lookup(&test_vctrl, 1100000, 1301000); - KUNIT_EXPECT_EQ(test, false, ret); + KUNIT_EXPECT_EQ(test, true, ret); ret = vcap_is_next_lookup(&test_vctrl, 1100000, 8101000); - KUNIT_EXPECT_EQ(test, false, ret); + KUNIT_EXPECT_EQ(test, true, ret); ret = vcap_is_next_lookup(&test_vctrl, 1300000, 1401000); KUNIT_EXPECT_EQ(test, true, ret); ret = vcap_is_next_lookup(&test_vctrl, 1400000, 1501000); @@ -1951,7 +1950,7 @@ static void vcap_api_next_lookup_advanced_test(struct kunit *test) ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8301000); KUNIT_EXPECT_EQ(test, false, ret); ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8401000); - KUNIT_EXPECT_EQ(test, true, ret); + KUNIT_EXPECT_EQ(test, false, ret); } static void vcap_api_filter_unsupported_keys_test(struct kunit *test) diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h index 4fd21da97679..df81d9ff502b 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h @@ -13,6 +13,12 @@ #define to_intrule(rule) container_of((rule), struct vcap_rule_internal, data) +enum vcap_rule_state { + VCAP_RS_PERMANENT, /* the rule is always stored in HW */ + VCAP_RS_ENABLED, /* enabled in HW but can be disabled */ + VCAP_RS_DISABLED, /* disabled (stored in SW) and can be enabled */ +}; + /* Private VCAP API rule data */ struct vcap_rule_internal { struct vcap_rule data; /* provided by the client */ @@ -29,6 +35,7 @@ struct vcap_rule_internal { u32 addr; /* address in the VCAP at insertion */ u32 counter_id; /* counter id (if a dedicated counter is available) */ struct vcap_counter counter; /* last read counter value */ + enum vcap_rule_state state; /* rule storage state */ }; /* Bit iterator for the VCAP cache streams */ @@ -43,8 +50,6 @@ struct vcap_stream_iter { /* Check that the control has a valid set of callbacks */ int vcap_api_check(struct vcap_control *ctrl); -/* Make a shallow copy of the rule without the fields */ -struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri); /* Erase the VCAP cache area used or encoding and decoding */ void vcap_erase_cache(struct vcap_rule_internal *ri); @@ -110,4 +115,10 @@ int vcap_find_keystream_keysets(struct vcap_control *vctrl, enum vcap_type vt, u32 *keystream, u32 *mskstream, bool mask, int sw_max, struct vcap_keyset_list *kslist); +/* Get the keysets that matches the rule key type/mask */ +int vcap_rule_get_keysets(struct vcap_rule_internal *ri, + struct vcap_keyset_list *matches); +/* Decode a rule from the VCAP cache and return a copy */ +struct vcap_rule *vcap_decode_rule(struct vcap_rule_internal *elem); + #endif /* __VCAP_API_PRIVATE__ */ diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig index e785c00b5845..d03d6e96f730 100644 --- a/drivers/net/ethernet/netronome/Kconfig +++ b/drivers/net/ethernet/netronome/Kconfig @@ -18,7 +18,7 @@ if NET_VENDOR_NETRONOME config NFP tristate "Netronome(R) NFP4000/NFP6000 NIC driver" - depends on PCI && PCI_MSI + depends on PCI_MSI depends on VXLAN || VXLAN=n depends on TLS && TLS_DEVICE || TLS_DEVICE=n select NET_DEVLINK diff --git 
a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 8a250214e289..c90d35f5ebca 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -83,3 +83,5 @@ endif nfp-$(CONFIG_NFP_NET_IPSEC) += crypto/ipsec.o nfd3/ipsec.o nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o + +nfp-$(CONFIG_DCB) += nic/dcb.o diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 51124309ae1f..a4096050c9bd 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -413,6 +413,7 @@ #define NFP_NET_CFG_MBOX_CMD_IPSEC 3 #define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5 #define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6 +#define NFP_NET_CFG_MBOX_CMD_DCB_UPDATE 7 #define NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD 8 #define NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL 9 diff --git a/drivers/net/ethernet/netronome/nfp/nic/dcb.c b/drivers/net/ethernet/netronome/nfp/nic/dcb.c new file mode 100644 index 000000000000..bb498ac6bd7d --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nic/dcb.c @@ -0,0 +1,571 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2023 Corigine, Inc. */ + +#include <linux/device.h> +#include <linux/netdevice.h> +#include <net/dcbnl.h> + +#include "../nfp_app.h" +#include "../nfp_net.h" +#include "../nfp_main.h" +#include "../nfpcore/nfp_cpp.h" +#include "../nfpcore/nfp_nffw.h" +#include "../nfp_net_sriov.h" + +#include "main.h" + +#define NFP_DCB_TRUST_PCP 1 +#define NFP_DCB_TRUST_DSCP 2 +#define NFP_DCB_TRUST_INVALID 0xff + +#define NFP_DCB_TSA_VENDOR 1 +#define NFP_DCB_TSA_STRICT 2 +#define NFP_DCB_TSA_ETS 3 + +#define NFP_DCB_GBL_ENABLE BIT(0) +#define NFP_DCB_QOS_ENABLE BIT(1) +#define NFP_DCB_DISABLE 0 +#define NFP_DCB_ALL_QOS_ENABLE (NFP_DCB_GBL_ENABLE | NFP_DCB_QOS_ENABLE) + +#define NFP_DCB_UPDATE_MSK_SZ 4 +#define NFP_DCB_TC_RATE_MAX 0xffff + +#define NFP_DCB_DATA_OFF_DSCP2IDX 0 +#define NFP_DCB_DATA_OFF_PCP2IDX 64 +#define NFP_DCB_DATA_OFF_TSA 80 +#define NFP_DCB_DATA_OFF_IDX_BW_PCT 88 +#define NFP_DCB_DATA_OFF_RATE 96 +#define NFP_DCB_DATA_OFF_CAP 112 +#define NFP_DCB_DATA_OFF_ENABLE 116 +#define NFP_DCB_DATA_OFF_TRUST 120 + +#define NFP_DCB_MSG_MSK_ENABLE BIT(31) +#define NFP_DCB_MSG_MSK_TRUST BIT(30) +#define NFP_DCB_MSG_MSK_TSA BIT(29) +#define NFP_DCB_MSG_MSK_DSCP BIT(28) +#define NFP_DCB_MSG_MSK_PCP BIT(27) +#define NFP_DCB_MSG_MSK_RATE BIT(26) +#define NFP_DCB_MSG_MSK_PCT BIT(25) + +static struct nfp_dcb *get_dcb_priv(struct nfp_net *nn) +{ + struct nfp_dcb *dcb = &((struct nfp_app_nic_private *)nn->app_priv)->dcb; + + return dcb; +} + +static u8 nfp_tsa_ieee2nfp(u8 tsa) +{ + switch (tsa) { + case IEEE_8021QAZ_TSA_STRICT: + return NFP_DCB_TSA_STRICT; + case IEEE_8021QAZ_TSA_ETS: + return NFP_DCB_TSA_ETS; + default: + return NFP_DCB_TSA_VENDOR; + } +} + +static int nfp_nic_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct nfp_net *nn = netdev_priv(dev); + struct nfp_dcb *dcb; + + dcb = get_dcb_priv(nn); + + for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + ets->prio_tc[i] = dcb->prio2tc[i]; + ets->tc_tx_bw[i] = dcb->tc_tx_pct[i]; + ets->tc_tsa[i] = dcb->tc_tsa[i]; + } + + return 0; +} + +static bool nfp_refresh_tc2idx(struct nfp_net *nn) +{ + u8 tc2idx[IEEE_8021QAZ_MAX_TCS]; + bool change = false; + struct nfp_dcb *dcb; + int maxstrict = 0; + + dcb = get_dcb_priv(nn); + + for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; 
i++) { + tc2idx[i] = i; + if (dcb->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) + maxstrict = i; + } + + if (maxstrict > 0 && dcb->tc_tsa[0] != IEEE_8021QAZ_TSA_STRICT) { + tc2idx[0] = maxstrict; + tc2idx[maxstrict] = 0; + } + + for (unsigned int j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { + if (dcb->tc2idx[j] != tc2idx[j]) { + change = true; + dcb->tc2idx[j] = tc2idx[j]; + } + } + + return change; +} + +static int nfp_fill_maxrate(struct nfp_net *nn, u64 *max_rate_array) +{ + struct nfp_app *app = nn->app; + struct nfp_dcb *dcb; + u32 ratembps; + + dcb = get_dcb_priv(nn); + + for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + /* Convert bandwidth from kbps to mbps. */ + ratembps = max_rate_array[i] / 1024; + + /* Reject input values >= NFP_DCB_TC_RATE_MAX */ + if (ratembps >= NFP_DCB_TC_RATE_MAX) { + nfp_warn(app->cpp, "ratembps(%d) must less than %d.", + ratembps, NFP_DCB_TC_RATE_MAX); + return -EINVAL; + } + /* Input value 0 mapped to NFP_DCB_TC_RATE_MAX for firmware. */ + if (ratembps == 0) + ratembps = NFP_DCB_TC_RATE_MAX; + + writew((u16)ratembps, dcb->dcbcfg_tbl + + dcb->cfg_offset + NFP_DCB_DATA_OFF_RATE + dcb->tc2idx[i] * 2); + /* for rate value from user space, need to sync to dcb structure */ + if (dcb->tc_maxrate != max_rate_array) + dcb->tc_maxrate[i] = max_rate_array[i]; + } + + return 0; +} + +static int update_dscp_maxrate(struct net_device *dev, u32 *update) +{ + struct nfp_net *nn = netdev_priv(dev); + struct nfp_dcb *dcb; + int err; + + dcb = get_dcb_priv(nn); + + err = nfp_fill_maxrate(nn, dcb->tc_maxrate); + if (err) + return err; + + *update |= NFP_DCB_MSG_MSK_RATE; + + /* We only refresh dscp in dscp trust mode. */ + if (dcb->dscp_cnt > 0) { + for (unsigned int i = 0; i < NFP_NET_MAX_DSCP; i++) { + writeb(dcb->tc2idx[dcb->prio2tc[dcb->dscp2prio[i]]], + dcb->dcbcfg_tbl + dcb->cfg_offset + + NFP_DCB_DATA_OFF_DSCP2IDX + i); + } + *update |= NFP_DCB_MSG_MSK_DSCP; + } + + return 0; +} + +static void nfp_nic_set_trust(struct nfp_net *nn, u32 *update) +{ + struct nfp_dcb *dcb; + u8 trust; + + dcb = get_dcb_priv(nn); + + if (dcb->trust_status != NFP_DCB_TRUST_INVALID) + return; + + trust = dcb->dscp_cnt > 0 ? NFP_DCB_TRUST_DSCP : NFP_DCB_TRUST_PCP; + writeb(trust, dcb->dcbcfg_tbl + dcb->cfg_offset + + NFP_DCB_DATA_OFF_TRUST); + + dcb->trust_status = trust; + *update |= NFP_DCB_MSG_MSK_TRUST; +} + +static void nfp_nic_set_enable(struct nfp_net *nn, u32 enable, u32 *update) +{ + struct nfp_dcb *dcb; + u32 value = 0; + + dcb = get_dcb_priv(nn); + + value = readl(dcb->dcbcfg_tbl + dcb->cfg_offset + + NFP_DCB_DATA_OFF_ENABLE); + if (value != enable) { + writel(enable, dcb->dcbcfg_tbl + dcb->cfg_offset + + NFP_DCB_DATA_OFF_ENABLE); + *update |= NFP_DCB_MSG_MSK_ENABLE; + } +} + +static int dcb_ets_check(struct net_device *dev, struct ieee_ets *ets) +{ + struct nfp_net *nn = netdev_priv(dev); + struct nfp_app *app = nn->app; + bool ets_exists = false; + int sum = 0; + + for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + /* For ets mode, check bw percentage sum. 
*/ + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { + ets_exists = true; + sum += ets->tc_tx_bw[i]; + } else if (ets->tc_tx_bw[i]) { + nfp_warn(app->cpp, "ETS BW for strict/vendor TC must be 0."); + return -EINVAL; + } + } + + if (ets_exists && sum != 100) { + nfp_warn(app->cpp, "Failed to validate ETS BW: sum must be 100."); + return -EINVAL; + } + + return 0; +} + +static void nfp_nic_fill_ets(struct nfp_net *nn) +{ + struct nfp_dcb *dcb; + + dcb = get_dcb_priv(nn); + + for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + writeb(dcb->tc2idx[dcb->prio2tc[i]], + dcb->dcbcfg_tbl + dcb->cfg_offset + NFP_DCB_DATA_OFF_PCP2IDX + i); + writeb(dcb->tc_tx_pct[i], dcb->dcbcfg_tbl + + dcb->cfg_offset + NFP_DCB_DATA_OFF_IDX_BW_PCT + dcb->tc2idx[i]); + writeb(nfp_tsa_ieee2nfp(dcb->tc_tsa[i]), dcb->dcbcfg_tbl + + dcb->cfg_offset + NFP_DCB_DATA_OFF_TSA + dcb->tc2idx[i]); + } +} + +static void nfp_nic_ets_init(struct nfp_net *nn, u32 *update) +{ + struct nfp_dcb *dcb = get_dcb_priv(nn); + + if (dcb->ets_init) + return; + + nfp_nic_fill_ets(nn); + dcb->ets_init = true; + *update |= NFP_DCB_MSG_MSK_TSA | NFP_DCB_MSG_MSK_PCT | NFP_DCB_MSG_MSK_PCP; +} + +static int nfp_nic_dcbnl_ieee_setets(struct net_device *dev, + struct ieee_ets *ets) +{ + const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE; + struct nfp_net *nn = netdev_priv(dev); + struct nfp_app *app = nn->app; + struct nfp_dcb *dcb; + u32 update = 0; + bool change; + int err; + + err = dcb_ets_check(dev, ets); + if (err) + return err; + + dcb = get_dcb_priv(nn); + + for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + dcb->prio2tc[i] = ets->prio_tc[i]; + dcb->tc_tx_pct[i] = ets->tc_tx_bw[i]; + dcb->tc_tsa[i] = ets->tc_tsa[i]; + } + + change = nfp_refresh_tc2idx(nn); + nfp_nic_fill_ets(nn); + dcb->ets_init = true; + if (change || !dcb->rate_init) { + err = update_dscp_maxrate(dev, &update); + if (err) { + nfp_warn(app->cpp, + "nfp dcbnl ieee setets ERROR:%d.", + err); + return err; + } + + dcb->rate_init = true; + } + nfp_nic_set_enable(nn, NFP_DCB_ALL_QOS_ENABLE, &update); + nfp_nic_set_trust(nn, &update); + err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ); + if (err) + return err; + + nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL, + update | NFP_DCB_MSG_MSK_TSA | NFP_DCB_MSG_MSK_PCT | + NFP_DCB_MSG_MSK_PCP); + + return nfp_net_mbox_reconfig_and_unlock(nn, cmd); +} + +static int nfp_nic_dcbnl_ieee_getmaxrate(struct net_device *dev, + struct ieee_maxrate *maxrate) +{ + struct nfp_net *nn = netdev_priv(dev); + struct nfp_dcb *dcb; + + dcb = get_dcb_priv(nn); + + for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + maxrate->tc_maxrate[i] = dcb->tc_maxrate[i]; + + return 0; +} + +static int nfp_nic_dcbnl_ieee_setmaxrate(struct net_device *dev, + struct ieee_maxrate *maxrate) +{ + const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE; + struct nfp_net *nn = netdev_priv(dev); + struct nfp_app *app = nn->app; + struct nfp_dcb *dcb; + u32 update = 0; + int err; + + err = nfp_fill_maxrate(nn, maxrate->tc_maxrate); + if (err) { + nfp_warn(app->cpp, + "nfp dcbnl ieee setmaxrate ERROR:%d.", + err); + return err; + } + + dcb = get_dcb_priv(nn); + + dcb->rate_init = true; + nfp_nic_set_enable(nn, NFP_DCB_ALL_QOS_ENABLE, &update); + nfp_nic_set_trust(nn, &update); + nfp_nic_ets_init(nn, &update); + + err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ); + if (err) + return err; + + nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL, + update | NFP_DCB_MSG_MSK_RATE); + + return nfp_net_mbox_reconfig_and_unlock(nn, cmd); +} + 
+static int nfp_nic_set_trust_status(struct nfp_net *nn, u8 status) +{ + const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE; + struct nfp_dcb *dcb; + u32 update = 0; + int err; + + dcb = get_dcb_priv(nn); + if (!dcb->rate_init) { + err = nfp_fill_maxrate(nn, dcb->tc_maxrate); + if (err) + return err; + + update |= NFP_DCB_MSG_MSK_RATE; + dcb->rate_init = true; + } + + err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ); + if (err) + return err; + + nfp_nic_ets_init(nn, &update); + writeb(status, dcb->dcbcfg_tbl + dcb->cfg_offset + + NFP_DCB_DATA_OFF_TRUST); + nfp_nic_set_enable(nn, NFP_DCB_ALL_QOS_ENABLE, &update); + nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL, + update | NFP_DCB_MSG_MSK_TRUST); + + err = nfp_net_mbox_reconfig_and_unlock(nn, cmd); + if (err) + return err; + + dcb->trust_status = status; + + return 0; +} + +static int nfp_nic_set_dscp2prio(struct nfp_net *nn, u8 dscp, u8 prio) +{ + const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE; + struct nfp_dcb *dcb; + u8 idx, tc; + int err; + + err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ); + if (err) + return err; + + dcb = get_dcb_priv(nn); + + tc = dcb->prio2tc[prio]; + idx = dcb->tc2idx[tc]; + + writeb(idx, dcb->dcbcfg_tbl + dcb->cfg_offset + + NFP_DCB_DATA_OFF_DSCP2IDX + dscp); + + nn_writel(nn, nn->tlv_caps.mbox_off + + NFP_NET_CFG_MBOX_SIMPLE_VAL, NFP_DCB_MSG_MSK_DSCP); + + err = nfp_net_mbox_reconfig_and_unlock(nn, cmd); + if (err) + return err; + + dcb->dscp2prio[dscp] = prio; + + return 0; +} + +static int nfp_nic_dcbnl_ieee_setapp(struct net_device *dev, + struct dcb_app *app) +{ + struct nfp_net *nn = netdev_priv(dev); + struct dcb_app old_app; + struct nfp_dcb *dcb; + bool is_new; + int err; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) + return -EINVAL; + + dcb = get_dcb_priv(nn); + + /* Save the old entry info */ + old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP; + old_app.protocol = app->protocol; + old_app.priority = dcb->dscp2prio[app->protocol]; + + /* Check trust status */ + if (!dcb->dscp_cnt) { + err = nfp_nic_set_trust_status(nn, NFP_DCB_TRUST_DSCP); + if (err) + return err; + } + + /* Check if the new mapping is same as old or in init stage */ + if (app->priority != old_app.priority || app->priority == 0) { + err = nfp_nic_set_dscp2prio(nn, app->protocol, app->priority); + if (err) + return err; + } + + /* Delete the old entry if exists */ + is_new = !!dcb_ieee_delapp(dev, &old_app); + + /* Add new entry and update counter */ + err = dcb_ieee_setapp(dev, app); + if (err) + return err; + + if (is_new) + dcb->dscp_cnt++; + + return 0; +} + +static int nfp_nic_dcbnl_ieee_delapp(struct net_device *dev, + struct dcb_app *app) +{ + struct nfp_net *nn = netdev_priv(dev); + struct nfp_dcb *dcb; + int err; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) + return -EINVAL; + + dcb = get_dcb_priv(nn); + + /* Check if the dcb_app param match fw */ + if (app->priority != dcb->dscp2prio[app->protocol]) + return -ENOENT; + + /* Set fw dscp mapping to 0 */ + err = nfp_nic_set_dscp2prio(nn, app->protocol, 0); + if (err) + return err; + + /* Delete app from dcb list */ + err = dcb_ieee_delapp(dev, app); + if (err) + return err; + + /* Decrease dscp counter */ + dcb->dscp_cnt--; + + /* If no dscp mapping is configured, trust pcp */ + if (dcb->dscp_cnt == 0) + return nfp_nic_set_trust_status(nn, NFP_DCB_TRUST_PCP); + + return 0; +} + +static const struct dcbnl_rtnl_ops nfp_nic_dcbnl_ops = { + /* ieee 802.1Qaz std */ + .ieee_getets = nfp_nic_dcbnl_ieee_getets, + .ieee_setets = nfp_nic_dcbnl_ieee_setets, + 
.ieee_getmaxrate = nfp_nic_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = nfp_nic_dcbnl_ieee_setmaxrate, + .ieee_setapp = nfp_nic_dcbnl_ieee_setapp, + .ieee_delapp = nfp_nic_dcbnl_ieee_delapp, +}; + +int nfp_nic_dcb_init(struct nfp_net *nn) +{ + struct nfp_app *app = nn->app; + struct nfp_dcb *dcb; + int err; + + dcb = get_dcb_priv(nn); + dcb->cfg_offset = NFP_DCB_CFG_STRIDE * nn->id; + dcb->dcbcfg_tbl = nfp_pf_map_rtsym(app->pf, "net.dcbcfg_tbl", + "_abi_dcb_cfg", + dcb->cfg_offset, &dcb->dcbcfg_tbl_area); + if (IS_ERR(dcb->dcbcfg_tbl)) { + if (PTR_ERR(dcb->dcbcfg_tbl) != -ENOENT) { + err = PTR_ERR(dcb->dcbcfg_tbl); + dcb->dcbcfg_tbl = NULL; + nfp_err(app->cpp, + "Failed to map dcbcfg_tbl area, min_size %u.\n", + dcb->cfg_offset); + return err; + } + dcb->dcbcfg_tbl = NULL; + } + + if (dcb->dcbcfg_tbl) { + for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + dcb->prio2tc[i] = i; + dcb->tc2idx[i] = i; + dcb->tc_tx_pct[i] = 0; + dcb->tc_maxrate[i] = 0; + dcb->tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR; + } + dcb->trust_status = NFP_DCB_TRUST_INVALID; + dcb->rate_init = false; + dcb->ets_init = false; + + nn->dp.netdev->dcbnl_ops = &nfp_nic_dcbnl_ops; + } + + return 0; +} + +void nfp_nic_dcb_clean(struct nfp_net *nn) +{ + struct nfp_dcb *dcb; + + dcb = get_dcb_priv(nn); + if (dcb->dcbcfg_tbl_area) + nfp_cpp_area_release_free(dcb->dcbcfg_tbl_area); +} diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c index aea8579206ee..f78c2447d45b 100644 --- a/drivers/net/ethernet/netronome/nfp/nic/main.c +++ b/drivers/net/ethernet/netronome/nfp/nic/main.c @@ -5,6 +5,8 @@ #include "../nfpcore/nfp_nsp.h" #include "../nfp_app.h" #include "../nfp_main.h" +#include "../nfp_net.h" +#include "main.h" static int nfp_nic_init(struct nfp_app *app) { @@ -28,13 +30,46 @@ static void nfp_nic_sriov_disable(struct nfp_app *app) { } +static int nfp_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn) +{ + nfp_nic_dcb_init(nn); + + return 0; +} + +static int nfp_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, + unsigned int id) +{ + struct nfp_app_nic_private *app_pri = nn->app_priv; + int err; + + err = nfp_app_nic_vnic_alloc(app, nn, id); + if (err) + return err; + + if (sizeof(*app_pri)) { + nn->app_priv = kzalloc(sizeof(*app_pri), GFP_KERNEL); + if (!nn->app_priv) + return -ENOMEM; + } + + return 0; +} + +static void nfp_nic_vnic_free(struct nfp_app *app, struct nfp_net *nn) +{ + kfree(nn->app_priv); +} + const struct nfp_app_type app_nic = { .id = NFP_APP_CORE_NIC, .name = "nic", .init = nfp_nic_init, - .vnic_alloc = nfp_app_nic_vnic_alloc, - + .vnic_alloc = nfp_nic_vnic_alloc, + .vnic_free = nfp_nic_vnic_free, .sriov_enable = nfp_nic_sriov_enable, .sriov_disable = nfp_nic_sriov_disable, + + .vnic_init = nfp_nic_vnic_init, }; diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.h b/drivers/net/ethernet/netronome/nfp/nic/main.h new file mode 100644 index 000000000000..7ba04451b8ba --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nic/main.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2023 Corigine, Inc. 
*/ + +#ifndef __NFP_NIC_H__ +#define __NFP_NIC_H__ 1 + +#include <linux/netdevice.h> + +#ifdef CONFIG_DCB +/* DCB feature definitions */ +#define NFP_NET_MAX_DSCP 4 +#define NFP_NET_MAX_TC IEEE_8021QAZ_MAX_TCS +#define NFP_NET_MAX_PRIO 8 +#define NFP_DCB_CFG_STRIDE 256 + +struct nfp_dcb { + u8 dscp2prio[NFP_NET_MAX_DSCP]; + u8 prio2tc[NFP_NET_MAX_PRIO]; + u8 tc2idx[IEEE_8021QAZ_MAX_TCS]; + u64 tc_maxrate[IEEE_8021QAZ_MAX_TCS]; + u8 tc_tx_pct[IEEE_8021QAZ_MAX_TCS]; + u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; + u8 dscp_cnt; + u8 trust_status; + bool rate_init; + bool ets_init; + + struct nfp_cpp_area *dcbcfg_tbl_area; + u8 __iomem *dcbcfg_tbl; + u32 cfg_offset; +}; + +int nfp_nic_dcb_init(struct nfp_net *nn); +void nfp_nic_dcb_clean(struct nfp_net *nn); +#else +static inline int nfp_nic_dcb_init(struct nfp_net *nn) {return 0; } +static inline void nfp_nic_dcb_clean(struct nfp_net *nn) {} +#endif + +struct nfp_app_nic_private { +#ifdef CONFIG_DCB + struct nfp_dcb dcb; +#endif +}; + +#endif diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 62320be4de5a..56e02cba0b8a 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1081,40 +1081,59 @@ static const struct ethtool_ops nixge_ethtool_ops = { .get_link = ethtool_op_get_link, }; -static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg) +static int nixge_mdio_read_c22(struct mii_bus *bus, int phy_id, int reg) { struct nixge_priv *priv = bus->priv; u32 status, tmp; int err; u16 device; - if (reg & MII_ADDR_C45) { - device = (reg >> 16) & 0x1f; + device = reg & 0x1f; - nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff); + tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) | + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); - tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) - | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); - nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); - nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, + !status, 10, 1000); + if (err) { + dev_err(priv->dev, "timeout setting read command"); + return err; + } - err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, - !status, 10, 1000); - if (err) { - dev_err(priv->dev, "timeout setting address"); - return err; - } + status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA); - tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) | - NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); - } else { - device = reg & 0x1f; + return status; +} - tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) | - NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); +static int nixge_mdio_read_c45(struct mii_bus *bus, int phy_id, int device, + int reg) +{ + struct nixge_priv *priv = bus->priv; + u32 status, tmp; + int err; + + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff); + + tmp = NIXGE_MDIO_CLAUSE45 | + NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) | + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); + + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); + + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, + !status, 10, 1000); + if (err) { + dev_err(priv->dev, "timeout setting address"); + return err; } + tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) | + NIXGE_MDIO_ADDR(phy_id) | 
NIXGE_MDIO_MMD(device); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); @@ -1130,57 +1149,65 @@ static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg) return status; } -static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) +static int nixge_mdio_write_c22(struct mii_bus *bus, int phy_id, int reg, + u16 val) { struct nixge_priv *priv = bus->priv; u32 status, tmp; u16 device; int err; - if (reg & MII_ADDR_C45) { - device = (reg >> 16) & 0x1f; + device = reg & 0x1f; - nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff); + tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) | + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); - tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) - | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); - nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); - nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, + !status, 10, 1000); + if (err) + dev_err(priv->dev, "timeout setting write command"); - err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, - !status, 10, 1000); - if (err) { - dev_err(priv->dev, "timeout setting address"); - return err; - } + return err; +} - tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE) - | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); +static int nixge_mdio_write_c45(struct mii_bus *bus, int phy_id, + int device, int reg, u16 val) +{ + struct nixge_priv *priv = bus->priv; + u32 status, tmp; + int err; - nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val); - nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); - err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, - !status, 10, 1000); - if (err) - dev_err(priv->dev, "timeout setting write command"); - } else { - device = reg & 0x1f; + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff); - tmp = NIXGE_MDIO_CLAUSE22 | - NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) | - NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); + tmp = NIXGE_MDIO_CLAUSE45 | + NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) | + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); - nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val); - nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); - nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); - err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, - !status, 10, 1000); - if (err) - dev_err(priv->dev, "timeout setting write command"); + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, + !status, 10, 1000); + if (err) { + dev_err(priv->dev, "timeout setting address"); + return err; } + tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE) | + NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); + + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val); + nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); + + err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, + !status, 10, 1000); + if (err) + dev_err(priv->dev, "timeout setting write command"); + return err; } @@ -1195,8 +1222,10 @@ static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np) snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev)); 
bus->priv = priv; bus->name = "nixge_mii_bus"; - bus->read = nixge_mdio_read; - bus->write = nixge_mdio_write; + bus->read = nixge_mdio_read_c22; + bus->write = nixge_mdio_write_c22; + bus->read_c45 = nixge_mdio_read_c45; + bus->write_c45 = nixge_mdio_write_c45; bus->parent = priv->dev; priv->mii_bus = bus; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 0848b5529d48..2bf18748581d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -831,7 +831,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, * @p_hwfn: HW device data. * @p_ptt: PTT window for writing the registers. * @vf: VF info data. - * @enable: The actual permision for this VF. + * @enable: The actual permission for this VF. * * In E4, queue zone permission table size is 320x9. There * are 320 VF queues for single engine device (256 for dual diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 27b1663c476e..39d24e07f306 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -12,6 +12,7 @@ #include "rmnet_handlers.h" #include "rmnet_vnd.h" #include "rmnet_private.h" +#include "rmnet_map.h" /* Local Definitions and Declarations */ @@ -39,6 +40,8 @@ static int rmnet_unregister_real_device(struct net_device *real_dev) if (port->nr_rmnet_devs) return -EINVAL; + rmnet_map_tx_aggregate_exit(port); + netdev_rx_handler_unregister(real_dev); kfree(port); @@ -79,6 +82,8 @@ static int rmnet_register_real_device(struct net_device *real_dev, for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++) INIT_HLIST_HEAD(&port->muxed_ep[entry]); + rmnet_map_tx_aggregate_init(port); + netdev_dbg(real_dev, "registered with rmnet\n"); return 0; } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 3d3cba56c516..ed112d51ac5a 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -6,6 +6,7 @@ */ #include <linux/skbuff.h> +#include <linux/time.h> #include <net/gro_cells.h> #ifndef _RMNET_CONFIG_H_ @@ -19,6 +20,12 @@ struct rmnet_endpoint { struct hlist_node hlnode; }; +struct rmnet_egress_agg_params { + u32 bytes; + u32 count; + u64 time_nsec; +}; + /* One instance of this structure is instantiated for each real_dev associated * with rmnet. 
*/ @@ -30,6 +37,19 @@ struct rmnet_port { struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP]; struct net_device *bridge_ep; struct net_device *rmnet_dev; + + /* Egress aggregation information */ + struct rmnet_egress_agg_params egress_agg_params; + /* Protect aggregation related elements */ + spinlock_t agg_lock; + struct sk_buff *skbagg_head; + struct sk_buff *skbagg_tail; + int agg_state; + u8 agg_count; + struct timespec64 agg_time; + struct timespec64 agg_last; + struct hrtimer hrtimer; + struct work_struct agg_wq; }; extern struct rtnl_link_ops rmnet_link_ops; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index a313242a762e..9f3479500f85 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -164,8 +164,18 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, map_header->mux_id = mux_id; - skb->protocol = htons(ETH_P_MAP); + if (READ_ONCE(port->egress_agg_params.count) > 1) { + unsigned int len; + + len = rmnet_map_tx_aggregate(skb, port, orig_dev); + if (likely(len)) { + rmnet_vnd_tx_fixup_len(len, orig_dev); + return -EINPROGRESS; + } + return -ENOMEM; + } + skb->protocol = htons(ETH_P_MAP); return 0; } @@ -235,6 +245,7 @@ void rmnet_egress_handler(struct sk_buff *skb) struct rmnet_port *port; struct rmnet_priv *priv; u8 mux_id; + int err; sk_pacing_shift_update(skb->sk, 8); @@ -247,8 +258,11 @@ void rmnet_egress_handler(struct sk_buff *skb) if (!port) goto drop; - if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev)) + err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev); + if (err == -ENOMEM) goto drop; + else if (err == -EINPROGRESS) + return; rmnet_vnd_tx_fixup(skb, orig_dev); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h index 2b033060fc20..b70284095568 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h @@ -53,5 +53,11 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, struct net_device *orig_dev, int csum_type); int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len); +unsigned int rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port, + struct net_device *orig_dev); +void rmnet_map_tx_aggregate_init(struct rmnet_port *port); +void rmnet_map_tx_aggregate_exit(struct rmnet_port *port); +void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size, + u32 count, u32 time); #endif /* _RMNET_MAP_H_ */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index ba194698cc14..a5e3d1a88305 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -12,6 +12,7 @@ #include "rmnet_config.h" #include "rmnet_map.h" #include "rmnet_private.h" +#include "rmnet_vnd.h" #define RMNET_MAP_DEAGGR_SPACING 64 #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2) @@ -518,3 +519,193 @@ int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, return 0; } + +#define RMNET_AGG_BYPASS_TIME_NSEC 10000000L + +static void reset_aggr_params(struct rmnet_port *port) +{ + port->skbagg_head = NULL; + port->agg_count = 0; + port->agg_state = 0; + memset(&port->agg_time, 0, sizeof(struct timespec64)); +} + +static void rmnet_send_skb(struct rmnet_port *port, struct sk_buff *skb) +{ + if (skb_needs_linearize(skb, port->dev->features)) 
{ + if (unlikely(__skb_linearize(skb))) { + struct rmnet_priv *priv; + + priv = netdev_priv(port->rmnet_dev); + this_cpu_inc(priv->pcpu_stats->stats.tx_drops); + dev_kfree_skb_any(skb); + return; + } + } + + dev_queue_xmit(skb); +} + +static void rmnet_map_flush_tx_packet_work(struct work_struct *work) +{ + struct sk_buff *skb = NULL; + struct rmnet_port *port; + + port = container_of(work, struct rmnet_port, agg_wq); + + spin_lock_bh(&port->agg_lock); + if (likely(port->agg_state == -EINPROGRESS)) { + /* Buffer may have already been shipped out */ + if (likely(port->skbagg_head)) { + skb = port->skbagg_head; + reset_aggr_params(port); + } + port->agg_state = 0; + } + + spin_unlock_bh(&port->agg_lock); + if (skb) + rmnet_send_skb(port, skb); +} + +static enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t) +{ + struct rmnet_port *port; + + port = container_of(t, struct rmnet_port, hrtimer); + + schedule_work(&port->agg_wq); + + return HRTIMER_NORESTART; +} + +unsigned int rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port, + struct net_device *orig_dev) +{ + struct timespec64 diff, last; + unsigned int len = skb->len; + struct sk_buff *agg_skb; + int size; + + spin_lock_bh(&port->agg_lock); + memcpy(&last, &port->agg_last, sizeof(struct timespec64)); + ktime_get_real_ts64(&port->agg_last); + + if (!port->skbagg_head) { + /* Check to see if we should agg first. If the traffic is very + * sparse, don't aggregate. + */ +new_packet: + diff = timespec64_sub(port->agg_last, last); + size = port->egress_agg_params.bytes - skb->len; + + if (size < 0) { + /* dropped */ + spin_unlock_bh(&port->agg_lock); + return 0; + } + + if (diff.tv_sec > 0 || diff.tv_nsec > RMNET_AGG_BYPASS_TIME_NSEC || + size == 0) + goto no_aggr; + + port->skbagg_head = skb_copy_expand(skb, 0, size, GFP_ATOMIC); + if (!port->skbagg_head) + goto no_aggr; + + dev_kfree_skb_any(skb); + port->skbagg_head->protocol = htons(ETH_P_MAP); + port->agg_count = 1; + ktime_get_real_ts64(&port->agg_time); + skb_frag_list_init(port->skbagg_head); + goto schedule; + } + diff = timespec64_sub(port->agg_last, port->agg_time); + size = port->egress_agg_params.bytes - port->skbagg_head->len; + + if (skb->len > size) { + agg_skb = port->skbagg_head; + reset_aggr_params(port); + spin_unlock_bh(&port->agg_lock); + hrtimer_cancel(&port->hrtimer); + rmnet_send_skb(port, agg_skb); + spin_lock_bh(&port->agg_lock); + goto new_packet; + } + + if (skb_has_frag_list(port->skbagg_head)) + port->skbagg_tail->next = skb; + else + skb_shinfo(port->skbagg_head)->frag_list = skb; + + port->skbagg_head->len += skb->len; + port->skbagg_head->data_len += skb->len; + port->skbagg_head->truesize += skb->truesize; + port->skbagg_tail = skb; + port->agg_count++; + + if (diff.tv_sec > 0 || diff.tv_nsec > port->egress_agg_params.time_nsec || + port->agg_count >= port->egress_agg_params.count || + port->skbagg_head->len == port->egress_agg_params.bytes) { + agg_skb = port->skbagg_head; + reset_aggr_params(port); + spin_unlock_bh(&port->agg_lock); + hrtimer_cancel(&port->hrtimer); + rmnet_send_skb(port, agg_skb); + return len; + } + +schedule: + if (!hrtimer_active(&port->hrtimer) && port->agg_state != -EINPROGRESS) { + port->agg_state = -EINPROGRESS; + hrtimer_start(&port->hrtimer, + ns_to_ktime(port->egress_agg_params.time_nsec), + HRTIMER_MODE_REL); + } + spin_unlock_bh(&port->agg_lock); + + return len; + +no_aggr: + spin_unlock_bh(&port->agg_lock); + skb->protocol = htons(ETH_P_MAP); + dev_queue_xmit(skb); + + return len; +} + +void 
rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size, + u32 count, u32 time) +{ + spin_lock_bh(&port->agg_lock); + port->egress_agg_params.bytes = size; + WRITE_ONCE(port->egress_agg_params.count, count); + port->egress_agg_params.time_nsec = time * NSEC_PER_USEC; + spin_unlock_bh(&port->agg_lock); +} + +void rmnet_map_tx_aggregate_init(struct rmnet_port *port) +{ + hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + port->hrtimer.function = rmnet_map_flush_tx_packet_queue; + spin_lock_init(&port->agg_lock); + rmnet_map_update_ul_agg_config(port, 4096, 1, 800); + INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work); +} + +void rmnet_map_tx_aggregate_exit(struct rmnet_port *port) +{ + hrtimer_cancel(&port->hrtimer); + cancel_work_sync(&port->agg_wq); + + spin_lock_bh(&port->agg_lock); + if (port->agg_state == -EINPROGRESS) { + if (port->skbagg_head) { + dev_kfree_skb_any(port->skbagg_head); + reset_aggr_params(port); + } + + port->agg_state = 0; + } + spin_unlock_bh(&port->agg_lock); +} diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 3f5e6572d20e..046b5f7d8e7c 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -29,7 +29,7 @@ void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev) u64_stats_update_end(&pcpu_ptr->syncp); } -void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev) +void rmnet_vnd_tx_fixup_len(unsigned int len, struct net_device *dev) { struct rmnet_priv *priv = netdev_priv(dev); struct rmnet_pcpu_stats *pcpu_ptr; @@ -38,10 +38,15 @@ void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev) u64_stats_update_begin(&pcpu_ptr->syncp); pcpu_ptr->stats.tx_pkts++; - pcpu_ptr->stats.tx_bytes += skb->len; + pcpu_ptr->stats.tx_bytes += len; u64_stats_update_end(&pcpu_ptr->syncp); } +void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev) +{ + rmnet_vnd_tx_fixup_len(skb->len, dev); +} + /* Network Device Operations */ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, @@ -210,7 +215,52 @@ static void rmnet_get_ethtool_stats(struct net_device *dev, memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64)); } +static int rmnet_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_port *port; + + port = rmnet_get_port_rtnl(priv->real_dev); + + memset(kernel_coal, 0, sizeof(*kernel_coal)); + kernel_coal->tx_aggr_max_bytes = port->egress_agg_params.bytes; + kernel_coal->tx_aggr_max_frames = port->egress_agg_params.count; + kernel_coal->tx_aggr_time_usecs = div_u64(port->egress_agg_params.time_nsec, + NSEC_PER_USEC); + + return 0; +} + +static int rmnet_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_port *port; + + port = rmnet_get_port_rtnl(priv->real_dev); + + if (kernel_coal->tx_aggr_max_frames < 1 || kernel_coal->tx_aggr_max_frames > 64) + return -EINVAL; + + if (kernel_coal->tx_aggr_max_bytes > 32768) + return -EINVAL; + + rmnet_map_update_ul_agg_config(port, kernel_coal->tx_aggr_max_bytes, + kernel_coal->tx_aggr_max_frames, + kernel_coal->tx_aggr_time_usecs); + + return 0; +} + static const struct ethtool_ops rmnet_ethtool_ops = { + 
.supported_coalesce_params = ETHTOOL_COALESCE_TX_AGGR, + .get_coalesce = rmnet_get_coalesce, + .set_coalesce = rmnet_set_coalesce, .get_ethtool_stats = rmnet_get_ethtool_stats, .get_strings = rmnet_get_strings, .get_sset_count = rmnet_get_sset_count, diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h index dc3a4443ef0a..c2b2baf86894 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h @@ -16,6 +16,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, struct rmnet_endpoint *ep); void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev); +void rmnet_vnd_tx_fixup_len(unsigned int len, struct net_device *dev); void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev); void rmnet_vnd_setup(struct net_device *dev); int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index dadd61bccfe7..45147a1016be 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -576,6 +576,7 @@ struct rtl8169_tc_offsets { enum rtl_flag { RTL_FLAG_TASK_ENABLED = 0, RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_TASK_TX_TIMEOUT, RTL_FLAG_MAX }; @@ -3928,7 +3929,7 @@ static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct rtl8169_private *tp = netdev_priv(dev); - rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); + rtl_schedule_task(tp, RTL_FLAG_TASK_TX_TIMEOUT); } static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len, @@ -4522,6 +4523,7 @@ static void rtl_task(struct work_struct *work) { struct rtl8169_private *tp = container_of(work, struct rtl8169_private, wk.work); + int ret; rtnl_lock(); @@ -4529,7 +4531,27 @@ static void rtl_task(struct work_struct *work) !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) goto out_unlock; + if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) { + /* if chip isn't accessible, reset bus to revive it */ + if (RTL_R32(tp, TxConfig) == ~0) { + ret = pci_reset_bus(tp->pci_dev); + if (ret < 0) { + netdev_err(tp->dev, "Can't reset secondary PCI bus, detach NIC\n"); + netif_device_detach(tp->dev); + goto out_unlock; + } + } + + /* ASPM compatibility issues are a typical reason for tx timeouts */ + ret = pci_disable_link_state(tp->pci_dev, PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_L0S); + if (!ret) + netdev_warn_once(tp->dev, "ASPM disabled on Tx timeout\n"); + goto reset; + } + if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) { +reset: rtl_reset_work(tp); netif_wake_queue(tp->dev); } diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 6441892636db..885fdb077b62 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -1024,34 +1024,18 @@ static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read, return ret; } -static int rswitch_etha_mii_read(struct mii_bus *bus, int addr, int regnum) +static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad, + int regad) { struct rswitch_etha *etha = bus->priv; - int mode, devad, regad; - - mode = regnum & MII_ADDR_C45; - devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f; - regad = regnum & MII_REGADDR_C45_MASK; - - /* Not support Clause 22 access method */ - if (!mode) - return -EOPNOTSUPP; return 
rswitch_etha_set_access(etha, true, addr, devad, regad, 0); } -static int rswitch_etha_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val) +static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad, + int regad, u16 val) { struct rswitch_etha *etha = bus->priv; - int mode, devad, regad; - - mode = regnum & MII_ADDR_C45; - devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f; - regad = regnum & MII_REGADDR_C45_MASK; - - /* Not support Clause 22 access method */ - if (!mode) - return -EOPNOTSUPP; return rswitch_etha_set_access(etha, false, addr, devad, regad, val); } @@ -1142,8 +1126,8 @@ static int rswitch_mii_register(struct rswitch_device *rdev) mii_bus->name = "rswitch_mii"; sprintf(mii_bus->id, "etha%d", rdev->etha->index); mii_bus->priv = rdev->etha; - mii_bus->read = rswitch_etha_mii_read; - mii_bus->write = rswitch_etha_mii_write; + mii_bus->read_c45 = rswitch_etha_mii_read_c45; + mii_bus->write_c45 = rswitch_etha_mii_write_c45; mii_bus->parent = &rdev->priv->pdev->dev; mdio_np = rswitch_get_mdio_node(rdev); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 71a499113308..ed17163d7811 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3044,23 +3044,46 @@ static int sh_mdio_release(struct sh_eth_private *mdp) return 0; } -static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg) +static int sh_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg) { int res; pm_runtime_get_sync(bus->parent); - res = mdiobb_read(bus, phy, reg); + res = mdiobb_read_c22(bus, phy, reg); pm_runtime_put(bus->parent); return res; } -static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) +static int sh_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val) { int res; pm_runtime_get_sync(bus->parent); - res = mdiobb_write(bus, phy, reg, val); + res = mdiobb_write_c22(bus, phy, reg, val); + pm_runtime_put(bus->parent); + + return res; +} + +static int sh_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, int reg) +{ + int res; + + pm_runtime_get_sync(bus->parent); + res = mdiobb_read_c45(bus, phy, devad, reg); + pm_runtime_put(bus->parent); + + return res; +} + +static int sh_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad, + int reg, u16 val) +{ + int res; + + pm_runtime_get_sync(bus->parent); + res = mdiobb_write_c45(bus, phy, devad, reg, val); pm_runtime_put(bus->parent); return res; @@ -3091,8 +3114,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp, return -ENOMEM; /* Wrap accessors with Runtime PM-aware ops */ - mdp->mii_bus->read = sh_mdiobb_read; - mdp->mii_bus->write = sh_mdiobb_write; + mdp->mii_bus->read = sh_mdiobb_read_c22; + mdp->mii_bus->write = sh_mdiobb_write_c22; + mdp->mii_bus->read_c45 = sh_mdiobb_read_c45; + mdp->mii_bus->write_c45 = sh_mdiobb_write_c45; /* Hook up MII support for ethtool */ mdp->mii_bus->name = "sh_mii"; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c index fceb6d637235..0227223c06fa 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c @@ -50,12 +50,12 @@ static void sxgbe_mdio_ctrl_data(struct sxgbe_priv_data *sp, u32 cmd, } static void sxgbe_mdio_c45(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr, - int phyreg, u16 phydata) + int devad, int phyreg, u16 phydata) { u32 reg; /* set mdio address register */ - reg = ((phyreg >> 16) & 0x1f) << 21; + reg = (devad & 0x1f) << 21; 
reg |= (phyaddr << 16) | (phyreg & 0xffff); writel(reg, sp->ioaddr + sp->hw->mii.addr); @@ -76,8 +76,8 @@ static void sxgbe_mdio_c22(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr, sxgbe_mdio_ctrl_data(sp, cmd, phydata); } -static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr, - int phyreg, u16 phydata) +static int sxgbe_mdio_access_c22(struct sxgbe_priv_data *sp, u32 cmd, + int phyaddr, int phyreg, u16 phydata) { const struct mii_regs *mii = &sp->hw->mii; int rc; @@ -86,33 +86,46 @@ static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr, if (rc < 0) return rc; - if (phyreg & MII_ADDR_C45) { - sxgbe_mdio_c45(sp, cmd, phyaddr, phyreg, phydata); - } else { - /* Ports 0-3 only support C22. */ - if (phyaddr >= 4) - return -ENODEV; + /* Ports 0-3 only support C22. */ + if (phyaddr >= 4) + return -ENODEV; - sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata); - } + sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata); + + return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data); +} + +static int sxgbe_mdio_access_c45(struct sxgbe_priv_data *sp, u32 cmd, + int phyaddr, int devad, int phyreg, + u16 phydata) +{ + const struct mii_regs *mii = &sp->hw->mii; + int rc; + + rc = sxgbe_mdio_busy_wait(sp->ioaddr, mii->data); + if (rc < 0) + return rc; + + sxgbe_mdio_c45(sp, cmd, phyaddr, devad, phyreg, phydata); return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data); } /** - * sxgbe_mdio_read + * sxgbe_mdio_read_c22 * @bus: points to the mii_bus structure * @phyaddr: address of phy port * @phyreg: address of register with in phy register - * Description: this function used for C45 and C22 MDIO Read + * Description: this function used for C22 MDIO Read */ -static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) +static int sxgbe_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg) { struct net_device *ndev = bus->priv; struct sxgbe_priv_data *priv = netdev_priv(ndev); int rc; - rc = sxgbe_mdio_access(priv, SXGBE_SMA_READ_CMD, phyaddr, phyreg, 0); + rc = sxgbe_mdio_access_c22(priv, SXGBE_SMA_READ_CMD, phyaddr, + phyreg, 0); if (rc < 0) return rc; @@ -120,21 +133,63 @@ static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) } /** - * sxgbe_mdio_write + * sxgbe_mdio_read_c45 + * @bus: points to the mii_bus structure + * @phyaddr: address of phy port + * @devad: device (MMD) address + * @phyreg: address of register with in phy register + * Description: this function used for C45 MDIO Read + */ +static int sxgbe_mdio_read_c45(struct mii_bus *bus, int phyaddr, int devad, + int phyreg) +{ + struct net_device *ndev = bus->priv; + struct sxgbe_priv_data *priv = netdev_priv(ndev); + int rc; + + rc = sxgbe_mdio_access_c45(priv, SXGBE_SMA_READ_CMD, phyaddr, + devad, phyreg, 0); + if (rc < 0) + return rc; + + return readl(priv->ioaddr + priv->hw->mii.data) & 0xffff; +} + +/** + * sxgbe_mdio_write_c22 + * @bus: points to the mii_bus structure + * @phyaddr: address of phy port + * @phyreg: address of phy registers + * @phydata: data to be written into phy register + * Description: this function is used for C22 MDIO write + */ +static int sxgbe_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg, + u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct sxgbe_priv_data *priv = netdev_priv(ndev); + + return sxgbe_mdio_access_c22(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg, + phydata); +} + +/** + * sxgbe_mdio_write_c45 * @bus: points to the mii_bus structure * @phyaddr: address of phy port * @phyreg: address of phy registers + * 
@devad: device (MMD) address * @phydata: data to be written into phy register - * Description: this function is used for C45 and C22 MDIO write + * Description: this function is used for C45 MDIO write */ -static int sxgbe_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, - u16 phydata) +static int sxgbe_mdio_write_c45(struct mii_bus *bus, int phyaddr, int devad, + int phyreg, u16 phydata) { struct net_device *ndev = bus->priv; struct sxgbe_priv_data *priv = netdev_priv(ndev); - return sxgbe_mdio_access(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg, - phydata); + return sxgbe_mdio_access_c45(priv, SXGBE_SMA_WRITE_CMD, phyaddr, + devad, phyreg, phydata); } int sxgbe_mdio_register(struct net_device *ndev) @@ -161,8 +216,10 @@ int sxgbe_mdio_register(struct net_device *ndev) /* assign mii bus fields */ mdio_bus->name = "sxgbe"; - mdio_bus->read = &sxgbe_mdio_read; - mdio_bus->write = &sxgbe_mdio_write; + mdio_bus->read = sxgbe_mdio_read_c22; + mdio_bus->write = sxgbe_mdio_write_c22; + mdio_bus->read_c45 = sxgbe_mdio_read_c45; + mdio_bus->write_c45 = sxgbe_mdio_write_c45; snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x", mdio_bus->name, priv->plat->bus_id); mdio_bus->priv = ndev; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index bd52fb7cf486..ac8580f501e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -31,6 +31,12 @@ #define GPR_ENET_QOS_CLK_TX_CLK_SEL (0x1 << 20) #define GPR_ENET_QOS_RGMII_EN (0x1 << 21) +#define MX93_GPR_ENET_QOS_INTF_MODE_MASK GENMASK(3, 0) +#define MX93_GPR_ENET_QOS_INTF_SEL_MII (0x0 << 1) +#define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1) +#define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1) +#define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0) + struct imx_dwmac_ops { u32 addr_width; bool mac_rgmii_txclk_auto_adj; @@ -90,6 +96,35 @@ imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat) return ret; } +static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat) +{ + struct imx_priv_data *dwmac = plat_dat->bsp_priv; + int val; + + switch (plat_dat->interface) { + case PHY_INTERFACE_MODE_MII: + val = MX93_GPR_ENET_QOS_INTF_SEL_MII; + break; + case PHY_INTERFACE_MODE_RMII: + val = MX93_GPR_ENET_QOS_INTF_SEL_RMII; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + val = MX93_GPR_ENET_QOS_INTF_SEL_RGMII; + break; + default: + dev_dbg(dwmac->dev, "imx dwmac doesn't support %d interface\n", + plat_dat->interface); + return -EINVAL; + } + + val |= MX93_GPR_ENET_QOS_CLK_GEN_EN; + return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off, + MX93_GPR_ENET_QOS_INTF_MODE_MASK, val); +}; + static int imx_dwmac_clks_config(void *priv, bool enabled) { struct imx_priv_data *dwmac = priv; @@ -188,7 +223,9 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev) } dwmac->clk_mem = NULL; - if (of_machine_is_compatible("fsl,imx8dxl")) { + + if (of_machine_is_compatible("fsl,imx8dxl") || + of_machine_is_compatible("fsl,imx93")) { dwmac->clk_mem = devm_clk_get(dev, "mem"); if (IS_ERR(dwmac->clk_mem)) { dev_err(dev, "failed to get mem clock\n"); @@ -196,10 +233,11 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev) } } - if (of_machine_is_compatible("fsl,imx8mp")) { - /* Binding doc describes the property: - is required by i.MX8MP. - is optional for i.MX8DXL. 
+ if (of_machine_is_compatible("fsl,imx8mp") || + of_machine_is_compatible("fsl,imx93")) { + /* Binding doc describes the property: + * is required by i.MX8MP, i.MX93. + * is optional for i.MX8DXL. */ dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode"); if (IS_ERR(dwmac->intf_regmap)) @@ -296,9 +334,16 @@ static struct imx_dwmac_ops imx8dxl_dwmac_data = { .set_intf_mode = imx8dxl_set_intf_mode, }; +static struct imx_dwmac_ops imx93_dwmac_data = { + .addr_width = 32, + .mac_rgmii_txclk_auto_adj = true, + .set_intf_mode = imx93_set_intf_mode, +}; + static const struct of_device_id imx_dwmac_match[] = { { .compatible = "nxp,imx8mp-dwmac-eqos", .data = &imx8mp_dwmac_data }, { .compatible = "nxp,imx8dxl-dwmac-eqos", .data = &imx8dxl_dwmac_data }, + { .compatible = "nxp,imx93-dwmac-eqos", .data = &imx93_dwmac_data }, { } }; MODULE_DEVICE_TABLE(of, imx_dwmac_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 5f177ea80725..21aaa2730ac8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -45,8 +45,8 @@ #define MII_XGMAC_PA_SHIFT 16 #define MII_XGMAC_DA_SHIFT 21 -static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr, - int phyreg, u32 *hw_addr) +static void stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr, + int devad, int phyreg, u32 *hw_addr) { u32 tmp; @@ -56,19 +56,14 @@ static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr, writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P); *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0xffff); - *hw_addr |= (phyreg >> MII_DEVADDR_C45_SHIFT) << MII_XGMAC_DA_SHIFT; - return 0; + *hw_addr |= devad << MII_XGMAC_DA_SHIFT; } -static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr, - int phyreg, u32 *hw_addr) +static void stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr, + int phyreg, u32 *hw_addr) { u32 tmp; - /* HW does not support C22 addr >= 4 */ - if (phyaddr > MII_XGMAC_MAX_C22ADDR) - return -ENODEV; - /* Set port as Clause 22 */ tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P); tmp &= ~MII_XGMAC_C22P_MASK; @@ -76,16 +71,14 @@ static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr, writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P); *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0x1f); - return 0; } -static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) +static int stmmac_xgmac2_mdio_read(struct stmmac_priv *priv, u32 addr, + u32 value) { - struct net_device *ndev = bus->priv; - struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; unsigned int mii_data = priv->hw->mii.data; - u32 tmp, addr, value = MII_XGMAC_BUSY; + u32 tmp; int ret; ret = pm_runtime_resume_and_get(priv->device); @@ -99,20 +92,6 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) goto err_disable_clks; } - if (phyreg & MII_ADDR_C45) { - phyreg &= ~MII_ADDR_C45; - - ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); - if (ret) - goto err_disable_clks; - } else { - ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); - if (ret) - goto err_disable_clks; - - value |= MII_XGMAC_SADDR; - } - value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) & priv->hw->mii.clk_csr_mask; value |= MII_XGMAC_READ; @@ -144,14 +123,44 @@ err_disable_clks: return ret; } -static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int 
phyaddr, - int phyreg, u16 phydata) +static int stmmac_xgmac2_mdio_read_c22(struct mii_bus *bus, int phyaddr, + int phyreg) { struct net_device *ndev = bus->priv; - struct stmmac_priv *priv = netdev_priv(ndev); + struct stmmac_priv *priv; + u32 addr; + + priv = netdev_priv(ndev); + + /* HW does not support C22 addr >= 4 */ + if (phyaddr > MII_XGMAC_MAX_C22ADDR) + return -ENODEV; + + stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); + + return stmmac_xgmac2_mdio_read(priv, addr, MII_XGMAC_BUSY); +} + +static int stmmac_xgmac2_mdio_read_c45(struct mii_bus *bus, int phyaddr, + int devad, int phyreg) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv; + u32 addr; + + priv = netdev_priv(ndev); + + stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr); + + return stmmac_xgmac2_mdio_read(priv, addr, MII_XGMAC_BUSY); +} + +static int stmmac_xgmac2_mdio_write(struct stmmac_priv *priv, u32 addr, + u32 value, u16 phydata) +{ unsigned int mii_address = priv->hw->mii.addr; unsigned int mii_data = priv->hw->mii.data; - u32 addr, tmp, value = MII_XGMAC_BUSY; + u32 tmp; int ret; ret = pm_runtime_resume_and_get(priv->device); @@ -165,20 +174,6 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, goto err_disable_clks; } - if (phyreg & MII_ADDR_C45) { - phyreg &= ~MII_ADDR_C45; - - ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); - if (ret) - goto err_disable_clks; - } else { - ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); - if (ret) - goto err_disable_clks; - - value |= MII_XGMAC_SADDR; - } - value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) & priv->hw->mii.clk_csr_mask; value |= phydata; @@ -205,8 +200,63 @@ err_disable_clks: return ret; } +static int stmmac_xgmac2_mdio_write_c22(struct mii_bus *bus, int phyaddr, + int phyreg, u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv; + u32 addr; + + priv = netdev_priv(ndev); + + /* HW does not support C22 addr >= 4 */ + if (phyaddr > MII_XGMAC_MAX_C22ADDR) + return -ENODEV; + + stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); + + return stmmac_xgmac2_mdio_write(priv, addr, + MII_XGMAC_BUSY | MII_XGMAC_SADDR, phydata); +} + +static int stmmac_xgmac2_mdio_write_c45(struct mii_bus *bus, int phyaddr, + int devad, int phyreg, u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv; + u32 addr; + + priv = netdev_priv(ndev); + + stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr); + + return stmmac_xgmac2_mdio_write(priv, addr, MII_XGMAC_BUSY, + phydata); +} + +static int stmmac_mdio_read(struct stmmac_priv *priv, int data, u32 value) +{ + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + u32 v; + + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000)) + return -EBUSY; + + writel(data, priv->ioaddr + mii_data); + writel(value, priv->ioaddr + mii_address); + + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000)) + return -EBUSY; + + /* Read the data from the MII data register */ + return readl(priv->ioaddr + mii_data) & MII_DATA_MASK; +} + /** - * stmmac_mdio_read + * stmmac_mdio_read_c22 * @bus: points to the mii_bus structure * @phyaddr: MII addr * @phyreg: MII reg @@ -215,15 +265,12 @@ err_disable_clks: * accessing the PHY registers. * Fortunately, it seems this has no drawback for the 7109 MAC. 
*/ -static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) +static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg) { struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); - unsigned int mii_address = priv->hw->mii.addr; - unsigned int mii_data = priv->hw->mii.data; u32 value = MII_BUSY; int data = 0; - u32 v; data = pm_runtime_resume_and_get(priv->device); if (data < 0) @@ -236,60 +283,94 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) & priv->hw->mii.clk_csr_mask; if (priv->plat->has_gmac4) { value |= MII_GMAC4_READ; - if (phyreg & MII_ADDR_C45) { - value |= MII_GMAC4_C45E; - value &= ~priv->hw->mii.reg_mask; - value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) << - priv->hw->mii.reg_shift) & - priv->hw->mii.reg_mask; - - data |= (phyreg & MII_REGADDR_C45_MASK) << - MII_GMAC4_REG_ADDR_SHIFT; - } } - if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000)) { - data = -EBUSY; - goto err_disable_clks; - } + data = stmmac_mdio_read(priv, data, value); - writel(data, priv->ioaddr + mii_data); - writel(value, priv->ioaddr + mii_address); + pm_runtime_put(priv->device); - if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000)) { - data = -EBUSY; - goto err_disable_clks; + return data; +} + +/** + * stmmac_mdio_read_c45 + * @bus: points to the mii_bus structure + * @phyaddr: MII addr + * @devad: device address to read + * @phyreg: MII reg + * Description: it reads data from the MII register from within the phy device. + * For the 7111 GMAC, we must set the bit 0 in the MII address register while + * accessing the PHY registers. + * Fortunately, it seems this has no drawback for the 7109 MAC. + */ +static int stmmac_mdio_read_c45(struct mii_bus *bus, int phyaddr, int devad, + int phyreg) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + u32 value = MII_BUSY; + int data = 0; + + data = pm_runtime_get_sync(priv->device); + if (data < 0) { + pm_runtime_put_noidle(priv->device); + return data; } - /* Read the data from the MII data register */ - data = (int)readl(priv->ioaddr + mii_data) & MII_DATA_MASK; + value |= (phyaddr << priv->hw->mii.addr_shift) + & priv->hw->mii.addr_mask; + value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) + & priv->hw->mii.clk_csr_mask; + value |= MII_GMAC4_READ; + value |= MII_GMAC4_C45E; + value &= ~priv->hw->mii.reg_mask; + value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; + + data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT; + + data = stmmac_mdio_read(priv, data, value); -err_disable_clks: pm_runtime_put(priv->device); return data; } +static int stmmac_mdio_write(struct stmmac_priv *priv, int data, u32 value) +{ + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + u32 v; + + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), + 100, 10000)) + return -EBUSY; + + /* Set the MII address register to write */ + writel(data, priv->ioaddr + mii_data); + writel(value, priv->ioaddr + mii_address); + + /* Wait until any existing MII operation is complete */ + return readl_poll_timeout(priv->ioaddr + mii_address, v, + !(v & MII_BUSY), 100, 10000); +} + /** - * stmmac_mdio_write + * stmmac_mdio_write_c22 * @bus: points to the mii_bus structure * @phyaddr: MII addr * @phyreg: MII 
reg * @phydata: phy data * Description: it writes the data into the MII register from within the device. */ -static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, - u16 phydata) +static int stmmac_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg, + u16 phydata) { struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); - unsigned int mii_address = priv->hw->mii.addr; - unsigned int mii_data = priv->hw->mii.data; int ret, data = phydata; u32 value = MII_BUSY; - u32 v; ret = pm_runtime_resume_and_get(priv->device); if (ret < 0) @@ -301,38 +382,57 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) & priv->hw->mii.clk_csr_mask; - if (priv->plat->has_gmac4) { + if (priv->plat->has_gmac4) value |= MII_GMAC4_WRITE; - if (phyreg & MII_ADDR_C45) { - value |= MII_GMAC4_C45E; - value &= ~priv->hw->mii.reg_mask; - value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) << - priv->hw->mii.reg_shift) & - priv->hw->mii.reg_mask; - - data |= (phyreg & MII_REGADDR_C45_MASK) << - MII_GMAC4_REG_ADDR_SHIFT; - } - } else { + else value |= MII_WRITE; - } - /* Wait until any existing MII operation is complete */ - if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000)) { - ret = -EBUSY; - goto err_disable_clks; + ret = stmmac_mdio_write(priv, data, value); + + pm_runtime_put(priv->device); + + return ret; +} + +/** + * stmmac_mdio_write_c45 + * @bus: points to the mii_bus structure + * @phyaddr: MII addr + * @phyreg: MII reg + * @devad: device address to read + * @phydata: phy data + * Description: it writes the data into the MII register from within the device. + */ +static int stmmac_mdio_write_c45(struct mii_bus *bus, int phyaddr, + int devad, int phyreg, u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + int ret, data = phydata; + u32 value = MII_BUSY; + + ret = pm_runtime_get_sync(priv->device); + if (ret < 0) { + pm_runtime_put_noidle(priv->device); + return ret; } - /* Set the MII address register to write */ - writel(data, priv->ioaddr + mii_data); - writel(value, priv->ioaddr + mii_address); + value |= (phyaddr << priv->hw->mii.addr_shift) + & priv->hw->mii.addr_mask; + value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; - /* Wait until any existing MII operation is complete */ - ret = readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY), - 100, 10000); + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) + & priv->hw->mii.clk_csr_mask; + + value |= MII_GMAC4_WRITE; + value |= MII_GMAC4_C45E; + value &= ~priv->hw->mii.reg_mask; + value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask; + + data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT; + + ret = stmmac_mdio_write(priv, data, value); -err_disable_clks: pm_runtime_put(priv->device); return ret; @@ -453,12 +553,11 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->name = "stmmac"; - if (priv->plat->has_gmac4) - new_bus->probe_capabilities = MDIOBUS_C22_C45; - if (priv->plat->has_xgmac) { - new_bus->read = &stmmac_xgmac2_mdio_read; - new_bus->write = &stmmac_xgmac2_mdio_write; + new_bus->read = &stmmac_xgmac2_mdio_read_c22; + new_bus->write = &stmmac_xgmac2_mdio_write_c22; + new_bus->read_c45 = &stmmac_xgmac2_mdio_read_c45; + new_bus->write_c45 = &stmmac_xgmac2_mdio_write_c45; /* Right now only C22 phys are supported */ max_addr = MII_XGMAC_MAX_C22ADDR + 1; @@ -468,8 +567,13 @@ int 
stmmac_mdio_register(struct net_device *ndev) dev_err(dev, "Unsupported phy_addr (max=%d)\n", MII_XGMAC_MAX_C22ADDR); } else { - new_bus->read = &stmmac_mdio_read; - new_bus->write = &stmmac_mdio_write; + new_bus->read = &stmmac_mdio_read_c22; + new_bus->write = &stmmac_mdio_write_c22; + if (priv->plat->has_gmac4) { + new_bus->read_c45 = &stmmac_mdio_read_c45; + new_bus->write_c45 = &stmmac_mdio_write_c45; + } + max_addr = PHY_MAX_ADDR; } @@ -490,7 +594,7 @@ int stmmac_mdio_register(struct net_device *ndev) /* Looks like we need a dummy read for XGMAC only and C45 PHYs */ if (priv->plat->has_xgmac) - stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45); + stmmac_xgmac2_mdio_read_c45(new_bus, 0, 0, 0); /* If fixed-link is set, skip PHY scanning */ if (!fwnode) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index ecbde83b5243..c696da89962f 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1416,6 +1416,70 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = { .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc, }; +static void am65_cpsw_disable_phy(struct phy *phy) +{ + phy_power_off(phy); + phy_exit(phy); +} + +static int am65_cpsw_enable_phy(struct phy *phy) +{ + int ret; + + ret = phy_init(phy); + if (ret < 0) + return ret; + + ret = phy_power_on(phy); + if (ret < 0) { + phy_exit(phy); + return ret; + } + + return 0; +} + +static void am65_cpsw_disable_serdes_phy(struct am65_cpsw_common *common) +{ + struct am65_cpsw_port *port; + struct phy *phy; + int i; + + for (i = 0; i < common->port_num; i++) { + port = &common->ports[i]; + phy = port->slave.serdes_phy; + if (phy) + am65_cpsw_disable_phy(phy); + } +} + +static int am65_cpsw_init_serdes_phy(struct device *dev, struct device_node *port_np, + struct am65_cpsw_port *port) +{ + const char *name = "serdes-phy"; + struct phy *phy; + int ret; + + phy = devm_of_phy_get(dev, port_np, name); + if (PTR_ERR(phy) == -ENODEV) + return 0; + if (IS_ERR(phy)) + return PTR_ERR(phy); + + /* Serdes PHY exists. Store it. 
*/ + port->slave.serdes_phy = phy; + + ret = am65_cpsw_enable_phy(phy); + if (ret < 0) + goto err_phy; + + return 0; + +err_phy: + devm_phy_put(dev, phy); + return ret; +} + static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { @@ -1959,6 +2023,11 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) goto of_node_put; } + /* Initialize the Serdes PHY for the port */ + ret = am65_cpsw_init_serdes_phy(dev, port_np, port); + if (ret) + return ret; + port->slave.mac_only = of_property_read_bool(port_np, "ti,mac-only"); @@ -2684,11 +2753,19 @@ static const struct am65_cpsw_pdata j7200_cpswxg_pdata = { .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII), }; +static const struct am65_cpsw_pdata j721e_cpswxg_pdata = { + .quirks = 0, + .ale_dev_id = "am64-cpswxg", + .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, + .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII), +}; + static const struct of_device_id am65_cpsw_nuss_of_mtable[] = { { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0}, { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata}, { .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata}, { .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata}, + { .compatible = "ti,j721e-cpswxg-nuss", .data = &j721e_cpswxg_pdata}, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable); @@ -2870,6 +2947,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev) */ am65_cpsw_nuss_cleanup_ndev(common); am65_cpsw_nuss_phylink_cleanup(common); + am65_cpsw_disable_serdes_phy(common); of_platform_device_destroy(common->mdio_dev, NULL); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index 4b75620f8d28..ed26768a6e51 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -32,6 +32,7 @@ struct am65_cpsw_slave_data { struct device_node *phy_node; phy_interface_t phy_if; struct phy *ifphy; + struct phy *serdes_phy; bool rx_pause; bool tx_pause; u8 mac_addr[ETH_ALEN]; diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c index 9535396b28cd..bf0f74b20ba6 100644 --- a/drivers/net/ethernet/ti/am65-cpts.c +++ b/drivers/net/ethernet/ti/am65-cpts.c @@ -176,6 +176,10 @@ struct am65_cpts { u32 genf_enable; u32 hw_ts_enable; struct sk_buff_head txq; + bool pps_enabled; + bool pps_present; + u32 pps_hw_ts_idx; + u32 pps_genf_idx; /* context save/restore */ u64 sr_cpts_ns; u64 sr_ktime_ns; @@ -319,8 +323,15 @@ static int am65_cpts_fifo_read(struct am65_cpts *cpts) case AM65_CPTS_EV_HW: pevent.index = am65_cpts_event_get_port(event) - 1; pevent.timestamp = event->timestamp; - pevent.type = PTP_CLOCK_EXTTS; - dev_dbg(cpts->dev, "AM65_CPTS_EV_HW p:%d t:%llu\n", + if (cpts->pps_enabled && pevent.index == cpts->pps_hw_ts_idx) { + pevent.type = PTP_CLOCK_PPSUSR; + pevent.pps_times.ts_real = ns_to_timespec64(pevent.timestamp); + } else { + pevent.type = PTP_CLOCK_EXTTS; + } + dev_dbg(cpts->dev, "AM65_CPTS_EV_HW:%s p:%d t:%llu\n", + pevent.type == PTP_CLOCK_EXTTS ? 
+ "extts" : "pps", pevent.index, event->timestamp); ptp_clock_event(cpts->ptp_clock, &pevent); @@ -394,10 +405,13 @@ static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id) static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info); + u32 pps_ctrl_val = 0, pps_ppm_hi = 0, pps_ppm_low = 0; s32 ppb = scaled_ppm_to_ppb(scaled_ppm); + int pps_index = cpts->pps_genf_idx; + u64 adj_period, pps_adj_period; + u32 ctrl_val, ppm_hi, ppm_low; + unsigned long flags; int neg_adj = 0; - u64 adj_period; - u32 val; if (ppb < 0) { neg_adj = 1; @@ -417,17 +431,53 @@ static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) mutex_lock(&cpts->ptp_clk_lock); - val = am65_cpts_read32(cpts, control); + ctrl_val = am65_cpts_read32(cpts, control); if (neg_adj) - val |= AM65_CPTS_CONTROL_TS_PPM_DIR; + ctrl_val |= AM65_CPTS_CONTROL_TS_PPM_DIR; else - val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR; - am65_cpts_write32(cpts, val, control); + ctrl_val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR; + + ppm_hi = upper_32_bits(adj_period) & 0x3FF; + ppm_low = lower_32_bits(adj_period); + + if (cpts->pps_enabled) { + pps_ctrl_val = am65_cpts_read32(cpts, genf[pps_index].control); + if (neg_adj) + pps_ctrl_val &= ~BIT(1); + else + pps_ctrl_val |= BIT(1); + + /* GenF PPM will do correction using cpts refclk tick which is + * (cpts->ts_add_val + 1) ns, so GenF length PPM adj period + * need to be corrected. + */ + pps_adj_period = adj_period * (cpts->ts_add_val + 1); + pps_ppm_hi = upper_32_bits(pps_adj_period) & 0x3FF; + pps_ppm_low = lower_32_bits(pps_adj_period); + } - val = upper_32_bits(adj_period) & 0x3FF; - am65_cpts_write32(cpts, val, ts_ppm_hi); - val = lower_32_bits(adj_period); - am65_cpts_write32(cpts, val, ts_ppm_low); + spin_lock_irqsave(&cpts->lock, flags); + + /* All below writes must be done extremely fast: + * - delay between PPM dir and PPM value changes can cause err due old + * PPM correction applied in wrong direction + * - delay between CPTS-clock PPM cfg and GenF PPM cfg can cause err + * due CPTS-clock PPM working with new cfg while GenF PPM cfg still + * with old for short period of time + */ + + am65_cpts_write32(cpts, ctrl_val, control); + am65_cpts_write32(cpts, ppm_hi, ts_ppm_hi); + am65_cpts_write32(cpts, ppm_low, ts_ppm_low); + + if (cpts->pps_enabled) { + am65_cpts_write32(cpts, pps_ctrl_val, genf[pps_index].control); + am65_cpts_write32(cpts, pps_ppm_hi, genf[pps_index].ppm_hi); + am65_cpts_write32(cpts, pps_ppm_low, genf[pps_index].ppm_low); + } + + /* All GenF/EstF can be updated here the same way */ + spin_unlock_irqrestore(&cpts->lock, flags); mutex_unlock(&cpts->ptp_clk_lock); @@ -507,7 +557,13 @@ static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on) static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on) { - if (!!(cpts->hw_ts_enable & BIT(index)) == !!on) + if (index >= cpts->ptp_info.n_ext_ts) + return -ENXIO; + + if (cpts->pps_present && index == cpts->pps_hw_ts_idx) + return -EINVAL; + + if (((cpts->hw_ts_enable & BIT(index)) >> index) == on) return 0; mutex_lock(&cpts->ptp_clk_lock); @@ -591,6 +647,12 @@ static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts, static int am65_cpts_perout_enable(struct am65_cpts *cpts, struct ptp_perout_request *req, int on) { + if (req->index >= cpts->ptp_info.n_per_out) + return -ENXIO; + + if (cpts->pps_present && req->index == cpts->pps_genf_idx) + return -EINVAL; + if (!!(cpts->genf_enable & 
BIT(req->index)) == !!on) return 0; @@ -604,6 +666,48 @@ static int am65_cpts_perout_enable(struct am65_cpts *cpts, return 0; } +static int am65_cpts_pps_enable(struct am65_cpts *cpts, int on) +{ + int ret = 0; + struct timespec64 ts; + struct ptp_clock_request rq; + u64 ns; + + if (!cpts->pps_present) + return -EINVAL; + + if (cpts->pps_enabled == !!on) + return 0; + + mutex_lock(&cpts->ptp_clk_lock); + + if (on) { + am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on); + + ns = am65_cpts_gettime(cpts, NULL); + ts = ns_to_timespec64(ns); + rq.perout.period.sec = 1; + rq.perout.period.nsec = 0; + rq.perout.start.sec = ts.tv_sec + 2; + rq.perout.start.nsec = 0; + rq.perout.index = cpts->pps_genf_idx; + + am65_cpts_perout_enable_hw(cpts, &rq.perout, on); + cpts->pps_enabled = true; + } else { + rq.perout.index = cpts->pps_genf_idx; + am65_cpts_perout_enable_hw(cpts, &rq.perout, on); + am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on); + cpts->pps_enabled = false; + } + + mutex_unlock(&cpts->ptp_clk_lock); + + dev_dbg(cpts->dev, "%s: pps: %s\n", + __func__, on ? "enabled" : "disabled"); + return ret; +} + static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { @@ -614,6 +718,8 @@ static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp, return am65_cpts_extts_enable(cpts, rq->extts.index, on); case PTP_CLK_REQ_PEROUT: return am65_cpts_perout_enable(cpts, &rq->perout, on); + case PTP_CLK_REQ_PPS: + return am65_cpts_pps_enable(cpts, on); default: break; } @@ -926,6 +1032,23 @@ static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node) if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0])) cpts->genf_num = prop[0]; + if (!of_property_read_u32_array(node, "ti,pps", prop, 2)) { + cpts->pps_present = true; + + if (prop[0] > 7) { + dev_err(cpts->dev, "invalid HWx_TS_PUSH index: %u provided\n", prop[0]); + cpts->pps_present = false; + } + if (prop[1] > 1) { + dev_err(cpts->dev, "invalid GENFy index: %u provided\n", prop[1]); + cpts->pps_present = false; + } + if (cpts->pps_present) { + cpts->pps_hw_ts_idx = prop[0]; + cpts->pps_genf_idx = prop[1]; + } + } + return cpts_of_mux_clk_setup(cpts, node); } @@ -993,6 +1116,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs, cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs; if (cpts->genf_num) cpts->ptp_info.n_per_out = cpts->genf_num; + if (cpts->pps_present) + cpts->ptp_info.pps = 1; am65_cpts_set_add_val(cpts); @@ -1028,9 +1153,9 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs, return ERR_PTR(ret); } - dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n", + dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u pps:%d\n", am65_cpts_read32(cpts, idver), - cpts->refclk_freq, cpts->ts_add_val); + cpts->refclk_freq, cpts->ts_add_val, cpts->pps_present); return cpts; diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 946b9753ccfb..23169e36a3d4 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -225,7 +225,7 @@ static int davinci_get_mdio_data(struct mdiobb_ctrl *ctrl) return test_bit(MDIO_PIN, ®); } -static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg) +static int davinci_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg) { int ret; @@ -233,7 +233,7 @@ static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg) if (ret < 0) return ret; - ret = mdiobb_read(bus, phy, reg); + ret = 
mdiobb_read_c22(bus, phy, reg); pm_runtime_mark_last_busy(bus->parent); pm_runtime_put_autosuspend(bus->parent); @@ -241,8 +241,8 @@ static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg) return ret; } -static int davinci_mdiobb_write(struct mii_bus *bus, int phy, int reg, - u16 val) +static int davinci_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, + u16 val) { int ret; @@ -250,7 +250,41 @@ static int davinci_mdiobb_write(struct mii_bus *bus, int phy, int reg, if (ret < 0) return ret; - ret = mdiobb_write(bus, phy, reg, val); + ret = mdiobb_write_c22(bus, phy, reg, val); + + pm_runtime_mark_last_busy(bus->parent); + pm_runtime_put_autosuspend(bus->parent); + + return ret; +} + +static int davinci_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, + int reg) +{ + int ret; + + ret = pm_runtime_resume_and_get(bus->parent); + if (ret < 0) + return ret; + + ret = mdiobb_read_c45(bus, phy, devad, reg); + + pm_runtime_mark_last_busy(bus->parent); + pm_runtime_put_autosuspend(bus->parent); + + return ret; +} + +static int davinci_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad, + int reg, u16 val) +{ + int ret; + + ret = pm_runtime_resume_and_get(bus->parent); + if (ret < 0) + return ret; + + ret = mdiobb_write_c45(bus, phy, devad, reg, val); pm_runtime_mark_last_busy(bus->parent); pm_runtime_put_autosuspend(bus->parent); @@ -573,8 +607,10 @@ static int davinci_mdio_probe(struct platform_device *pdev) data->bus->name = dev_name(dev); if (data->manual_mode) { - data->bus->read = davinci_mdiobb_read; - data->bus->write = davinci_mdiobb_write; + data->bus->read = davinci_mdiobb_read_c22; + data->bus->write = davinci_mdiobb_write_c22; + data->bus->read_c45 = davinci_mdiobb_read_c45; + data->bus->write_c45 = davinci_mdiobb_write_c45; data->bus->reset = davinci_mdiobb_reset; dev_info(dev, "Configuring MDIO in manual mode\n"); diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index 86310588c6c1..0922beac3ec0 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -25,6 +25,7 @@ config NGBE tristate "Wangxun(R) GbE PCI Express adapters support" depends on PCI select LIBWX + select PHYLIB help This driver supports Wangxun(R) GbE PCI Express family of adapters. diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index c57dc3238b3f..3d7ba0c0df38 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -2,6 +2,7 @@ /* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ #include <linux/etherdevice.h> +#include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/iopoll.h> #include <linux/pci.h> @@ -9,18 +10,18 @@ #include "wx_type.h" #include "wx_hw.h" -static void wx_intr_disable(struct wx_hw *wxhw, u64 qmask) +static void wx_intr_disable(struct wx *wx, u64 qmask) { u32 mask; mask = (qmask & 0xFFFFFFFF); if (mask) - wr32(wxhw, WX_PX_IMS(0), mask); + wr32(wx, WX_PX_IMS(0), mask); - if (wxhw->mac.type == wx_mac_sp) { + if (wx->mac.type == wx_mac_sp) { mask = (qmask >> 32); if (mask) - wr32(wxhw, WX_PX_IMS(1), mask); + wr32(wx, WX_PX_IMS(1), mask); } } @@ -28,33 +29,33 @@ static void wx_intr_disable(struct wx_hw *wxhw, u64 qmask) * 1. to be sector address, when implemented erase sector command * 2. 
to be flash address when implemented read, write flash address */ -static int wx_fmgr_cmd_op(struct wx_hw *wxhw, u32 cmd, u32 cmd_addr) +static int wx_fmgr_cmd_op(struct wx *wx, u32 cmd, u32 cmd_addr) { u32 cmd_val = 0, val = 0; cmd_val = WX_SPI_CMD_CMD(cmd) | WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) | cmd_addr; - wr32(wxhw, WX_SPI_CMD, cmd_val); + wr32(wx, WX_SPI_CMD, cmd_val); return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000, - false, wxhw, WX_SPI_STATUS); + false, wx, WX_SPI_STATUS); } -static int wx_flash_read_dword(struct wx_hw *wxhw, u32 addr, u32 *data) +static int wx_flash_read_dword(struct wx *wx, u32 addr, u32 *data) { int ret = 0; - ret = wx_fmgr_cmd_op(wxhw, WX_SPI_CMD_READ_DWORD, addr); + ret = wx_fmgr_cmd_op(wx, WX_SPI_CMD_READ_DWORD, addr); if (ret < 0) return ret; - *data = rd32(wxhw, WX_SPI_DATA); + *data = rd32(wx, WX_SPI_DATA); return ret; } -int wx_check_flash_load(struct wx_hw *hw, u32 check_bit) +int wx_check_flash_load(struct wx *hw, u32 check_bit) { u32 reg = 0; int err = 0; @@ -73,29 +74,25 @@ int wx_check_flash_load(struct wx_hw *hw, u32 check_bit) } EXPORT_SYMBOL(wx_check_flash_load); -void wx_control_hw(struct wx_hw *wxhw, bool drv) +void wx_control_hw(struct wx *wx, bool drv) { - if (drv) { - /* Let firmware know the driver has taken over */ - wr32m(wxhw, WX_CFG_PORT_CTL, - WX_CFG_PORT_CTL_DRV_LOAD, WX_CFG_PORT_CTL_DRV_LOAD); - } else { - /* Let firmware take over control of hw */ - wr32m(wxhw, WX_CFG_PORT_CTL, - WX_CFG_PORT_CTL_DRV_LOAD, 0); - } + /* True : Let firmware know the driver has taken over + * False : Let firmware take over control of hw + */ + wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_DRV_LOAD, + drv ? WX_CFG_PORT_CTL_DRV_LOAD : 0); } EXPORT_SYMBOL(wx_control_hw); /** * wx_mng_present - returns 0 when management capability is present - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure */ -int wx_mng_present(struct wx_hw *wxhw) +int wx_mng_present(struct wx *wx) { u32 fwsm; - fwsm = rd32(wxhw, WX_MIS_ST); + fwsm = rd32(wx, WX_MIS_ST); if (fwsm & WX_MIS_ST_MNG_INIT_DN) return 0; else @@ -108,40 +105,40 @@ static DEFINE_MUTEX(wx_sw_sync_lock); /** * wx_release_sw_sync - Release SW semaphore - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @mask: Mask to specify which semaphore to release * * Releases the SW semaphore for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -static void wx_release_sw_sync(struct wx_hw *wxhw, u32 mask) +static void wx_release_sw_sync(struct wx *wx, u32 mask) { mutex_lock(&wx_sw_sync_lock); - wr32m(wxhw, WX_MNG_SWFW_SYNC, mask, 0); + wr32m(wx, WX_MNG_SWFW_SYNC, mask, 0); mutex_unlock(&wx_sw_sync_lock); } /** * wx_acquire_sw_sync - Acquire SW semaphore - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @mask: Mask to specify which semaphore to acquire * * Acquires the SW semaphore for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask) +static int wx_acquire_sw_sync(struct wx *wx, u32 mask) { u32 sem = 0; int ret = 0; mutex_lock(&wx_sw_sync_lock); ret = read_poll_timeout(rd32, sem, !(sem & mask), - 5000, 2000000, false, wxhw, WX_MNG_SWFW_SYNC); + 5000, 2000000, false, wx, WX_MNG_SWFW_SYNC); if (!ret) { sem |= mask; - wr32(wxhw, WX_MNG_SWFW_SYNC, sem); + wr32(wx, WX_MNG_SWFW_SYNC, sem); } else { - wx_err(wxhw, "SW Semaphore not granted: 0x%x.\n", sem); + wx_err(wx, "SW Semaphore not granted: 0x%x.\n", sem); } mutex_unlock(&wx_sw_sync_lock); @@ -150,7 
+147,7 @@ static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask) /** * wx_host_interface_command - Issue command to manageability block - * @wxhw: pointer to the HW structure + * @wx: pointer to the HW structure * @buffer: contains the command to write and where the return status will * be placed * @length: length of buffer, must be multiple of 4 bytes @@ -162,7 +159,7 @@ static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask) * So we will leave this up to the caller to read back the data * in these cases. **/ -int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, +int wx_host_interface_command(struct wx *wx, u32 *buffer, u32 length, u32 timeout, bool return_data) { u32 hdr_size = sizeof(struct wx_hic_hdr); @@ -172,17 +169,17 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, u16 buf_len; if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) { - wx_err(wxhw, "Buffer length failure buffersize=%d.\n", length); + wx_err(wx, "Buffer length failure buffersize=%d.\n", length); return -EINVAL; } - status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_MB); + status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); if (status != 0) return status; /* Calculate length in DWORDs. We must be DWORD aligned */ if ((length % (sizeof(u32))) != 0) { - wx_err(wxhw, "Buffer length failure, not aligned to dword"); + wx_err(wx, "Buffer length failure, not aligned to dword"); status = -EINVAL; goto rel_out; } @@ -193,38 +190,38 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, * into the ram area. */ for (i = 0; i < dword_len; i++) { - wr32a(wxhw, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i])); + wr32a(wx, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i])); /* write flush */ - buf[i] = rd32a(wxhw, WX_MNG_MBOX, i); + buf[i] = rd32a(wx, WX_MNG_MBOX, i); } /* Setting this bit tells the ARC that a new command is pending. 
*/ - wr32m(wxhw, WX_MNG_MBOX_CTL, + wr32m(wx, WX_MNG_MBOX_CTL, WX_MNG_MBOX_CTL_SWRDY, WX_MNG_MBOX_CTL_SWRDY); status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000, - timeout * 1000, false, wxhw, WX_MNG_MBOX_CTL); + timeout * 1000, false, wx, WX_MNG_MBOX_CTL); /* Check command completion */ if (status) { - wx_dbg(wxhw, "Command has failed with no status valid.\n"); + wx_dbg(wx, "Command has failed with no status valid.\n"); - buf[0] = rd32(wxhw, WX_MNG_MBOX); + buf[0] = rd32(wx, WX_MNG_MBOX); if ((buffer[0] & 0xff) != (~buf[0] >> 24)) { status = -EINVAL; goto rel_out; } if ((buf[0] & 0xff0000) >> 16 == 0x80) { - wx_dbg(wxhw, "It's unknown cmd.\n"); + wx_dbg(wx, "It's unknown cmd.\n"); status = -EINVAL; goto rel_out; } - wx_dbg(wxhw, "write value:\n"); + wx_dbg(wx, "write value:\n"); for (i = 0; i < dword_len; i++) - wx_dbg(wxhw, "%x ", buffer[i]); - wx_dbg(wxhw, "read value:\n"); + wx_dbg(wx, "%x ", buffer[i]); + wx_dbg(wx, "read value:\n"); for (i = 0; i < dword_len; i++) - wx_dbg(wxhw, "%x ", buf[i]); + wx_dbg(wx, "%x ", buf[i]); } if (!return_data) @@ -235,7 +232,7 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, /* first pull in the header so we know the buffer length */ for (bi = 0; bi < dword_len; bi++) { - buffer[bi] = rd32a(wxhw, WX_MNG_MBOX, bi); + buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi); le32_to_cpus(&buffer[bi]); } @@ -245,7 +242,7 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, goto rel_out; if (length < buf_len + hdr_size) { - wx_err(wxhw, "Buffer not large enough for reply message.\n"); + wx_err(wx, "Buffer not large enough for reply message.\n"); status = -EFAULT; goto rel_out; } @@ -255,12 +252,12 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, /* Pull in the rest of the buffer (bi is where we left off) */ for (; bi <= dword_len; bi++) { - buffer[bi] = rd32a(wxhw, WX_MNG_MBOX, bi); + buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi); le32_to_cpus(&buffer[bi]); } rel_out: - wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_MB); + wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); return status; } EXPORT_SYMBOL(wx_host_interface_command); @@ -268,13 +265,13 @@ EXPORT_SYMBOL(wx_host_interface_command); /** * wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd * assuming that the semaphore is already obtained. - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the hostif. 
**/ -static int wx_read_ee_hostif_data(struct wx_hw *wxhw, u16 offset, u16 *data) +static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data) { struct wx_hic_read_shadow_ram buffer; int status; @@ -289,33 +286,33 @@ static int wx_read_ee_hostif_data(struct wx_hw *wxhw, u16 offset, u16 *data) /* one word */ buffer.length = (__force u16)cpu_to_be16(sizeof(u16)); - status = wx_host_interface_command(wxhw, (u32 *)&buffer, sizeof(buffer), + status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), WX_HI_COMMAND_TIMEOUT, false); if (status != 0) return status; - *data = (u16)rd32a(wxhw, WX_MNG_MBOX, FW_NVM_DATA_OFFSET); + *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET); return status; } /** * wx_read_ee_hostif - Read EEPROM word using a host interface cmd - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the hostif. **/ -int wx_read_ee_hostif(struct wx_hw *wxhw, u16 offset, u16 *data) +int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data) { int status = 0; - status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH); + status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); if (status == 0) { - status = wx_read_ee_hostif_data(wxhw, offset, data); - wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH); + status = wx_read_ee_hostif_data(wx, offset, data); + wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); } return status; @@ -324,14 +321,14 @@ EXPORT_SYMBOL(wx_read_ee_hostif); /** * wx_read_ee_hostif_buffer- Read EEPROM word(s) using hostif - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @words: number of words * @data: word(s) read from the EEPROM * * Reads a 16 bit word(s) from the EEPROM using the hostif. **/ -int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, +int wx_read_ee_hostif_buffer(struct wx *wx, u16 offset, u16 words, u16 *data) { struct wx_hic_read_shadow_ram buffer; @@ -342,7 +339,7 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, u32 i; /* Take semaphore for the entire operation. 
*/ - status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH); + status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); if (status != 0) return status; @@ -361,20 +358,20 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2); buffer.length = (__force u16)cpu_to_be16(words_to_read * 2); - status = wx_host_interface_command(wxhw, (u32 *)&buffer, + status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), WX_HI_COMMAND_TIMEOUT, false); if (status != 0) { - wx_err(wxhw, "Host interface command failed\n"); + wx_err(wx, "Host interface command failed\n"); goto out; } for (i = 0; i < words_to_read; i++) { u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i; - value = rd32(wxhw, reg); + value = rd32(wx, reg); data[current_word] = (u16)(value & 0xffff); current_word++; i++; @@ -388,7 +385,7 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, } out: - wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH); + wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH); return status; } EXPORT_SYMBOL(wx_read_ee_hostif_buffer); @@ -416,12 +413,12 @@ static u8 wx_calculate_checksum(u8 *buffer, u32 length) /** * wx_reset_hostif - send reset cmd to fw - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Sends reset cmd to firmware through the manageability * block. **/ -int wx_reset_hostif(struct wx_hw *wxhw) +int wx_reset_hostif(struct wx *wx) { struct wx_hic_reset reset_cmd; int ret_val = 0; @@ -430,15 +427,15 @@ int wx_reset_hostif(struct wx_hw *wxhw) reset_cmd.hdr.cmd = FW_RESET_CMD; reset_cmd.hdr.buf_len = FW_RESET_LEN; reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - reset_cmd.lan_id = wxhw->bus.func; - reset_cmd.reset_type = (u16)wxhw->reset_type; + reset_cmd.lan_id = wx->bus.func; + reset_cmd.reset_type = (u16)wx->reset_type; reset_cmd.hdr.checksum = 0; reset_cmd.hdr.checksum = wx_calculate_checksum((u8 *)&reset_cmd, (FW_CEM_HDR_LEN + reset_cmd.hdr.buf_len)); for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - ret_val = wx_host_interface_command(wxhw, (u32 *)&reset_cmd, + ret_val = wx_host_interface_command(wx, (u32 *)&reset_cmd, sizeof(reset_cmd), WX_HI_COMMAND_TIMEOUT, true); @@ -460,14 +457,14 @@ EXPORT_SYMBOL(wx_reset_hostif); /** * wx_init_eeprom_params - Initialize EEPROM params - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Initializes the EEPROM parameters wx_eeprom_info within the * wx_hw struct in order to set up EEPROM access. 
**/ -void wx_init_eeprom_params(struct wx_hw *wxhw) +void wx_init_eeprom_params(struct wx *wx) { - struct wx_eeprom_info *eeprom = &wxhw->eeprom; + struct wx_eeprom_info *eeprom = &wx->eeprom; u16 eeprom_size; u16 data = 0x80; @@ -475,21 +472,21 @@ void wx_init_eeprom_params(struct wx_hw *wxhw) eeprom->semaphore_delay = 10; eeprom->type = wx_eeprom_none; - if (!(rd32(wxhw, WX_SPI_STATUS) & + if (!(rd32(wx, WX_SPI_STATUS) & WX_SPI_STATUS_FLASH_BYPASS)) { eeprom->type = wx_flash; eeprom_size = 4096; eeprom->word_size = eeprom_size >> 1; - wx_dbg(wxhw, "Eeprom params: type = %d, size = %d\n", + wx_dbg(wx, "Eeprom params: type = %d, size = %d\n", eeprom->type, eeprom->word_size); } } - if (wxhw->mac.type == wx_mac_sp) { - if (wx_read_ee_hostif(wxhw, WX_SW_REGION_PTR, &data)) { - wx_err(wxhw, "NVM Read Error\n"); + if (wx->mac.type == wx_mac_sp) { + if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) { + wx_err(wx, "NVM Read Error\n"); return; } data = data >> 1; @@ -501,22 +498,22 @@ EXPORT_SYMBOL(wx_init_eeprom_params); /** * wx_get_mac_addr - Generic get MAC address - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @mac_addr: Adapter MAC address * * Reads the adapter's MAC address from first Receive Address Register (RAR0) * A reset of the adapter must be performed prior to calling this function * in order for the MAC address to have been loaded from the EEPROM into RAR0 **/ -void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr) +void wx_get_mac_addr(struct wx *wx, u8 *mac_addr) { u32 rar_high; u32 rar_low; u16 i; - wr32(wxhw, WX_PSR_MAC_SWC_IDX, 0); - rar_high = rd32(wxhw, WX_PSR_MAC_SWC_AD_H); - rar_low = rd32(wxhw, WX_PSR_MAC_SWC_AD_L); + wr32(wx, WX_PSR_MAC_SWC_IDX, 0); + rar_high = rd32(wx, WX_PSR_MAC_SWC_AD_H); + rar_low = rd32(wx, WX_PSR_MAC_SWC_AD_L); for (i = 0; i < 2; i++) mac_addr[i] = (u8)(rar_high >> (1 - i) * 8); @@ -528,7 +525,7 @@ EXPORT_SYMBOL(wx_get_mac_addr); /** * wx_set_rar - Set Rx address register - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @index: Receive address register to write * @addr: Address to put into receive address register * @pools: VMDq "set" or "pool" index @@ -536,25 +533,25 @@ EXPORT_SYMBOL(wx_get_mac_addr); * * Puts an ethernet address into a receive address register. 
**/ -int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, - u32 enable_addr) +static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools, + u32 enable_addr) { - u32 rar_entries = wxhw->mac.num_rar_entries; + u32 rar_entries = wx->mac.num_rar_entries; u32 rar_low, rar_high; /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { - wx_err(wxhw, "RAR index %d is out of range.\n", index); + wx_err(wx, "RAR index %d is out of range.\n", index); return -EINVAL; } /* select the MAC address */ - wr32(wxhw, WX_PSR_MAC_SWC_IDX, index); + wr32(wx, WX_PSR_MAC_SWC_IDX, index); /* setup VMDq pool mapping */ - wr32(wxhw, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); - if (wxhw->mac.type == wx_mac_sp) - wr32(wxhw, WX_PSR_MAC_SWC_VM_H, pools >> 32); + wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); + if (wx->mac.type == wx_mac_sp) + wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32); /* HW expects these in little endian so we reverse the byte * order from network order (big endian) to little endian @@ -572,31 +569,30 @@ int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, if (enable_addr != 0) rar_high |= WX_PSR_MAC_SWC_AD_H_AV; - wr32(wxhw, WX_PSR_MAC_SWC_AD_L, rar_low); - wr32m(wxhw, WX_PSR_MAC_SWC_AD_H, - (WX_PSR_MAC_SWC_AD_H_AD(~0) | - WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + wr32(wx, WX_PSR_MAC_SWC_AD_L, rar_low); + wr32m(wx, WX_PSR_MAC_SWC_AD_H, + (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) | + WX_PSR_MAC_SWC_AD_H_ADTYPE(1) | WX_PSR_MAC_SWC_AD_H_AV), rar_high); return 0; } -EXPORT_SYMBOL(wx_set_rar); /** * wx_clear_rar - Remove Rx address register - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @index: Receive address register to write * * Clears an ethernet address from a receive address register. **/ -int wx_clear_rar(struct wx_hw *wxhw, u32 index) +static int wx_clear_rar(struct wx *wx, u32 index) { - u32 rar_entries = wxhw->mac.num_rar_entries; + u32 rar_entries = wx->mac.num_rar_entries; /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { - wx_err(wxhw, "RAR index %d is out of range.\n", index); + wx_err(wx, "RAR index %d is out of range.\n", index); return -EINVAL; } @@ -604,78 +600,77 @@ int wx_clear_rar(struct wx_hw *wxhw, u32 index) * so save everything except the lower 16 bits that hold part * of the address and the address valid bit. 
*/ - wr32(wxhw, WX_PSR_MAC_SWC_IDX, index); + wr32(wx, WX_PSR_MAC_SWC_IDX, index); - wr32(wxhw, WX_PSR_MAC_SWC_VM_L, 0); - wr32(wxhw, WX_PSR_MAC_SWC_VM_H, 0); + wr32(wx, WX_PSR_MAC_SWC_VM_L, 0); + wr32(wx, WX_PSR_MAC_SWC_VM_H, 0); - wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0); - wr32m(wxhw, WX_PSR_MAC_SWC_AD_H, - (WX_PSR_MAC_SWC_AD_H_AD(~0) | - WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) | + wr32(wx, WX_PSR_MAC_SWC_AD_L, 0); + wr32m(wx, WX_PSR_MAC_SWC_AD_H, + (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) | + WX_PSR_MAC_SWC_AD_H_ADTYPE(1) | WX_PSR_MAC_SWC_AD_H_AV), 0); return 0; } -EXPORT_SYMBOL(wx_clear_rar); /** * wx_clear_vmdq - Disassociate a VMDq pool index from a rx address - * @wxhw: pointer to hardware struct + * @wx: pointer to hardware struct * @rar: receive address register index to disassociate * @vmdq: VMDq pool index to remove from the rar **/ -static int wx_clear_vmdq(struct wx_hw *wxhw, u32 rar, u32 __maybe_unused vmdq) +static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq) { - u32 rar_entries = wxhw->mac.num_rar_entries; + u32 rar_entries = wx->mac.num_rar_entries; u32 mpsar_lo, mpsar_hi; /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { - wx_err(wxhw, "RAR index %d is out of range.\n", rar); + wx_err(wx, "RAR index %d is out of range.\n", rar); return -EINVAL; } - wr32(wxhw, WX_PSR_MAC_SWC_IDX, rar); - mpsar_lo = rd32(wxhw, WX_PSR_MAC_SWC_VM_L); - mpsar_hi = rd32(wxhw, WX_PSR_MAC_SWC_VM_H); + wr32(wx, WX_PSR_MAC_SWC_IDX, rar); + mpsar_lo = rd32(wx, WX_PSR_MAC_SWC_VM_L); + mpsar_hi = rd32(wx, WX_PSR_MAC_SWC_VM_H); if (!mpsar_lo && !mpsar_hi) return 0; /* was that the last pool using this rar? */ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) - wx_clear_rar(wxhw, rar); + wx_clear_rar(wx, rar); return 0; } /** * wx_init_uta_tables - Initialize the Unicast Table Array - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure **/ -static void wx_init_uta_tables(struct wx_hw *wxhw) +static void wx_init_uta_tables(struct wx *wx) { int i; - wx_dbg(wxhw, " Clearing UTA\n"); + wx_dbg(wx, " Clearing UTA\n"); for (i = 0; i < 128; i++) - wr32(wxhw, WX_PSR_UC_TBL(i), 0); + wr32(wx, WX_PSR_UC_TBL(i), 0); } /** * wx_init_rx_addrs - Initializes receive address filters. - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Places the MAC address in receive address register 0 and clears the rest * of the receive address registers. Clears the multicast table. Assumes * the receiver is in reset when the routine is called. **/ -void wx_init_rx_addrs(struct wx_hw *wxhw) +void wx_init_rx_addrs(struct wx *wx) { - u32 rar_entries = wxhw->mac.num_rar_entries; + u32 rar_entries = wx->mac.num_rar_entries; u32 psrctl; int i; @@ -683,67 +678,166 @@ void wx_init_rx_addrs(struct wx_hw *wxhw) * to the permanent address. * Otherwise, use the permanent address from the eeprom. */ - if (!is_valid_ether_addr(wxhw->mac.addr)) { + if (!is_valid_ether_addr(wx->mac.addr)) { /* Get the MAC address from the RAR0 for later reference */ - wx_get_mac_addr(wxhw, wxhw->mac.addr); - wx_dbg(wxhw, "Keeping Current RAR0 Addr = %pM\n", wxhw->mac.addr); + wx_get_mac_addr(wx, wx->mac.addr); + wx_dbg(wx, "Keeping Current RAR0 Addr = %pM\n", wx->mac.addr); } else { /* Setup the receive address. 
*/ - wx_dbg(wxhw, "Overriding MAC Address in RAR[0]\n"); - wx_dbg(wxhw, "New MAC Addr = %pM\n", wxhw->mac.addr); + wx_dbg(wx, "Overriding MAC Address in RAR[0]\n"); + wx_dbg(wx, "New MAC Addr = %pM\n", wx->mac.addr); - wx_set_rar(wxhw, 0, wxhw->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV); + wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV); - if (wxhw->mac.type == wx_mac_sp) { + if (wx->mac.type == wx_mac_sp) { /* clear VMDq pool/queue selection for RAR 0 */ - wx_clear_vmdq(wxhw, 0, WX_CLEAR_VMDQ_ALL); + wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL); } } /* Zero out the other receive addresses. */ - wx_dbg(wxhw, "Clearing RAR[1-%d]\n", rar_entries - 1); + wx_dbg(wx, "Clearing RAR[1-%d]\n", rar_entries - 1); for (i = 1; i < rar_entries; i++) { - wr32(wxhw, WX_PSR_MAC_SWC_IDX, i); - wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0); - wr32(wxhw, WX_PSR_MAC_SWC_AD_H, 0); + wr32(wx, WX_PSR_MAC_SWC_IDX, i); + wr32(wx, WX_PSR_MAC_SWC_AD_L, 0); + wr32(wx, WX_PSR_MAC_SWC_AD_H, 0); } /* Clear the MTA */ - wxhw->addr_ctrl.mta_in_use = 0; - psrctl = rd32(wxhw, WX_PSR_CTL); + wx->addr_ctrl.mta_in_use = 0; + psrctl = rd32(wx, WX_PSR_CTL); psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE); - psrctl |= wxhw->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT; - wr32(wxhw, WX_PSR_CTL, psrctl); - wx_dbg(wxhw, " Clearing MTA\n"); - for (i = 0; i < wxhw->mac.mcft_size; i++) - wr32(wxhw, WX_PSR_MC_TBL(i), 0); + psrctl |= wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT; + wr32(wx, WX_PSR_CTL, psrctl); + wx_dbg(wx, " Clearing MTA\n"); + for (i = 0; i < wx->mac.mcft_size; i++) + wr32(wx, WX_PSR_MC_TBL(i), 0); - wx_init_uta_tables(wxhw); + wx_init_uta_tables(wx); } EXPORT_SYMBOL(wx_init_rx_addrs); -void wx_disable_rx(struct wx_hw *wxhw) +static void wx_sync_mac_table(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (wx->mac_table[i].state & WX_MAC_STATE_MODIFIED) { + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) { + wx_set_rar(wx, i, + wx->mac_table[i].addr, + wx->mac_table[i].pools, + WX_PSR_MAC_SWC_AD_H_AV); + } else { + wx_clear_rar(wx, i); + } + wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED); + } + } +} + +/* this function destroys the first RAR entry */ +void wx_mac_set_default_filter(struct wx *wx, u8 *addr) +{ + memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN); + wx->mac_table[0].pools = 1ULL; + wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE); + wx_set_rar(wx, 0, wx->mac_table[0].addr, + wx->mac_table[0].pools, + WX_PSR_MAC_SWC_AD_H_AV); +} +EXPORT_SYMBOL(wx_mac_set_default_filter); + +void wx_flush_sw_mac_table(struct wx *wx) +{ + u32 i; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (!(wx->mac_table[i].state & WX_MAC_STATE_IN_USE)) + continue; + + wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED; + wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE; + memset(wx->mac_table[i].addr, 0, ETH_ALEN); + wx->mac_table[i].pools = 0; + } + wx_sync_mac_table(wx); +} +EXPORT_SYMBOL(wx_flush_sw_mac_table); + +static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool) +{ + u32 i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* search table for addr, if found, set to 0 and sync */ + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (!ether_addr_equal(addr, wx->mac_table[i].addr)) + continue; + + wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED; + wx->mac_table[i].pools &= ~(1ULL << pool); + if (!wx->mac_table[i].pools) { + wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE; + memset(wx->mac_table[i].addr, 0, ETH_ALEN); + } + wx_sync_mac_table(wx); + return 0; + } + return 
-ENOMEM; +} + +/** + * wx_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +int wx_set_mac(struct net_device *netdev, void *p) +{ + struct wx *wx = netdev_priv(netdev); + struct sockaddr *addr = p; + int retval; + + retval = eth_prepare_mac_addr_change(netdev, addr); + if (retval) + return retval; + + wx_del_mac_filter(wx, wx->mac.addr, 0); + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len); + + wx_mac_set_default_filter(wx, wx->mac.addr); + + return 0; +} +EXPORT_SYMBOL(wx_set_mac); + +void wx_disable_rx(struct wx *wx) { u32 pfdtxgswc; u32 rxctrl; - rxctrl = rd32(wxhw, WX_RDB_PB_CTL); + rxctrl = rd32(wx, WX_RDB_PB_CTL); if (rxctrl & WX_RDB_PB_CTL_RXEN) { - pfdtxgswc = rd32(wxhw, WX_PSR_CTL); + pfdtxgswc = rd32(wx, WX_PSR_CTL); if (pfdtxgswc & WX_PSR_CTL_SW_EN) { pfdtxgswc &= ~WX_PSR_CTL_SW_EN; - wr32(wxhw, WX_PSR_CTL, pfdtxgswc); - wxhw->mac.set_lben = true; + wr32(wx, WX_PSR_CTL, pfdtxgswc); + wx->mac.set_lben = true; } else { - wxhw->mac.set_lben = false; + wx->mac.set_lben = false; } rxctrl &= ~WX_RDB_PB_CTL_RXEN; - wr32(wxhw, WX_RDB_PB_CTL, rxctrl); + wr32(wx, WX_RDB_PB_CTL, rxctrl); - if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || - ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { + if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || + ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { /* disable mac receiver */ - wr32m(wxhw, WX_MAC_RX_CFG, + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); } } @@ -752,28 +846,28 @@ EXPORT_SYMBOL(wx_disable_rx); /** * wx_disable_pcie_master - Disable PCI-express master access - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Disables PCI-Express master access and verifies there are no pending * requests. **/ -int wx_disable_pcie_master(struct wx_hw *wxhw) +int wx_disable_pcie_master(struct wx *wx) { int status = 0; u32 val; /* Always set this bit to ensure any future transactions are blocked */ - pci_clear_master(wxhw->pdev); + pci_clear_master(wx->pdev); /* Exit if master requests are blocked */ - if (!(rd32(wxhw, WX_PX_TRANSACTION_PENDING))) + if (!(rd32(wx, WX_PX_TRANSACTION_PENDING))) return 0; /* Poll for master request bit to clear */ status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT, - false, wxhw, WX_PX_TRANSACTION_PENDING); + false, wx, WX_PX_TRANSACTION_PENDING); if (status < 0) - wx_err(wxhw, "PCIe transaction pending bit did not clear.\n"); + wx_err(wx, "PCIe transaction pending bit did not clear.\n"); return status; } @@ -781,106 +875,106 @@ EXPORT_SYMBOL(wx_disable_pcie_master); /** * wx_stop_adapter - Generic stop Tx/Rx units - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * * Sets the adapter_stopped flag within wx_hw struct. Clears interrupts, * disables transmit and receive units. The adapter_stopped flag is used by * the shared code and drivers to determine if the adapter is in a stopped * state and should not touch the hardware. 
**/ -int wx_stop_adapter(struct wx_hw *wxhw) +int wx_stop_adapter(struct wx *wx) { u16 i; /* Set the adapter_stopped flag so other driver functions stop touching * the hardware */ - wxhw->adapter_stopped = true; + wx->adapter_stopped = true; /* Disable the receive unit */ - wx_disable_rx(wxhw); + wx_disable_rx(wx); /* Set interrupt mask to stop interrupts from being generated */ - wx_intr_disable(wxhw, WX_INTR_ALL); + wx_intr_disable(wx, WX_INTR_ALL); /* Clear any pending interrupts, flush previous writes */ - wr32(wxhw, WX_PX_MISC_IC, 0xffffffff); - wr32(wxhw, WX_BME_CTL, 0x3); + wr32(wx, WX_PX_MISC_IC, 0xffffffff); + wr32(wx, WX_BME_CTL, 0x3); /* Disable the transmit unit. Each queue must be disabled. */ - for (i = 0; i < wxhw->mac.max_tx_queues; i++) { - wr32m(wxhw, WX_PX_TR_CFG(i), + for (i = 0; i < wx->mac.max_tx_queues; i++) { + wr32m(wx, WX_PX_TR_CFG(i), WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE, WX_PX_TR_CFG_SWFLSH); } /* Disable the receive unit by stopping each queue */ - for (i = 0; i < wxhw->mac.max_rx_queues; i++) { - wr32m(wxhw, WX_PX_RR_CFG(i), + for (i = 0; i < wx->mac.max_rx_queues; i++) { + wr32m(wx, WX_PX_RR_CFG(i), WX_PX_RR_CFG_RR_EN, 0); } /* flush all queues disables */ - WX_WRITE_FLUSH(wxhw); + WX_WRITE_FLUSH(wx); /* Prevent the PCI-E bus from hanging by disabling PCI-E master * access and verify no pending requests */ - return wx_disable_pcie_master(wxhw); + return wx_disable_pcie_master(wx); } EXPORT_SYMBOL(wx_stop_adapter); -void wx_reset_misc(struct wx_hw *wxhw) +void wx_reset_misc(struct wx *wx) { int i; /* receive packets that size > 2048 */ - wr32m(wxhw, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE); + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE); /* clear counters on read */ - wr32m(wxhw, WX_MMC_CONTROL, + wr32m(wx, WX_MMC_CONTROL, WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD); - wr32m(wxhw, WX_MAC_RX_FLOW_CTRL, + wr32m(wx, WX_MAC_RX_FLOW_CTRL, WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE); - wr32(wxhw, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); + wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); - wr32m(wxhw, WX_MIS_RST_ST, + wr32m(wx, WX_MIS_RST_ST, WX_MIS_RST_ST_RST_INIT, 0x1E00); /* errata 4: initialize mng flex tbl and wakeup flex tbl*/ - wr32(wxhw, WX_PSR_MNG_FLEX_SEL, 0); + wr32(wx, WX_PSR_MNG_FLEX_SEL, 0); for (i = 0; i < 16; i++) { - wr32(wxhw, WX_PSR_MNG_FLEX_DW_L(i), 0); - wr32(wxhw, WX_PSR_MNG_FLEX_DW_H(i), 0); - wr32(wxhw, WX_PSR_MNG_FLEX_MSK(i), 0); + wr32(wx, WX_PSR_MNG_FLEX_DW_L(i), 0); + wr32(wx, WX_PSR_MNG_FLEX_DW_H(i), 0); + wr32(wx, WX_PSR_MNG_FLEX_MSK(i), 0); } - wr32(wxhw, WX_PSR_LAN_FLEX_SEL, 0); + wr32(wx, WX_PSR_LAN_FLEX_SEL, 0); for (i = 0; i < 16; i++) { - wr32(wxhw, WX_PSR_LAN_FLEX_DW_L(i), 0); - wr32(wxhw, WX_PSR_LAN_FLEX_DW_H(i), 0); - wr32(wxhw, WX_PSR_LAN_FLEX_MSK(i), 0); + wr32(wx, WX_PSR_LAN_FLEX_DW_L(i), 0); + wr32(wx, WX_PSR_LAN_FLEX_DW_H(i), 0); + wr32(wx, WX_PSR_LAN_FLEX_MSK(i), 0); } /* set pause frame dst mac addr */ - wr32(wxhw, WX_RDB_PFCMACDAL, 0xC2000001); - wr32(wxhw, WX_RDB_PFCMACDAH, 0x0180); + wr32(wx, WX_RDB_PFCMACDAL, 0xC2000001); + wr32(wx, WX_RDB_PFCMACDAH, 0x0180); } EXPORT_SYMBOL(wx_reset_misc); /** * wx_get_pcie_msix_counts - Gets MSI-X vector count - * @wxhw: pointer to hardware structure + * @wx: pointer to hardware structure * @msix_count: number of MSI interrupts that can be obtained * @max_msix_count: number of MSI interrupts that mac need * * Read PCIe configuration space, and get the MSI-X vector count from * the capabilities table. 
**/ -int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_count) +int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count) { - struct pci_dev *pdev = wxhw->pdev; + struct pci_dev *pdev = wx->pdev; struct device *dev = &pdev->dev; int pos; @@ -904,31 +998,39 @@ int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_co } EXPORT_SYMBOL(wx_get_pcie_msix_counts); -int wx_sw_init(struct wx_hw *wxhw) +int wx_sw_init(struct wx *wx) { - struct pci_dev *pdev = wxhw->pdev; + struct pci_dev *pdev = wx->pdev; u32 ssid = 0; int err = 0; - wxhw->vendor_id = pdev->vendor; - wxhw->device_id = pdev->device; - wxhw->revision_id = pdev->revision; - wxhw->oem_svid = pdev->subsystem_vendor; - wxhw->oem_ssid = pdev->subsystem_device; - wxhw->bus.device = PCI_SLOT(pdev->devfn); - wxhw->bus.func = PCI_FUNC(pdev->devfn); - - if (wxhw->oem_svid == PCI_VENDOR_ID_WANGXUN) { - wxhw->subsystem_vendor_id = pdev->subsystem_vendor; - wxhw->subsystem_device_id = pdev->subsystem_device; + wx->vendor_id = pdev->vendor; + wx->device_id = pdev->device; + wx->revision_id = pdev->revision; + wx->oem_svid = pdev->subsystem_vendor; + wx->oem_ssid = pdev->subsystem_device; + wx->bus.device = PCI_SLOT(pdev->devfn); + wx->bus.func = PCI_FUNC(pdev->devfn); + + if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) { + wx->subsystem_vendor_id = pdev->subsystem_vendor; + wx->subsystem_device_id = pdev->subsystem_device; } else { - err = wx_flash_read_dword(wxhw, 0xfffdc, &ssid); + err = wx_flash_read_dword(wx, 0xfffdc, &ssid); if (!err) - wxhw->subsystem_device_id = swab16((u16)ssid); + wx->subsystem_device_id = swab16((u16)ssid); return err; } + wx->mac_table = kcalloc(wx->mac.num_rar_entries, + sizeof(struct wx_mac_addr), + GFP_KERNEL); + if (!wx->mac_table) { + wx_err(wx, "mac_table allocation failed\n"); + return -ENOMEM; + } + return 0; } EXPORT_SYMBOL(wx_sw_init); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index a0652f5e9939..803983546f3a 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -4,25 +4,26 @@ #ifndef _WX_HW_H_ #define _WX_HW_H_ -int wx_check_flash_load(struct wx_hw *hw, u32 check_bit); -void wx_control_hw(struct wx_hw *wxhw, bool drv); -int wx_mng_present(struct wx_hw *wxhw); -int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer, +int wx_check_flash_load(struct wx *wx, u32 check_bit); +void wx_control_hw(struct wx *wx, bool drv); +int wx_mng_present(struct wx *wx); +int wx_host_interface_command(struct wx *wx, u32 *buffer, u32 length, u32 timeout, bool return_data); -int wx_read_ee_hostif(struct wx_hw *wxhw, u16 offset, u16 *data); -int wx_read_ee_hostif_buffer(struct wx_hw *wxhw, +int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data); +int wx_read_ee_hostif_buffer(struct wx *wx, u16 offset, u16 words, u16 *data); -int wx_reset_hostif(struct wx_hw *wxhw); -void wx_init_eeprom_params(struct wx_hw *wxhw); -void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr); -int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, u32 enable_addr); -int wx_clear_rar(struct wx_hw *wxhw, u32 index); -void wx_init_rx_addrs(struct wx_hw *wxhw); -void wx_disable_rx(struct wx_hw *wxhw); -int wx_disable_pcie_master(struct wx_hw *wxhw); -int wx_stop_adapter(struct wx_hw *wxhw); -void wx_reset_misc(struct wx_hw *wxhw); -int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_count); -int wx_sw_init(struct wx_hw *wxhw); +int 
wx_reset_hostif(struct wx *wx); +void wx_init_eeprom_params(struct wx *wx); +void wx_get_mac_addr(struct wx *wx, u8 *mac_addr); +void wx_init_rx_addrs(struct wx *wx); +void wx_mac_set_default_filter(struct wx *wx, u8 *addr); +void wx_flush_sw_mac_table(struct wx *wx); +int wx_set_mac(struct net_device *netdev, void *p); +void wx_disable_rx(struct wx *wx); +int wx_disable_pcie_master(struct wx *wx); +int wx_stop_adapter(struct wx *wx); +void wx_reset_misc(struct wx *wx); +int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); +int wx_sw_init(struct wx *wx); #endif /* _WX_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 1cbeef8230bf..c86a37914d43 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -4,6 +4,8 @@ #ifndef _WX_TYPE_H_ #define _WX_TYPE_H_ +#include <linux/bitfield.h> + /* Vendor ID */ #ifndef PCI_VENDOR_ID_WANGXUN #define PCI_VENDOR_ID_WANGXUN 0x8088 @@ -36,12 +38,11 @@ #define WX_SPI_CMD 0x10104 #define WX_SPI_CMD_READ_DWORD 0x1 #define WX_SPI_CLK_DIV 0x3 -#define WX_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28) -#define WX_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25) -#define WX_SPI_CMD_ADDR(_v) (((_v) & 0xFFFFFF)) +#define WX_SPI_CMD_CMD(_v) FIELD_PREP(GENMASK(30, 28), _v) +#define WX_SPI_CMD_CLK(_v) FIELD_PREP(GENMASK(27, 25), _v) +#define WX_SPI_CMD_ADDR(_v) FIELD_PREP(GENMASK(23, 0), _v) #define WX_SPI_DATA 0x10108 #define WX_SPI_DATA_BYPASS BIT(31) -#define WX_SPI_DATA_STATUS(_v) (((_v) & 0xFF) << 16) #define WX_SPI_DATA_OP_DONE BIT(0) #define WX_SPI_STATUS 0x1010C #define WX_SPI_STATUS_OPDONE BIT(0) @@ -113,8 +114,8 @@ /* mac switcher */ #define WX_PSR_MAC_SWC_AD_L 0x16200 #define WX_PSR_MAC_SWC_AD_H 0x16204 -#define WX_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF)) -#define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) << 30) +#define WX_PSR_MAC_SWC_AD_H_AD(v) FIELD_PREP(U16_MAX, v) +#define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) FIELD_PREP(BIT(30), v) #define WX_PSR_MAC_SWC_AD_H_AV BIT(31) #define WX_PSR_MAC_SWC_VM_L 0x16208 #define WX_PSR_MAC_SWC_VM_H 0x1620C @@ -133,11 +134,14 @@ /************************************* ETH MAC *****************************/ #define WX_MAC_TX_CFG 0x11000 #define WX_MAC_TX_CFG_TE BIT(0) +#define WX_MAC_TX_CFG_SPEED_MASK GENMASK(30, 29) +#define WX_MAC_TX_CFG_SPEED_1G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 3) #define WX_MAC_RX_CFG 0x11004 #define WX_MAC_RX_CFG_RE BIT(0) #define WX_MAC_RX_CFG_JE BIT(8) #define WX_MAC_PKT_FLT 0x11008 #define WX_MAC_PKT_FLT_PR BIT(0) /* promiscuous mode */ +#define WX_MAC_WDG_TIMEOUT 0x1100C #define WX_MAC_RX_FLOW_CTRL 0x11090 #define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */ #define WX_MMC_CONTROL 0x11800 @@ -185,6 +189,12 @@ #define WX_SW_REGION_PTR 0x1C +#define WX_MAC_STATE_DEFAULT 0x1 +#define WX_MAC_STATE_MODIFIED 0x2 +#define WX_MAC_STATE_IN_USE 0x4 + +#define WX_CFG_PORT_ST 0x14404 + /* Host Interface Command Structures */ struct wx_hic_hdr { u8 cmd; @@ -249,6 +259,12 @@ enum wx_mac_type { wx_mac_em }; +enum em_mac_type { + em_mac_type_unknown = 0, + em_mac_type_mdi, + em_mac_type_rgmii +}; + struct wx_mac_info { enum wx_mac_type type; bool set_lben; @@ -284,19 +300,28 @@ struct wx_addr_filter_info { bool user_set_promisc; }; +struct wx_mac_addr { + u8 addr[ETH_ALEN]; + u16 state; /* bitmask */ + u64 pools; +}; + enum wx_reset_type { WX_LAN_RESET = 0, WX_SW_RESET, WX_GLOBAL_RESET }; -struct wx_hw { +struct wx { u8 __iomem *hw_addr; struct pci_dev *pdev; + struct 
net_device *netdev; struct wx_bus_info bus; struct wx_mac_info mac; + enum em_mac_type mac_type; struct wx_eeprom_info eeprom; struct wx_addr_filter_info addr_ctrl; + struct wx_mac_addr *mac_table; u16 device_id; u16 vendor_id; u16 subsystem_device_id; @@ -304,8 +329,45 @@ struct wx_hw { u8 revision_id; u16 oem_ssid; u16 oem_svid; + u16 msg_enable; bool adapter_stopped; + char eeprom_id[32]; enum wx_reset_type reset_type; + + /* PHY stuff */ + unsigned int link; + int speed; + int duplex; + struct phy_device *phydev; + + bool wol_enabled; + bool ncsi_enabled; + bool gpio_ctrl; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u16 rx_work_limit; + + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* upper limit of q_vectors for device */ + + u32 tx_ring_count; + u32 rx_ring_count; + +#define WX_MAX_RETA_ENTRIES 128 + u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES]; + +#define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ + u32 *rss_key; + u32 wol; + + u16 bd_number; }; #define WX_INTR_ALL (~0ULL) @@ -319,23 +381,23 @@ struct wx_hw { wr32((a), (reg) + ((off) << 2), (val)) static inline u32 -rd32m(struct wx_hw *wxhw, u32 reg, u32 mask) +rd32m(struct wx *wx, u32 reg, u32 mask) { u32 val; - val = rd32(wxhw, reg); + val = rd32(wx, reg); return val & mask; } static inline void -wr32m(struct wx_hw *wxhw, u32 reg, u32 mask, u32 field) +wr32m(struct wx *wx, u32 reg, u32 mask, u32 field) { u32 val; - val = rd32(wxhw, reg); + val = rd32(wx, reg); val = ((val & ~mask) | (field & mask)); - wr32(wxhw, reg, val); + wr32(wx, reg, val); } /* On some domestic CPU platforms, sometimes IO is not synchronized with @@ -343,10 +405,10 @@ wr32m(struct wx_hw *wxhw, u32 reg, u32 mask, u32 field) */ #define WX_WRITE_FLUSH(H) rd32(H, WX_MIS_PWR) -#define wx_err(wxhw, fmt, arg...) \ - dev_err(&(wxhw)->pdev->dev, fmt, ##arg) +#define wx_err(wx, fmt, arg...) \ + dev_err(&(wx)->pdev->dev, fmt, ##arg) -#define wx_dbg(wxhw, fmt, arg...) \ - dev_dbg(&(wxhw)->pdev->dev, fmt, ##arg) +#define wx_dbg(wx, fmt, arg...) \ + dev_dbg(&(wx)->pdev->dev, fmt, ##arg) #endif /* _WX_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/Makefile b/drivers/net/ethernet/wangxun/ngbe/Makefile index 391c2cbc1bb4..50fdca87d2a5 100644 --- a/drivers/net/ethernet/wangxun/ngbe/Makefile +++ b/drivers/net/ethernet/wangxun/ngbe/Makefile @@ -6,4 +6,4 @@ obj-$(CONFIG_NGBE) += ngbe.o -ngbe-objs := ngbe_main.o ngbe_hw.o +ngbe-objs := ngbe_main.o ngbe_hw.o ngbe_mdio.o diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe.h b/drivers/net/ethernet/wangxun/ngbe/ngbe.h deleted file mode 100644 index af147ca8605c..000000000000 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe.h +++ /dev/null @@ -1,79 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ - -#ifndef _NGBE_H_ -#define _NGBE_H_ - -#include "ngbe_type.h" - -#define NGBE_MAX_FDIR_INDICES 7 - -#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) -#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) - -#define NGBE_ETH_LENGTH_OF_ADDRESS 6 -#define NGBE_MAX_MSIX_VECTORS 0x09 -#define NGBE_RAR_ENTRIES 32 - -/* TX/RX descriptor defines */ -#define NGBE_DEFAULT_TXD 512 /* default ring size */ -#define NGBE_DEFAULT_TX_WORK 256 -#define NGBE_MAX_TXD 8192 -#define NGBE_MIN_TXD 128 - -#define NGBE_DEFAULT_RXD 512 /* default ring size */ -#define NGBE_DEFAULT_RX_WORK 256 -#define NGBE_MAX_RXD 8192 -#define NGBE_MIN_RXD 128 - -#define NGBE_MAC_STATE_DEFAULT 0x1 -#define NGBE_MAC_STATE_MODIFIED 0x2 -#define NGBE_MAC_STATE_IN_USE 0x4 - -struct ngbe_mac_addr { - u8 addr[ETH_ALEN]; - u16 state; /* bitmask */ - u64 pools; -}; - -/* board specific private data structure */ -struct ngbe_adapter { - u8 __iomem *io_addr; /* Mainly for iounmap use */ - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - - /* structs defined in ngbe_hw.h */ - struct ngbe_hw hw; - struct ngbe_mac_addr *mac_table; - u16 msg_enable; - - /* Tx fast path data */ - int num_tx_queues; - u16 tx_itr_setting; - u16 tx_work_limit; - - /* Rx fast path data */ - int num_rx_queues; - u16 rx_itr_setting; - u16 rx_work_limit; - - int num_q_vectors; /* current number of q_vectors for device */ - int max_q_vectors; /* upper limit of q_vectors for device */ - - u32 tx_ring_count; - u32 rx_ring_count; - -#define NGBE_MAX_RETA_ENTRIES 128 - u8 rss_indir_tbl[NGBE_MAX_RETA_ENTRIES]; - -#define NGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ - u32 *rss_key; - u32 wol; - - u16 bd_number; -}; - -extern char ngbe_driver_name[]; - -#endif /* _NGBE_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c index 0e3923b3737e..6562a2de9527 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c @@ -9,12 +9,10 @@ #include "../libwx/wx_hw.h" #include "ngbe_type.h" #include "ngbe_hw.h" -#include "ngbe.h" -int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw) +int ngbe_eeprom_chksum_hostif(struct wx *wx) { struct wx_hic_read_shadow_ram buffer; - struct wx_hw *wxhw = &hw->wxhw; int status; int tmp; @@ -27,61 +25,73 @@ int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw) /* one word */ buffer.length = 0; - status = wx_host_interface_command(wxhw, (u32 *)&buffer, sizeof(buffer), + status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), WX_HI_COMMAND_TIMEOUT, false); if (status < 0) return status; - tmp = rd32a(wxhw, WX_MNG_MBOX, 1); + tmp = rd32a(wx, WX_MNG_MBOX, 1); if (tmp == NGBE_FW_CMD_ST_PASS) return 0; return -EIO; } -static int ngbe_reset_misc(struct ngbe_hw *hw) +static int ngbe_reset_misc(struct wx *wx) { - struct wx_hw *wxhw = &hw->wxhw; - - wx_reset_misc(wxhw); - if (hw->mac_type == ngbe_mac_type_rgmii) - wr32(wxhw, NGBE_MDIO_CLAUSE_SELECT, 0xF); - if (hw->gpio_ctrl) { + wx_reset_misc(wx); + if (wx->gpio_ctrl) { /* gpio0 is used to power on/off control*/ - wr32(wxhw, NGBE_GPIO_DDR, 0x1); - wr32(wxhw, NGBE_GPIO_DR, NGBE_GPIO_DR_0); + wr32(wx, NGBE_GPIO_DDR, 0x1); + ngbe_sfp_modules_txrx_powerctl(wx, false); } return 0; } +void ngbe_sfp_modules_txrx_powerctl(struct wx *wx, bool swi) +{ + /* gpio0 is used to power on control . 0 is on */ + wr32(wx, NGBE_GPIO_DR, swi ? 
0 : NGBE_GPIO_DR_0); +} + /** * ngbe_reset_hw - Perform hardware reset - * @hw: pointer to hardware structure + * @wx: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masks * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. **/ -int ngbe_reset_hw(struct ngbe_hw *hw) +int ngbe_reset_hw(struct wx *wx) { - struct wx_hw *wxhw = &hw->wxhw; - int status = 0; - u32 reset = 0; + u32 val = 0; + int ret = 0; - /* Call adapter stop to disable tx/rx and clear interrupts */ - status = wx_stop_adapter(wxhw); - if (status != 0) - return status; - reset = WX_MIS_RST_LAN_RST(wxhw->bus.func); - wr32(wxhw, WX_MIS_RST, reset | rd32(wxhw, WX_MIS_RST)); - ngbe_reset_misc(hw); + /* Call wx stop to disable tx/rx and clear interrupts */ + ret = wx_stop_adapter(wx); + if (ret != 0) + return ret; + + if (wx->mac_type != em_mac_type_mdi) { + val = WX_MIS_RST_LAN_RST(wx->bus.func); + wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST)); + + ret = read_poll_timeout(rd32, val, + !(val & (BIT(9) << wx->bus.func)), 1000, + 100000, false, wx, 0x10028); + if (ret) { + wx_err(wx, "Lan reset exceed s maximum times.\n"); + return ret; + } + } + ngbe_reset_misc(wx); /* Store the permanent mac address */ - wx_get_mac_addr(wxhw, wxhw->mac.perm_addr); + wx_get_mac_addr(wx, wx->mac.perm_addr); /* reset num_rar_entries to 128 */ - wxhw->mac.num_rar_entries = NGBE_RAR_ENTRIES; - wx_init_rx_addrs(wxhw); - pci_set_master(wxhw->pdev); + wx->mac.num_rar_entries = NGBE_RAR_ENTRIES; + wx_init_rx_addrs(wx); + pci_set_master(wx->pdev); return 0; } diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h index 42476a3fe57c..a4693e006816 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h @@ -7,6 +7,7 @@ #ifndef _NGBE_HW_H_ #define _NGBE_HW_H_ -int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw); -int ngbe_reset_hw(struct ngbe_hw *hw); +int ngbe_eeprom_chksum_hostif(struct wx *wx); +void ngbe_sfp_modules_txrx_powerctl(struct wx *wx, bool swi); +int ngbe_reset_hw(struct wx *wx); #endif /* _NGBE_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index f0b24366da18..ed52f80b5475 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -9,12 +9,14 @@ #include <linux/aer.h> #include <linux/etherdevice.h> #include <net/ip.h> +#include <linux/phy.h> #include "../libwx/wx_type.h" #include "../libwx/wx_hw.h" #include "ngbe_type.h" +#include "ngbe_mdio.h" #include "ngbe_hw.h" -#include "ngbe.h" + char ngbe_driver_name[] = "ngbe"; /* ngbe_pci_tbl - PCI Device ID Table @@ -39,70 +41,27 @@ static const struct pci_device_id ngbe_pci_tbl[] = { { .device = 0 } }; -static void ngbe_mac_set_default_filter(struct ngbe_adapter *adapter, u8 *addr) -{ - struct ngbe_hw *hw = &adapter->hw; - - memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); - adapter->mac_table[0].pools = 1ULL; - adapter->mac_table[0].state = (NGBE_MAC_STATE_DEFAULT | - NGBE_MAC_STATE_IN_USE); - wx_set_rar(&hw->wxhw, 0, adapter->mac_table[0].addr, - adapter->mac_table[0].pools, - WX_PSR_MAC_SWC_AD_H_AV); -} - /** * ngbe_init_type_code - Initialize the shared code - * @hw: pointer to hardware structure + * @wx: pointer to hardware structure **/ -static void ngbe_init_type_code(struct ngbe_hw *hw) +static void ngbe_init_type_code(struct wx *wx) { int wol_mask = 0, ncsi_mask = 0; - struct wx_hw *wxhw = 
&hw->wxhw; - u16 type_mask = 0; + u16 type_mask = 0, val; - wxhw->mac.type = wx_mac_em; - type_mask = (u16)(wxhw->subsystem_device_id & NGBE_OEM_MASK); - ncsi_mask = wxhw->subsystem_device_id & NGBE_NCSI_MASK; - wol_mask = wxhw->subsystem_device_id & NGBE_WOL_MASK; - - switch (type_mask) { - case NGBE_SUBID_M88E1512_SFP: - case NGBE_SUBID_LY_M88E1512_SFP: - hw->phy.type = ngbe_phy_m88e1512_sfi; - break; - case NGBE_SUBID_M88E1512_RJ45: - hw->phy.type = ngbe_phy_m88e1512; - break; - case NGBE_SUBID_M88E1512_MIX: - hw->phy.type = ngbe_phy_m88e1512_unknown; - break; - case NGBE_SUBID_YT8521S_SFP: - case NGBE_SUBID_YT8521S_SFP_GPIO: - case NGBE_SUBID_LY_YT8521S_SFP: - hw->phy.type = ngbe_phy_yt8521s_sfi; - break; - case NGBE_SUBID_INTERNAL_YT8521S_SFP: - case NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO: - hw->phy.type = ngbe_phy_internal_yt8521s_sfi; - break; - case NGBE_SUBID_RGMII_FPGA: - case NGBE_SUBID_OCP_CARD: - fallthrough; - default: - hw->phy.type = ngbe_phy_internal; - break; - } + wx->mac.type = wx_mac_em; + type_mask = (u16)(wx->subsystem_device_id & NGBE_OEM_MASK); + ncsi_mask = wx->subsystem_device_id & NGBE_NCSI_MASK; + wol_mask = wx->subsystem_device_id & NGBE_WOL_MASK; - if (hw->phy.type == ngbe_phy_internal || - hw->phy.type == ngbe_phy_internal_yt8521s_sfi) - hw->mac_type = ngbe_mac_type_mdi; - else - hw->mac_type = ngbe_mac_type_rgmii; + val = rd32(wx, WX_CFG_PORT_ST); + wx->mac_type = (val & BIT(7)) >> 7 ? + em_mac_type_rgmii : + em_mac_type_mdi; - hw->wol_enabled = (wol_mask == NGBE_WOL_SUP) ? 1 : 0; - hw->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK || + wx->wol_enabled = (wol_mask == NGBE_WOL_SUP) ? 1 : 0; + wx->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK || type_mask == NGBE_SUBID_OCP_CARD) ? 1 : 0; switch (type_mask) { @@ -110,31 +69,31 @@ static void ngbe_init_type_code(struct ngbe_hw *hw) case NGBE_SUBID_LY_M88E1512_SFP: case NGBE_SUBID_YT8521S_SFP_GPIO: case NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO: - hw->gpio_ctrl = 1; + wx->gpio_ctrl = 1; break; default: - hw->gpio_ctrl = 0; + wx->gpio_ctrl = 0; break; } } /** - * ngbe_init_rss_key - Initialize adapter RSS key - * @adapter: device handle + * ngbe_init_rss_key - Initialize wx RSS key + * @wx: device handle * * Allocates and initializes the RSS key if it is not allocated. 
**/ -static inline int ngbe_init_rss_key(struct ngbe_adapter *adapter) +static inline int ngbe_init_rss_key(struct wx *wx) { u32 *rss_key; - if (!adapter->rss_key) { - rss_key = kzalloc(NGBE_RSS_KEY_SIZE, GFP_KERNEL); + if (!wx->rss_key) { + rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL); if (unlikely(!rss_key)) return -ENOMEM; - netdev_rss_key_fill(rss_key, NGBE_RSS_KEY_SIZE); - adapter->rss_key = rss_key; + netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE); + wx->rss_key = rss_key; } return 0; @@ -142,72 +101,76 @@ static inline int ngbe_init_rss_key(struct ngbe_adapter *adapter) /** * ngbe_sw_init - Initialize general software structures - * @adapter: board private structure to initialize + * @wx: board private structure to initialize **/ -static int ngbe_sw_init(struct ngbe_adapter *adapter) +static int ngbe_sw_init(struct wx *wx) { - struct pci_dev *pdev = adapter->pdev; - struct ngbe_hw *hw = &adapter->hw; - struct wx_hw *wxhw = &hw->wxhw; + struct pci_dev *pdev = wx->pdev; u16 msix_count = 0; int err = 0; - wxhw->hw_addr = adapter->io_addr; - wxhw->pdev = pdev; + wx->mac.num_rar_entries = NGBE_RAR_ENTRIES; + wx->mac.max_rx_queues = NGBE_MAX_RX_QUEUES; + wx->mac.max_tx_queues = NGBE_MAX_TX_QUEUES; /* PCI config space info */ - err = wx_sw_init(wxhw); + err = wx_sw_init(wx); if (err < 0) { - netif_err(adapter, probe, adapter->netdev, - "Read of internal subsystem device id failed\n"); + wx_err(wx, "read of internal subsystem device id failed\n"); return err; } /* mac type, phy type , oem type */ - ngbe_init_type_code(hw); + ngbe_init_type_code(wx); - wxhw->mac.max_rx_queues = NGBE_MAX_RX_QUEUES; - wxhw->mac.max_tx_queues = NGBE_MAX_TX_QUEUES; - wxhw->mac.num_rar_entries = NGBE_RAR_ENTRIES; /* Set common capability flags and settings */ - adapter->max_q_vectors = NGBE_MAX_MSIX_VECTORS; - - err = wx_get_pcie_msix_counts(wxhw, &msix_count, NGBE_MAX_MSIX_VECTORS); + wx->max_q_vectors = NGBE_MAX_MSIX_VECTORS; + err = wx_get_pcie_msix_counts(wx, &msix_count, NGBE_MAX_MSIX_VECTORS); if (err) dev_err(&pdev->dev, "Do not support MSI-X\n"); - wxhw->mac.max_msix_vectors = msix_count; + wx->mac.max_msix_vectors = msix_count; - adapter->mac_table = kcalloc(wxhw->mac.num_rar_entries, - sizeof(struct ngbe_mac_addr), - GFP_KERNEL); - if (!adapter->mac_table) { - dev_err(&pdev->dev, "mac_table allocation failed: %d\n", err); - return -ENOMEM; - } - - if (ngbe_init_rss_key(adapter)) + if (ngbe_init_rss_key(wx)) return -ENOMEM; /* enable itr by default in dynamic mode */ - adapter->rx_itr_setting = 1; - adapter->tx_itr_setting = 1; + wx->rx_itr_setting = 1; + wx->tx_itr_setting = 1; /* set default ring sizes */ - adapter->tx_ring_count = NGBE_DEFAULT_TXD; - adapter->rx_ring_count = NGBE_DEFAULT_RXD; + wx->tx_ring_count = NGBE_DEFAULT_TXD; + wx->rx_ring_count = NGBE_DEFAULT_RXD; /* set default work limits */ - adapter->tx_work_limit = NGBE_DEFAULT_TX_WORK; - adapter->rx_work_limit = NGBE_DEFAULT_RX_WORK; + wx->tx_work_limit = NGBE_DEFAULT_TX_WORK; + wx->rx_work_limit = NGBE_DEFAULT_RX_WORK; return 0; } -static void ngbe_down(struct ngbe_adapter *adapter) +static void ngbe_disable_device(struct wx *wx) { - netif_carrier_off(adapter->netdev); - netif_tx_disable(adapter->netdev); -}; + struct net_device *netdev = wx->netdev; + + /* disable receives */ + wx_disable_rx(wx); + netif_tx_disable(netdev); + if (wx->gpio_ctrl) + ngbe_sfp_modules_txrx_powerctl(wx, false); +} + +static void ngbe_down(struct wx *wx) +{ + phy_stop(wx->phydev); + ngbe_disable_device(wx); +} + +static void ngbe_up(struct wx *wx) +{ + if 
(wx->gpio_ctrl) + ngbe_sfp_modules_txrx_powerctl(wx, true); + phy_start(wx->phydev); +} /** * ngbe_open - Called when a network interface is made active @@ -220,11 +183,14 @@ static void ngbe_down(struct ngbe_adapter *adapter) **/ static int ngbe_open(struct net_device *netdev) { - struct ngbe_adapter *adapter = netdev_priv(netdev); - struct ngbe_hw *hw = &adapter->hw; - struct wx_hw *wxhw = &hw->wxhw; + struct wx *wx = netdev_priv(netdev); + int err; - wx_control_hw(wxhw, true); + wx_control_hw(wx, true); + err = ngbe_phy_connect(wx); + if (err) + return err; + ngbe_up(wx); return 0; } @@ -242,10 +208,11 @@ static int ngbe_open(struct net_device *netdev) **/ static int ngbe_close(struct net_device *netdev) { - struct ngbe_adapter *adapter = netdev_priv(netdev); + struct wx *wx = netdev_priv(netdev); - ngbe_down(adapter); - wx_control_hw(&adapter->hw.wxhw, false); + ngbe_down(wx); + phy_disconnect(wx->phydev); + wx_control_hw(wx, false); return 0; } @@ -256,52 +223,29 @@ static netdev_tx_t ngbe_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } -/** - * ngbe_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ -static int ngbe_set_mac(struct net_device *netdev, void *p) -{ - struct ngbe_adapter *adapter = netdev_priv(netdev); - struct wx_hw *wxhw = &adapter->hw.wxhw; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - eth_hw_addr_set(netdev, addr->sa_data); - memcpy(wxhw->mac.addr, addr->sa_data, netdev->addr_len); - - ngbe_mac_set_default_filter(adapter, wxhw->mac.addr); - - return 0; -} - static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake) { - struct ngbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; + struct wx *wx = pci_get_drvdata(pdev); + struct net_device *netdev; + netdev = wx->netdev; netif_device_detach(netdev); rtnl_lock(); if (netif_running(netdev)) - ngbe_down(adapter); + ngbe_down(wx); rtnl_unlock(); - wx_control_hw(&adapter->hw.wxhw, false); + wx_control_hw(wx, false); pci_disable_device(pdev); } static void ngbe_shutdown(struct pci_dev *pdev) { - struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct wx *wx = pci_get_drvdata(pdev); bool wake; - wake = !!adapter->wol; + wake = !!wx->wol; ngbe_dev_shutdown(pdev, &wake); @@ -316,7 +260,7 @@ static const struct net_device_ops ngbe_netdev_ops = { .ndo_stop = ngbe_close, .ndo_start_xmit = ngbe_xmit_frame, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = ngbe_set_mac, + .ndo_set_mac_address = wx_set_mac, }; /** @@ -326,18 +270,16 @@ static const struct net_device_ops ngbe_netdev_ops = { * * Returns 0 on success, negative on failure * - * ngbe_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, + * ngbe_probe initializes an wx identified by a pci_dev structure. + * The OS initialization, configuring of the wx private structure, * and a hardware reset occur. 
**/ static int ngbe_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) { - struct ngbe_adapter *adapter = NULL; - struct ngbe_hw *hw = NULL; - struct wx_hw *wxhw = NULL; struct net_device *netdev; u32 e2rom_cksum_cap = 0; + struct wx *wx = NULL; static int func_nums; u16 e2rom_ver = 0; u32 etrack_id = 0; @@ -368,7 +310,7 @@ static int ngbe_probe(struct pci_dev *pdev, pci_set_master(pdev); netdev = devm_alloc_etherdev_mqs(&pdev->dev, - sizeof(struct ngbe_adapter), + sizeof(struct wx), NGBE_MAX_TX_QUEUES, NGBE_MAX_RX_QUEUES); if (!netdev) { @@ -378,17 +320,15 @@ static int ngbe_probe(struct pci_dev *pdev, SET_NETDEV_DEV(netdev, &pdev->dev); - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pdev = pdev; - hw = &adapter->hw; - wxhw = &hw->wxhw; - adapter->msg_enable = BIT(3) - 1; - - adapter->io_addr = devm_ioremap(&pdev->dev, - pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!adapter->io_addr) { + wx = netdev_priv(netdev); + wx->netdev = netdev; + wx->pdev = pdev; + wx->msg_enable = BIT(3) - 1; + + wx->hw_addr = devm_ioremap(&pdev->dev, + pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!wx->hw_addr) { err = -EIO; goto err_pci_release_regions; } @@ -397,44 +337,44 @@ static int ngbe_probe(struct pci_dev *pdev, netdev->features |= NETIF_F_HIGHDMA; - adapter->bd_number = func_nums; + wx->bd_number = func_nums; /* setup the private structure */ - err = ngbe_sw_init(adapter); + err = ngbe_sw_init(wx); if (err) goto err_free_mac_table; /* check if flash load is done after hw power up */ - err = wx_check_flash_load(wxhw, NGBE_SPI_ILDR_STATUS_PERST); + err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PERST); if (err) goto err_free_mac_table; - err = wx_check_flash_load(wxhw, NGBE_SPI_ILDR_STATUS_PWRRST); + err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PWRRST); if (err) goto err_free_mac_table; - err = wx_mng_present(wxhw); + err = wx_mng_present(wx); if (err) { dev_err(&pdev->dev, "Management capability is not present\n"); goto err_free_mac_table; } - err = ngbe_reset_hw(hw); + err = ngbe_reset_hw(wx); if (err) { dev_err(&pdev->dev, "HW Init failed: %d\n", err); goto err_free_mac_table; } - if (wxhw->bus.func == 0) { - wr32(wxhw, NGBE_CALSUM_CAP_STATUS, 0x0); - wr32(wxhw, NGBE_EEPROM_VERSION_STORE_REG, 0x0); + if (wx->bus.func == 0) { + wr32(wx, NGBE_CALSUM_CAP_STATUS, 0x0); + wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, 0x0); } else { - e2rom_cksum_cap = rd32(wxhw, NGBE_CALSUM_CAP_STATUS); - saved_ver = rd32(wxhw, NGBE_EEPROM_VERSION_STORE_REG); + e2rom_cksum_cap = rd32(wx, NGBE_CALSUM_CAP_STATUS); + saved_ver = rd32(wx, NGBE_EEPROM_VERSION_STORE_REG); } - wx_init_eeprom_params(wxhw); - if (wxhw->bus.func == 0 || e2rom_cksum_cap == 0) { + wx_init_eeprom_params(wx); + if (wx->bus.func == 0 || e2rom_cksum_cap == 0) { /* make sure the EEPROM is ready */ - err = ngbe_eeprom_chksum_hostif(hw); + err = ngbe_eeprom_chksum_hostif(wx); if (err) { dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); err = -EIO; @@ -442,14 +382,14 @@ static int ngbe_probe(struct pci_dev *pdev, } } - adapter->wol = 0; - if (hw->wol_enabled) - adapter->wol = NGBE_PSR_WKUP_CTL_MAG; + wx->wol = 0; + if (wx->wol_enabled) + wx->wol = NGBE_PSR_WKUP_CTL_MAG; - hw->wol_enabled = !!(adapter->wol); - wr32(wxhw, NGBE_PSR_WKUP_CTL, adapter->wol); + wx->wol_enabled = !!(wx->wol); + wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol); - device_set_wakeup_enable(&pdev->dev, adapter->wol); + device_set_wakeup_enable(&pdev->dev, wx->wol); /* Save off EEPROM version 
number and Option Rom version which * together make a unique identify for the eeprom @@ -457,37 +397,42 @@ static int ngbe_probe(struct pci_dev *pdev, if (saved_ver) { etrack_id = saved_ver; } else { - wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_H, + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_H, &e2rom_ver); etrack_id = e2rom_ver << 16; - wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L, + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L, &e2rom_ver); etrack_id |= e2rom_ver; - wr32(wxhw, NGBE_EEPROM_VERSION_STORE_REG, etrack_id); + wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, etrack_id); } - eth_hw_addr_set(netdev, wxhw->mac.perm_addr); - ngbe_mac_set_default_filter(adapter, wxhw->mac.perm_addr); + eth_hw_addr_set(netdev, wx->mac.perm_addr); + wx_mac_set_default_filter(wx, wx->mac.perm_addr); + + /* phy Interface Configuration */ + err = ngbe_mdio_init(wx); + if (err) + goto err_free_mac_table; err = register_netdev(netdev); if (err) goto err_register; - pci_set_drvdata(pdev, adapter); + pci_set_drvdata(pdev, wx); - netif_info(adapter, probe, netdev, + netif_info(wx, probe, netdev, "PHY: %s, PBA No: Wang Xun GbE Family Controller\n", - hw->phy.type == ngbe_phy_internal ? "Internal" : "External"); - netif_info(adapter, probe, netdev, "%pM\n", netdev->dev_addr); + wx->mac_type == em_mac_type_mdi ? "Internal" : "External"); + netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); return 0; err_register: - wx_control_hw(wxhw, false); + wx_control_hw(wx, false); err_free_mac_table: - kfree(adapter->mac_table); + kfree(wx->mac_table); err_pci_release_regions: pci_disable_pcie_error_reporting(pdev); pci_release_selected_regions(pdev, @@ -508,15 +453,15 @@ err_pci_disable_dev: **/ static void ngbe_remove(struct pci_dev *pdev) { - struct ngbe_adapter *adapter = pci_get_drvdata(pdev); + struct wx *wx = pci_get_drvdata(pdev); struct net_device *netdev; - netdev = adapter->netdev; + netdev = wx->netdev; unregister_netdev(netdev); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); - kfree(adapter->mac_table); + kfree(wx->mac_table); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c new file mode 100644 index 000000000000..ba33a57b42c2 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -0,0 +1,286 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ + +#include <linux/ethtool.h> +#include <linux/iopoll.h> +#include <linux/pci.h> +#include <linux/phy.h> + +#include "../libwx/wx_type.h" +#include "../libwx/wx_hw.h" +#include "ngbe_type.h" +#include "ngbe_mdio.h" + +static int ngbe_phy_read_reg_internal(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct wx *wx = bus->priv; + + if (phy_addr != 0) + return 0xffff; + return (u16)rd32(wx, NGBE_PHY_CONFIG(regnum)); +} + +static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + if (phy_addr == 0) + wr32(wx, NGBE_PHY_CONFIG(regnum), value); + return 0; +} + +static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) +{ + u32 command, val, device_type = 0; + struct wx *wx = bus->priv; + int ret; + + wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(regnum) | + NGBE_MSCA_PA(phy_addr) | + NGBE_MSCA_DA(device_type); + wr32(wx, NGBE_MSCA, command); + command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | + NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(wx, NGBE_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, + 100000, false, wx, NGBE_MSCC); + if (ret) { + wx_err(wx, "Mdio read c22 command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, NGBE_MSCC); +} + +static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) +{ + u32 command, val, device_type = 0; + struct wx *wx = bus->priv; + int ret; + + wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(regnum) | + NGBE_MSCA_PA(phy_addr) | + NGBE_MSCA_DA(device_type); + wr32(wx, NGBE_MSCA, command); + command = value | + NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) | + NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(wx, NGBE_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, + 100000, false, wx, NGBE_MSCC); + if (ret) + wx_err(wx, "Mdio write c22 command did not complete.\n"); + + return ret; +} + +static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + u32 val, command; + int ret; + + wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(mdiobus_c45_regad(regnum)) | + NGBE_MSCA_PA(phy_addr) | + NGBE_MSCA_DA(devnum); + wr32(wx, NGBE_MSCA, command); + command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | + NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(wx, NGBE_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, + 100000, false, wx, NGBE_MSCC); + if (ret) { + wx_err(wx, "Mdio read c45 command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, NGBE_MSCC); +} + +static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + int ret, command; + u16 val; + + wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); + /* setup and write the address cycle command */ + command = NGBE_MSCA_RA(mdiobus_c45_regad(regnum)) | + NGBE_MSCA_PA(phy_addr) | + NGBE_MSCA_DA(devnum); + wr32(wx, NGBE_MSCA, command); + command = value | + NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) | + NGBE_MSCC_BUSY | + NGBE_MDIO_CLK(6); + wr32(wx, NGBE_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, + 
100000, false, wx, NGBE_MSCC); + if (ret) + wx_err(wx, "Mdio write c45 command did not complete.\n"); + + return ret; +} + +static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct wx *wx = bus->priv; + u16 phy_data; + + if (wx->mac_type == em_mac_type_mdi) + phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum); + else + phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum); + + return phy_data; +} + +static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr, + int regnum, u16 value) +{ + struct wx *wx = bus->priv; + int ret; + + if (wx->mac_type == em_mac_type_mdi) + ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value); + else + ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); + + return ret; +} + +static void ngbe_handle_link_change(struct net_device *dev) +{ + struct wx *wx = netdev_priv(dev); + struct phy_device *phydev; + u32 lan_speed, reg; + + phydev = wx->phydev; + if (!(wx->link != phydev->link || + wx->speed != phydev->speed || + wx->duplex != phydev->duplex)) + return; + + wx->link = phydev->link; + wx->speed = phydev->speed; + wx->duplex = phydev->duplex; + switch (phydev->speed) { + case SPEED_10: + lan_speed = 0; + break; + case SPEED_100: + lan_speed = 1; + break; + case SPEED_1000: + default: + lan_speed = 2; + break; + } + wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed); + + if (phydev->link) { + reg = rd32(wx, WX_MAC_TX_CFG); + reg &= ~WX_MAC_TX_CFG_SPEED_MASK; + reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE; + wr32(wx, WX_MAC_TX_CFG, reg); + /* Re configure MAC RX */ + reg = rd32(wx, WX_MAC_RX_CFG); + wr32(wx, WX_MAC_RX_CFG, reg); + wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); + reg = rd32(wx, WX_MAC_WDG_TIMEOUT); + wr32(wx, WX_MAC_WDG_TIMEOUT, reg); + } + phy_print_status(phydev); +} + +int ngbe_phy_connect(struct wx *wx) +{ + int ret; + + ret = phy_connect_direct(wx->netdev, + wx->phydev, + ngbe_handle_link_change, + PHY_INTERFACE_MODE_RGMII_ID); + if (ret) { + wx_err(wx, "PHY connect failed.\n"); + return ret; + } + + return 0; +} + +static void ngbe_phy_fixup(struct wx *wx) +{ + struct phy_device *phydev = wx->phydev; + struct ethtool_eee eee; + + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + + if (wx->mac_type != em_mac_type_mdi) + return; + /* disable EEE, internal phy does not support eee */ + memset(&eee, 0, sizeof(eee)); + phy_ethtool_set_eee(phydev, &eee); +} + +int ngbe_mdio_init(struct wx *wx) +{ + struct pci_dev *pdev = wx->pdev; + struct mii_bus *mii_bus; + int ret; + + mii_bus = devm_mdiobus_alloc(&pdev->dev); + if (!mii_bus) + return -ENOMEM; + + mii_bus->name = "ngbe_mii_bus"; + mii_bus->read = ngbe_phy_read_reg_c22; + mii_bus->write = ngbe_phy_write_reg_c22; + mii_bus->phy_mask = GENMASK(31, 4); + mii_bus->parent = &pdev->dev; + mii_bus->priv = wx; + + if (wx->mac_type == em_mac_type_rgmii) { + mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45; + mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45; + } + + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", + (pdev->bus->number << 8) | pdev->devfn); + ret = devm_mdiobus_register(&pdev->dev, mii_bus); + if (ret) + return ret; + + wx->phydev = phy_find_first(mii_bus); + if (!wx->phydev) + return -ENODEV; + + phy_attached_info(wx->phydev); + ngbe_phy_fixup(wx); + + wx->link = 0; + wx->speed = 0; + wx->duplex = 0; + + return 0; +} diff --git 
a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h new file mode 100644 index 000000000000..0a6400dd89c4 --- /dev/null +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * WangXun Gigabit PCI Express Linux driver + * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. + */ + +#ifndef _NGBE_MDIO_H_ +#define _NGBE_MDIO_H_ + +int ngbe_phy_connect(struct wx *wx); +int ngbe_mdio_init(struct wx *wx); +#endif /* _NGBE_MDIO_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index 39f6c03f1a54..fd71260f73de 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -49,7 +49,6 @@ #define NGBE_SPI_ILDR_STATUS 0x10120 #define NGBE_SPI_ILDR_STATUS_PERST BIT(0) /* PCIE_PERST is done */ #define NGBE_SPI_ILDR_STATUS_PWRRST BIT(1) /* Power on reset is done */ -#define NGBE_SPI_ILDR_STATUS_LAN_SW_RST(_i) BIT((_i) + 9) /* lan soft reset done */ /* Checksum and EEPROM pointers */ #define NGBE_CALSUM_COMMAND 0xE9 @@ -60,6 +59,25 @@ #define NGBE_EEPROM_VERSION_L 0x1D #define NGBE_EEPROM_VERSION_H 0x1E +/* mdio access */ +#define NGBE_MSCA 0x11200 +#define NGBE_MSCA_RA(v) FIELD_PREP(U16_MAX, v) +#define NGBE_MSCA_PA(v) FIELD_PREP(GENMASK(20, 16), v) +#define NGBE_MSCA_DA(v) FIELD_PREP(GENMASK(25, 21), v) +#define NGBE_MSCC 0x11204 +#define NGBE_MSCC_CMD(v) FIELD_PREP(GENMASK(17, 16), v) + +enum NGBE_MSCA_CMD_value { + NGBE_MSCA_CMD_RSV = 0, + NGBE_MSCA_CMD_WRITE, + NGBE_MSCA_CMD_POST_READ, + NGBE_MSCA_CMD_READ, +}; + +#define NGBE_MSCC_SADDR BIT(18) +#define NGBE_MSCC_BUSY BIT(22) +#define NGBE_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v) + /* Media-dependent registers. 
*/ #define NGBE_MDIO_CLAUSE_SELECT 0x11220 @@ -72,6 +90,10 @@ #define NGBE_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */ #define NGBE_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */ +#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) +#define NGBE_CFG_LAN_SPEED 0x14440 +#define NGBE_CFG_PORT_ST 0x14404 + /* Wake up registers */ #define NGBE_PSR_WKUP_CTL 0x15B80 /* Wake Up Filter Control Bit */ @@ -90,50 +112,26 @@ #define NGBE_FW_CMD_ST_PASS 0x80658383 #define NGBE_FW_CMD_ST_FAIL 0x70657376 -enum ngbe_phy_type { - ngbe_phy_unknown = 0, - ngbe_phy_none, - ngbe_phy_internal, - ngbe_phy_m88e1512, - ngbe_phy_m88e1512_sfi, - ngbe_phy_m88e1512_unknown, - ngbe_phy_yt8521s, - ngbe_phy_yt8521s_sfi, - ngbe_phy_internal_yt8521s_sfi, - ngbe_phy_generic -}; +#define NGBE_MAX_FDIR_INDICES 7 -enum ngbe_media_type { - ngbe_media_type_unknown = 0, - ngbe_media_type_fiber, - ngbe_media_type_copper, - ngbe_media_type_backplane, -}; +#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) +#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) -enum ngbe_mac_type { - ngbe_mac_type_unknown = 0, - ngbe_mac_type_mdi, - ngbe_mac_type_rgmii -}; - -struct ngbe_phy_info { - enum ngbe_phy_type type; - enum ngbe_media_type media_type; +#define NGBE_ETH_LENGTH_OF_ADDRESS 6 +#define NGBE_MAX_MSIX_VECTORS 0x09 +#define NGBE_RAR_ENTRIES 32 - u32 addr; - u32 id; +/* TX/RX descriptor defines */ +#define NGBE_DEFAULT_TXD 512 /* default ring size */ +#define NGBE_DEFAULT_TX_WORK 256 +#define NGBE_MAX_TXD 8192 +#define NGBE_MIN_TXD 128 - bool reset_if_overtemp; +#define NGBE_DEFAULT_RXD 512 /* default ring size */ +#define NGBE_DEFAULT_RX_WORK 256 +#define NGBE_MAX_RXD 8192 +#define NGBE_MIN_RXD 128 -}; +extern char ngbe_driver_name[]; -struct ngbe_hw { - struct wx_hw wxhw; - struct ngbe_phy_info phy; - enum ngbe_mac_type mac_type; - - bool wol_enabled; - bool ncsi_enabled; - bool gpio_ctrl; -}; #endif /* _NGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h deleted file mode 100644 index 19e61377bd00..000000000000 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ - -#ifndef _TXGBE_H_ -#define _TXGBE_H_ - -#define TXGBE_MAX_FDIR_INDICES 63 - -#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) -#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) - -#define TXGBE_SP_MAX_TX_QUEUES 128 -#define TXGBE_SP_MAX_RX_QUEUES 128 -#define TXGBE_SP_RAR_ENTRIES 128 -#define TXGBE_SP_MC_TBL_SIZE 128 - -struct txgbe_mac_addr { - u8 addr[ETH_ALEN]; - u16 state; /* bitmask */ - u64 pools; -}; - -#define TXGBE_MAC_STATE_DEFAULT 0x1 -#define TXGBE_MAC_STATE_MODIFIED 0x2 -#define TXGBE_MAC_STATE_IN_USE 0x4 - -/* board specific private data structure */ -struct txgbe_adapter { - u8 __iomem *io_addr; /* Mainly for iounmap use */ - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - - /* structs defined in txgbe_type.h */ - struct txgbe_hw hw; - u16 msg_enable; - struct txgbe_mac_addr *mac_table; - char eeprom_id[32]; -}; - -extern char txgbe_driver_name[]; - -#endif /* _TXGBE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 167f7ff73192..ebc46f3be056 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -12,70 +12,67 @@ #include "../libwx/wx_hw.h" #include "txgbe_type.h" #include "txgbe_hw.h" -#include "txgbe.h" /** * txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds - * @hw: pointer to hardware structure + * @wx: pointer to hardware structure * * Inits the thermal sensor thresholds according to the NVM map * and save off the threshold and location values into mac.thermal_sensor_data **/ -static void txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw) +static void txgbe_init_thermal_sensor_thresh(struct wx *wx) { - struct wx_hw *wxhw = &hw->wxhw; - struct wx_thermal_sensor_data *data = &wxhw->mac.sensor; + struct wx_thermal_sensor_data *data = &wx->mac.sensor; memset(data, 0, sizeof(struct wx_thermal_sensor_data)); /* Only support thermal sensors attached to SP physical port 0 */ - if (wxhw->bus.func) + if (wx->bus.func) return; - wr32(wxhw, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD); + wr32(wx, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD); - wr32(wxhw, WX_TS_INT_EN, + wr32(wx, WX_TS_INT_EN, WX_TS_INT_EN_ALARM_INT_EN | WX_TS_INT_EN_DALARM_INT_EN); - wr32(wxhw, WX_TS_EN, WX_TS_EN_ENA); + wr32(wx, WX_TS_EN, WX_TS_EN_ENA); data->alarm_thresh = 100; - wr32(wxhw, WX_TS_ALARM_THRE, 677); + wr32(wx, WX_TS_ALARM_THRE, 677); data->dalarm_thresh = 90; - wr32(wxhw, WX_TS_DALARM_THRE, 614); + wr32(wx, WX_TS_DALARM_THRE, 614); } /** * txgbe_read_pba_string - Reads part number string from EEPROM - * @hw: pointer to hardware structure + * @wx: pointer to hardware structure * @pba_num: stores the part number string from the EEPROM * @pba_num_size: part number string buffer length * * Reads the part number string from the EEPROM. 
**/ -int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size) +int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size) { u16 pba_ptr, offset, length, data; - struct wx_hw *wxhw = &hw->wxhw; int ret_val; if (!pba_num) { - wx_err(wxhw, "PBA string buffer was null\n"); + wx_err(wx, "PBA string buffer was null\n"); return -EINVAL; } - ret_val = wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, + ret_val = wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, &data); if (ret_val != 0) { - wx_err(wxhw, "NVM Read Error\n"); + wx_err(wx, "NVM Read Error\n"); return ret_val; } - ret_val = wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, + ret_val = wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, &pba_ptr); if (ret_val != 0) { - wx_err(wxhw, "NVM Read Error\n"); + wx_err(wx, "NVM Read Error\n"); return ret_val; } @@ -84,11 +81,11 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size) * and we can decode it into an ascii string */ if (data != TXGBE_PBANUM_PTR_GUARD) { - wx_err(wxhw, "NVM PBA number is not stored as string\n"); + wx_err(wx, "NVM PBA number is not stored as string\n"); /* we will need 11 characters to store the PBA */ if (pba_num_size < 11) { - wx_err(wxhw, "PBA string buffer too small\n"); + wx_err(wx, "PBA string buffer too small\n"); return -ENOMEM; } @@ -118,20 +115,20 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size) return 0; } - ret_val = wx_read_ee_hostif(wxhw, pba_ptr, &length); + ret_val = wx_read_ee_hostif(wx, pba_ptr, &length); if (ret_val != 0) { - wx_err(wxhw, "NVM Read Error\n"); + wx_err(wx, "NVM Read Error\n"); return ret_val; } if (length == 0xFFFF || length == 0) { - wx_err(wxhw, "NVM PBA number section invalid length\n"); + wx_err(wx, "NVM PBA number section invalid length\n"); return -EINVAL; } /* check if pba_num buffer is big enough */ if (pba_num_size < (((u32)length * 2) - 1)) { - wx_err(wxhw, "PBA string buffer too small\n"); + wx_err(wx, "PBA string buffer too small\n"); return -ENOMEM; } @@ -140,9 +137,9 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size) length--; for (offset = 0; offset < length; offset++) { - ret_val = wx_read_ee_hostif(wxhw, pba_ptr + offset, &data); + ret_val = wx_read_ee_hostif(wx, pba_ptr + offset, &data); if (ret_val != 0) { - wx_err(wxhw, "NVM Read Error\n"); + wx_err(wx, "NVM Read Error\n"); return ret_val; } pba_num[offset * 2] = (u8)(data >> 8); @@ -155,14 +152,13 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size) /** * txgbe_calc_eeprom_checksum - Calculates and returns the checksum - * @hw: pointer to hardware structure + * @wx: pointer to hardware structure * @checksum: pointer to cheksum * * Returns a negative error code on error **/ -static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum) +static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum) { - struct wx_hw *wxhw = &hw->wxhw; u16 *eeprom_ptrs = NULL; u32 buffer_size = 0; u16 *buffer = NULL; @@ -170,7 +166,7 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum) int status; u16 i; - wx_init_eeprom_params(wxhw); + wx_init_eeprom_params(wx); if (!buffer) { eeprom_ptrs = kvmalloc_array(TXGBE_EEPROM_LAST_WORD, sizeof(u16), @@ -178,11 +174,11 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum) if (!eeprom_ptrs) return -ENOMEM; /* Read pointer area */ - 
status = wx_read_ee_hostif_buffer(wxhw, 0, + status = wx_read_ee_hostif_buffer(wx, 0, TXGBE_EEPROM_LAST_WORD, eeprom_ptrs); if (status != 0) { - wx_err(wxhw, "Failed to read EEPROM image\n"); + wx_err(wx, "Failed to read EEPROM image\n"); kvfree(eeprom_ptrs); return status; } @@ -194,7 +190,7 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum) } for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) - if (i != wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM) + if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM) *checksum += local_buffer[i]; if (eeprom_ptrs) @@ -210,15 +206,14 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum) /** * txgbe_validate_eeprom_checksum - Validate EEPROM checksum - * @hw: pointer to hardware structure + * @wx: pointer to hardware structure * @checksum_val: calculated checksum * * Performs checksum calculation and validates the EEPROM checksum. If the * caller does not need checksum_val, the value can be NULL. **/ -int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val) +int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val) { - struct wx_hw *wxhw = &hw->wxhw; u16 read_checksum = 0; u16 checksum; int status; @@ -227,18 +222,18 @@ int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val) * not continue or we could be in for a very long wait while every * EEPROM read fails */ - status = wx_read_ee_hostif(wxhw, 0, &checksum); + status = wx_read_ee_hostif(wx, 0, &checksum); if (status) { - wx_err(wxhw, "EEPROM read failed\n"); + wx_err(wx, "EEPROM read failed\n"); return status; } checksum = 0; - status = txgbe_calc_eeprom_checksum(hw, &checksum); + status = txgbe_calc_eeprom_checksum(wx, &checksum); if (status != 0) return status; - status = wx_read_ee_hostif(wxhw, wxhw->eeprom.sw_region_offset + + status = wx_read_ee_hostif(wx, wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM, &read_checksum); if (status != 0) return status; @@ -248,7 +243,7 @@ int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val) */ if (read_checksum != checksum) { status = -EIO; - wx_err(wxhw, "Invalid EEPROM checksum\n"); + wx_err(wx, "Invalid EEPROM checksum\n"); } /* If the user cares, return the calculated checksum */ @@ -258,55 +253,52 @@ int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val) return status; } -static void txgbe_reset_misc(struct txgbe_hw *hw) +static void txgbe_reset_misc(struct wx *wx) { - struct wx_hw *wxhw = &hw->wxhw; - - wx_reset_misc(wxhw); - txgbe_init_thermal_sensor_thresh(hw); + wx_reset_misc(wx); + txgbe_init_thermal_sensor_thresh(wx); } /** * txgbe_reset_hw - Perform hardware reset - * @hw: pointer to hardware structure + * @wx: pointer to wx structure * * Resets the hardware by resetting the transmit and receive units, masks * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. 
**/ -int txgbe_reset_hw(struct txgbe_hw *hw) +int txgbe_reset_hw(struct wx *wx) { - struct wx_hw *wxhw = &hw->wxhw; int status; /* Call adapter stop to disable tx/rx and clear interrupts */ - status = wx_stop_adapter(wxhw); + status = wx_stop_adapter(wx); if (status != 0) return status; - if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || - ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) - wx_reset_hostif(wxhw); + if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || + ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) + wx_reset_hostif(wx); usleep_range(10, 100); - status = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wxhw->bus.func)); + status = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wx->bus.func)); if (status != 0) return status; - txgbe_reset_misc(hw); + txgbe_reset_misc(wx); /* Store the permanent mac address */ - wx_get_mac_addr(wxhw, wxhw->mac.perm_addr); + wx_get_mac_addr(wx, wx->mac.perm_addr); /* Store MAC address from RAR0, clear receive address registers, and * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. */ - wxhw->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; - wx_init_rx_addrs(wxhw); + wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; + wx_init_rx_addrs(wx); - pci_set_master(wxhw->pdev); + pci_set_master(wx->pdev); return 0; } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h index 6a751a69177b..e82f65dff8a6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -4,8 +4,8 @@ #ifndef _TXGBE_HW_H_ #define _TXGBE_HW_H_ -int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size); -int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val); -int txgbe_reset_hw(struct txgbe_hw *hw); +int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size); +int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val); +int txgbe_reset_hw(struct wx *wx); #endif /* _TXGBE_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 36780e7f05b7..aa4d09df3b01 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -14,7 +14,6 @@ #include "../libwx/wx_hw.h" #include "txgbe_type.h" #include "txgbe_hw.h" -#include "txgbe.h" char txgbe_driver_name[] = "txgbe"; @@ -35,26 +34,26 @@ static const struct pci_device_id txgbe_pci_tbl[] = { #define DEFAULT_DEBUG_LEVEL_SHIFT 3 -static void txgbe_check_minimum_link(struct txgbe_adapter *adapter) +static void txgbe_check_minimum_link(struct wx *wx) { struct pci_dev *pdev; - pdev = adapter->pdev; + pdev = wx->pdev; pcie_print_link_status(pdev); } /** * txgbe_enumerate_functions - Get the number of ports this device has - * @adapter: adapter structure + * @wx: wx structure * * This function enumerates the phsyical functions co-located on a single slot, * in order to determine how many ports a device has. This is most useful in * determining the required GT/s of PCIe bandwidth necessary for optimal * performance. 
**/ -static int txgbe_enumerate_functions(struct txgbe_adapter *adapter) +static int txgbe_enumerate_functions(struct wx *wx) { - struct pci_dev *entry, *pdev = adapter->pdev; + struct pci_dev *entry, *pdev = wx->pdev; int physfns = 0; list_for_each_entry(entry, &pdev->bus->devices, bus_list) { @@ -73,197 +72,90 @@ static int txgbe_enumerate_functions(struct txgbe_adapter *adapter) return physfns; } -static void txgbe_sync_mac_table(struct txgbe_adapter *adapter) +static void txgbe_up_complete(struct wx *wx) { - struct txgbe_hw *hw = &adapter->hw; - struct wx_hw *wxhw = &hw->wxhw; - int i; - - for (i = 0; i < wxhw->mac.num_rar_entries; i++) { - if (adapter->mac_table[i].state & TXGBE_MAC_STATE_MODIFIED) { - if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) { - wx_set_rar(wxhw, i, - adapter->mac_table[i].addr, - adapter->mac_table[i].pools, - WX_PSR_MAC_SWC_AD_H_AV); - } else { - wx_clear_rar(wxhw, i); - } - adapter->mac_table[i].state &= ~(TXGBE_MAC_STATE_MODIFIED); - } - } -} - -/* this function destroys the first RAR entry */ -static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter, - u8 *addr) -{ - struct wx_hw *wxhw = &adapter->hw.wxhw; - - memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); - adapter->mac_table[0].pools = 1ULL; - adapter->mac_table[0].state = (TXGBE_MAC_STATE_DEFAULT | - TXGBE_MAC_STATE_IN_USE); - wx_set_rar(wxhw, 0, adapter->mac_table[0].addr, - adapter->mac_table[0].pools, - WX_PSR_MAC_SWC_AD_H_AV); -} - -static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter) -{ - struct wx_hw *wxhw = &adapter->hw.wxhw; - u32 i; - - for (i = 0; i < wxhw->mac.num_rar_entries; i++) { - adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; - adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); - adapter->mac_table[i].pools = 0; - } - txgbe_sync_mac_table(adapter); -} - -static int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool) -{ - struct wx_hw *wxhw = &adapter->hw.wxhw; - u32 i; - - if (is_zero_ether_addr(addr)) - return -EINVAL; - - /* search table for addr, if found, set to 0 and sync */ - for (i = 0; i < wxhw->mac.num_rar_entries; i++) { - if (ether_addr_equal(addr, adapter->mac_table[i].addr)) { - if (adapter->mac_table[i].pools & (1ULL << pool)) { - adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; - adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; - adapter->mac_table[i].pools &= ~(1ULL << pool); - txgbe_sync_mac_table(adapter); - } - return 0; - } - - if (adapter->mac_table[i].pools != (1 << pool)) - continue; - if (!ether_addr_equal(addr, adapter->mac_table[i].addr)) - continue; - - adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; - adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); - adapter->mac_table[i].pools = 0; - txgbe_sync_mac_table(adapter); - return 0; - } - return -ENOMEM; -} - -static void txgbe_up_complete(struct txgbe_adapter *adapter) -{ - struct txgbe_hw *hw = &adapter->hw; - struct wx_hw *wxhw = &hw->wxhw; - - wx_control_hw(wxhw, true); + wx_control_hw(wx, true); } -static void txgbe_reset(struct txgbe_adapter *adapter) +static void txgbe_reset(struct wx *wx) { - struct net_device *netdev = adapter->netdev; - struct txgbe_hw *hw = &adapter->hw; + struct net_device *netdev = wx->netdev; u8 old_addr[ETH_ALEN]; int err; - err = txgbe_reset_hw(hw); + err = txgbe_reset_hw(wx); if (err != 0) - dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err); + wx_err(wx, "Hardware 
Error: %d\n", err); /* do not flush user set addresses */ - memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); - txgbe_flush_sw_mac_table(adapter); - txgbe_mac_set_default_filter(adapter, old_addr); + memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len); + wx_flush_sw_mac_table(wx); + wx_mac_set_default_filter(wx, old_addr); } -static void txgbe_disable_device(struct txgbe_adapter *adapter) +static void txgbe_disable_device(struct wx *wx) { - struct net_device *netdev = adapter->netdev; - struct wx_hw *wxhw = &adapter->hw.wxhw; + struct net_device *netdev = wx->netdev; - wx_disable_pcie_master(wxhw); + wx_disable_pcie_master(wx); /* disable receives */ - wx_disable_rx(wxhw); + wx_disable_rx(wx); netif_carrier_off(netdev); netif_tx_disable(netdev); - if (wxhw->bus.func < 2) - wr32m(wxhw, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wxhw->bus.func), 0); + if (wx->bus.func < 2) + wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0); else - dev_err(&adapter->pdev->dev, - "%s: invalid bus lan id %d\n", - __func__, wxhw->bus.func); + wx_err(wx, "%s: invalid bus lan id %d\n", + __func__, wx->bus.func); - if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || - ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { + if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || + ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { /* disable mac transmiter */ - wr32m(wxhw, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); + wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); } /* Disable the Tx DMA engine */ - wr32m(wxhw, WX_TDM_CTL, WX_TDM_CTL_TE, 0); + wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0); } -static void txgbe_down(struct txgbe_adapter *adapter) +static void txgbe_down(struct wx *wx) { - txgbe_disable_device(adapter); - txgbe_reset(adapter); + txgbe_disable_device(wx); + txgbe_reset(wx); } /** - * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter) - * @adapter: board private structure to initialize + * txgbe_sw_init - Initialize general software structures (struct wx) + * @wx: board private structure to initialize **/ -static int txgbe_sw_init(struct txgbe_adapter *adapter) +static int txgbe_sw_init(struct wx *wx) { - struct pci_dev *pdev = adapter->pdev; - struct txgbe_hw *hw = &adapter->hw; - struct wx_hw *wxhw = &hw->wxhw; int err; - wxhw->hw_addr = adapter->io_addr; - wxhw->pdev = pdev; + wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; + wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; + wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; + wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE; /* PCI config space info */ - err = wx_sw_init(wxhw); + err = wx_sw_init(wx); if (err < 0) { - netif_err(adapter, probe, adapter->netdev, - "read of internal subsystem device id failed\n"); + wx_err(wx, "read of internal subsystem device id failed\n"); return err; } - switch (wxhw->device_id) { + switch (wx->device_id) { case TXGBE_DEV_ID_SP1000: case TXGBE_DEV_ID_WX1820: - wxhw->mac.type = wx_mac_sp; + wx->mac.type = wx_mac_sp; break; default: - wxhw->mac.type = wx_mac_unknown; + wx->mac.type = wx_mac_unknown; break; } - wxhw->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; - wxhw->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; - wxhw->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; - wxhw->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE; - - adapter->mac_table = kcalloc(wxhw->mac.num_rar_entries, - sizeof(struct txgbe_mac_addr), - GFP_KERNEL); - if (!adapter->mac_table) { - netif_err(adapter, probe, adapter->netdev, - "mac_table allocation failed\n"); - 
return -ENOMEM; - } - return 0; } @@ -278,23 +170,23 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter) **/ static int txgbe_open(struct net_device *netdev) { - struct txgbe_adapter *adapter = netdev_priv(netdev); + struct wx *wx = netdev_priv(netdev); - txgbe_up_complete(adapter); + txgbe_up_complete(wx); return 0; } /** * txgbe_close_suspend - actions necessary to both suspend and close flows - * @adapter: the private adapter struct + * @wx: the private wx struct * * This function should contain the necessary work common to both suspending * and closing of the device. */ -static void txgbe_close_suspend(struct txgbe_adapter *adapter) +static void txgbe_close_suspend(struct wx *wx) { - txgbe_disable_device(adapter); + txgbe_disable_device(wx); } /** @@ -310,29 +202,28 @@ static void txgbe_close_suspend(struct txgbe_adapter *adapter) **/ static int txgbe_close(struct net_device *netdev) { - struct txgbe_adapter *adapter = netdev_priv(netdev); + struct wx *wx = netdev_priv(netdev); - txgbe_down(adapter); - wx_control_hw(&adapter->hw.wxhw, false); + txgbe_down(wx); + wx_control_hw(wx, false); return 0; } static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake) { - struct txgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - struct txgbe_hw *hw = &adapter->hw; - struct wx_hw *wxhw = &hw->wxhw; + struct wx *wx = pci_get_drvdata(pdev); + struct net_device *netdev; + netdev = wx->netdev; netif_device_detach(netdev); rtnl_lock(); if (netif_running(netdev)) - txgbe_close_suspend(adapter); + txgbe_close_suspend(wx); rtnl_unlock(); - wx_control_hw(wxhw, false); + wx_control_hw(wx, false); pci_disable_device(pdev); } @@ -355,39 +246,12 @@ static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } -/** - * txgbe_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ -static int txgbe_set_mac(struct net_device *netdev, void *p) -{ - struct txgbe_adapter *adapter = netdev_priv(netdev); - struct wx_hw *wxhw = &adapter->hw.wxhw; - struct sockaddr *addr = p; - int retval; - - retval = eth_prepare_mac_addr_change(netdev, addr); - if (retval) - return retval; - - txgbe_del_mac_filter(adapter, wxhw->mac.addr, 0); - eth_hw_addr_set(netdev, addr->sa_data); - memcpy(wxhw->mac.addr, addr->sa_data, netdev->addr_len); - - txgbe_mac_set_default_filter(adapter, wxhw->mac.addr); - - return 0; -} - static const struct net_device_ops txgbe_netdev_ops = { .ndo_open = txgbe_open, .ndo_stop = txgbe_close, .ndo_start_xmit = txgbe_xmit_frame, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = txgbe_set_mac, + .ndo_set_mac_address = wx_set_mac, }; /** @@ -398,17 +262,15 @@ static const struct net_device_ops txgbe_netdev_ops = { * Returns 0 on success, negative on failure * * txgbe_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, + * The OS initialization, configuring of the wx private structure, * and a hardware reset occur. 
**/ static int txgbe_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) { - struct txgbe_adapter *adapter = NULL; - struct txgbe_hw *hw = NULL; - struct wx_hw *wxhw = NULL; struct net_device *netdev; int err, expected_gts; + struct wx *wx = NULL; u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0; u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; @@ -440,7 +302,7 @@ static int txgbe_probe(struct pci_dev *pdev, pci_set_master(pdev); netdev = devm_alloc_etherdev_mqs(&pdev->dev, - sizeof(struct txgbe_adapter), + sizeof(struct wx), TXGBE_MAX_TX_QUEUES, TXGBE_MAX_RX_QUEUES); if (!netdev) { @@ -450,17 +312,16 @@ static int txgbe_probe(struct pci_dev *pdev, SET_NETDEV_DEV(netdev, &pdev->dev); - adapter = netdev_priv(netdev); - adapter->netdev = netdev; - adapter->pdev = pdev; - hw = &adapter->hw; - wxhw = &hw->wxhw; - adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - - adapter->io_addr = devm_ioremap(&pdev->dev, - pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!adapter->io_addr) { + wx = netdev_priv(netdev); + wx->netdev = netdev; + wx->pdev = pdev; + + wx->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + wx->hw_addr = devm_ioremap(&pdev->dev, + pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!wx->hw_addr) { err = -EIO; goto err_pci_release_regions; } @@ -468,25 +329,25 @@ static int txgbe_probe(struct pci_dev *pdev, netdev->netdev_ops = &txgbe_netdev_ops; /* setup the private structure */ - err = txgbe_sw_init(adapter); + err = txgbe_sw_init(wx); if (err) goto err_free_mac_table; /* check if flash load is done after hw power up */ - err = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_PERST); + err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST); if (err) goto err_free_mac_table; - err = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_PWRRST); + err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PWRRST); if (err) goto err_free_mac_table; - err = wx_mng_present(wxhw); + err = wx_mng_present(wx); if (err) { dev_err(&pdev->dev, "Management capability is not present\n"); goto err_free_mac_table; } - err = txgbe_reset_hw(hw); + err = txgbe_reset_hw(wx); if (err) { dev_err(&pdev->dev, "HW Init failed: %d\n", err); goto err_free_mac_table; @@ -495,36 +356,36 @@ static int txgbe_probe(struct pci_dev *pdev, netdev->features |= NETIF_F_HIGHDMA; /* make sure the EEPROM is good */ - err = txgbe_validate_eeprom_checksum(hw, NULL); + err = txgbe_validate_eeprom_checksum(wx, NULL); if (err != 0) { dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); - wr32(wxhw, WX_MIS_RST, WX_MIS_RST_SW_RST); + wr32(wx, WX_MIS_RST, WX_MIS_RST_SW_RST); err = -EIO; goto err_free_mac_table; } - eth_hw_addr_set(netdev, wxhw->mac.perm_addr); - txgbe_mac_set_default_filter(adapter, wxhw->mac.perm_addr); + eth_hw_addr_set(netdev, wx->mac.perm_addr); + wx_mac_set_default_filter(wx, wx->mac.perm_addr); /* Save off EEPROM version number and Option Rom version which * together make a unique identify for the eeprom */ - wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H, + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H, &eeprom_verh); - wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L, + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L, &eeprom_verl); etrack_id = (eeprom_verh << 16) | eeprom_verl; - wx_read_ee_hostif(wxhw, - wxhw->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG, + wx_read_ee_hostif(wx, + wx->eeprom.sw_region_offset 
+ TXGBE_ISCSI_BOOT_CONFIG, &offset); /* Make sure offset to SCSI block is valid */ if (!(offset == 0x0) && !(offset == 0xffff)) { - wx_read_ee_hostif(wxhw, offset + 0x84, &eeprom_cfg_blkh); - wx_read_ee_hostif(wxhw, offset + 0x83, &eeprom_cfg_blkl); + wx_read_ee_hostif(wx, offset + 0x84, &eeprom_cfg_blkh); + wx_read_ee_hostif(wx, offset + 0x83, &eeprom_cfg_blkl); /* Only display Option Rom if exist */ if (eeprom_cfg_blkl && eeprom_cfg_blkh) { @@ -532,15 +393,15 @@ static int txgbe_probe(struct pci_dev *pdev, build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8); patch = eeprom_cfg_blkh & 0x00ff; - snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), "0x%08x, %d.%d.%d", etrack_id, major, build, patch); } else { - snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), "0x%08x", etrack_id); } } else { - snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + snprintf(wx->eeprom_id, sizeof(wx->eeprom_id), "0x%08x", etrack_id); } @@ -548,7 +409,7 @@ static int txgbe_probe(struct pci_dev *pdev, if (err) goto err_release_hw; - pci_set_drvdata(pdev, adapter); + pci_set_drvdata(pdev, wx); /* calculate the expected PCIe bandwidth required for optimal * performance. Note that some older parts will never have enough @@ -556,27 +417,27 @@ static int txgbe_probe(struct pci_dev *pdev, * parts to ensure that no warning is displayed, as this could confuse * users otherwise. */ - expected_gts = txgbe_enumerate_functions(adapter) * 10; + expected_gts = txgbe_enumerate_functions(wx) * 10; /* don't check link if we failed to enumerate functions */ if (expected_gts > 0) - txgbe_check_minimum_link(adapter); + txgbe_check_minimum_link(wx); else dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n"); /* First try to read PBA as a string */ - err = txgbe_read_pba_string(hw, part_str, TXGBE_PBANUM_LENGTH); + err = txgbe_read_pba_string(wx, part_str, TXGBE_PBANUM_LENGTH); if (err) strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH); - netif_info(adapter, probe, netdev, "%pM\n", netdev->dev_addr); + netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); return 0; err_release_hw: - wx_control_hw(wxhw, false); + wx_control_hw(wx, false); err_free_mac_table: - kfree(adapter->mac_table); + kfree(wx->mac_table); err_pci_release_regions: pci_disable_pcie_error_reporting(pdev); pci_release_selected_regions(pdev, @@ -597,16 +458,16 @@ err_pci_disable_dev: **/ static void txgbe_remove(struct pci_dev *pdev) { - struct txgbe_adapter *adapter = pci_get_drvdata(pdev); + struct wx *wx = pci_get_drvdata(pdev); struct net_device *netdev; - netdev = adapter->netdev; + netdev = wx->netdev; unregister_netdev(netdev); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); - kfree(adapter->mac_table); + kfree(wx->mac_table); pci_disable_pcie_error_reporting(pdev); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 740a1c447e20..cbd705a9f4bd 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -67,8 +67,16 @@ #define TXGBE_PBANUM1_PTR 0x06 #define TXGBE_PBANUM_PTR_GUARD 0xFAFA -struct txgbe_hw { - struct wx_hw wxhw; -}; +#define TXGBE_MAX_FDIR_INDICES 63 + +#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) +#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) + +#define TXGBE_SP_MAX_TX_QUEUES 128 +#define TXGBE_SP_MAX_RX_QUEUES 128 +#define TXGBE_SP_RAR_ENTRIES 128 
+#define TXGBE_SP_MC_TBL_SIZE 128 + +extern char txgbe_driver_name[]; #endif /* _TXGBE_TYPE_H_ */ diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index bd3b0c2655a2..83ff882f5d97 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -623,16 +623,10 @@ static int receive(struct net_device *dev, int cnt) /* --------------------------------------------------------------------- */ -#if defined(__i386__) && !defined(CONFIG_UML) -#include <asm/msr.h> #define GETTICK(x) \ ({ \ - if (boot_cpu_has(X86_FEATURE_TSC)) \ - x = (unsigned int)rdtsc(); \ + x = (unsigned int)get_cycles(); \ }) -#else /* __i386__ && !CONFIG_UML */ -#define GETTICK(x) -#endif /* __i386__ && !CONFIG_UML */ static void epp_bh(struct work_struct *work) { diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c index c1b3977e1ae4..c19cd27ac852 100644 --- a/drivers/net/ipa/ipa_interrupt.c +++ b/drivers/net/ipa/ipa_interrupt.c @@ -26,6 +26,8 @@ #include "ipa.h" #include "ipa_reg.h" #include "ipa_endpoint.h" +#include "ipa_power.h" +#include "ipa_uc.h" #include "ipa_interrupt.h" /** @@ -33,47 +35,47 @@ * @ipa: IPA pointer * @irq: Linux IRQ number used for IPA interrupts * @enabled: Mask indicating which interrupts are enabled - * @handler: Array of handlers indexed by IPA interrupt ID */ struct ipa_interrupt { struct ipa *ipa; u32 irq; u32 enabled; - ipa_irq_handler_t handler[IPA_IRQ_COUNT]; }; -/* Returns true if the interrupt type is associated with the microcontroller */ -static bool ipa_interrupt_uc(struct ipa_interrupt *interrupt, u32 irq_id) -{ - return irq_id == IPA_IRQ_UC_0 || irq_id == IPA_IRQ_UC_1; -} - /* Process a particular interrupt type that has been received */ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id) { - bool uc_irq = ipa_interrupt_uc(interrupt, irq_id); struct ipa *ipa = interrupt->ipa; const struct ipa_reg *reg; u32 mask = BIT(irq_id); u32 offset; - /* For microcontroller interrupts, clear the interrupt right away, - * "to avoid clearing unhandled interrupts." - */ reg = ipa_reg(ipa, IPA_IRQ_CLR); offset = ipa_reg_offset(reg); - if (uc_irq) - iowrite32(mask, ipa->reg_virt + offset); - - if (irq_id < IPA_IRQ_COUNT && interrupt->handler[irq_id]) - interrupt->handler[irq_id](interrupt->ipa, irq_id); - /* Clearing the SUSPEND_TX interrupt also clears the register - * that tells us which suspended endpoint(s) caused the interrupt, - * so defer clearing until after the handler has been called. - */ - if (!uc_irq) + switch (irq_id) { + case IPA_IRQ_UC_0: + case IPA_IRQ_UC_1: + /* For microcontroller interrupts, clear the interrupt right + * away, "to avoid clearing unhandled interrupts." + */ + iowrite32(mask, ipa->reg_virt + offset); + ipa_uc_interrupt_handler(ipa, irq_id); + break; + + case IPA_IRQ_TX_SUSPEND: + /* Clearing the SUSPEND_TX interrupt also clears the + * register that tells us which suspended endpoint(s) + * caused the interrupt, so defer clearing until after + * the handler has been called. 
+ */ + ipa_power_suspend_handler(ipa, irq_id); + fallthrough; + + default: /* Silently ignore (and clear) any other condition */ iowrite32(mask, ipa->reg_virt + offset); + break; + } } /* IPA IRQ handler is threaded */ @@ -127,6 +129,29 @@ out_power_put: return IRQ_HANDLED; } +static void ipa_interrupt_enabled_update(struct ipa *ipa) +{ + const struct ipa_reg *reg = ipa_reg(ipa, IPA_IRQ_EN); + + iowrite32(ipa->interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg)); +} + +/* Enable an IPA interrupt type */ +void ipa_interrupt_enable(struct ipa *ipa, enum ipa_irq_id ipa_irq) +{ + /* Update the IPA interrupt mask to enable it */ + ipa->interrupt->enabled |= BIT(ipa_irq); + ipa_interrupt_enabled_update(ipa); +} + +/* Disable an IPA interrupt type */ +void ipa_interrupt_disable(struct ipa *ipa, enum ipa_irq_id ipa_irq) +{ + /* Update the IPA interrupt mask to disable it */ + ipa->interrupt->enabled &= ~BIT(ipa_irq); + ipa_interrupt_enabled_update(ipa); +} + void ipa_interrupt_irq_disable(struct ipa *ipa) { disable_irq(ipa->interrupt->irq); @@ -210,44 +235,6 @@ void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt) ipa_interrupt_process(interrupt, IPA_IRQ_TX_SUSPEND); } -/* Add a handler for an IPA interrupt */ -void ipa_interrupt_add(struct ipa_interrupt *interrupt, - enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler) -{ - struct ipa *ipa = interrupt->ipa; - const struct ipa_reg *reg; - - if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT)) - return; - - interrupt->handler[ipa_irq] = handler; - - /* Update the IPA interrupt mask to enable it */ - interrupt->enabled |= BIT(ipa_irq); - - reg = ipa_reg(ipa, IPA_IRQ_EN); - iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg)); -} - -/* Remove the handler for an IPA interrupt type */ -void -ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq) -{ - struct ipa *ipa = interrupt->ipa; - const struct ipa_reg *reg; - - if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT)) - return; - - /* Update the IPA interrupt mask to disable it */ - interrupt->enabled &= ~BIT(ipa_irq); - - reg = ipa_reg(ipa, IPA_IRQ_EN); - iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg)); - - interrupt->handler[ipa_irq] = NULL; -} - /* Configure the IPA interrupt framework */ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa) { diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h index 8a1bd5b89393..12e3e798ccb3 100644 --- a/drivers/net/ipa/ipa_interrupt.h +++ b/drivers/net/ipa/ipa_interrupt.h @@ -11,39 +11,7 @@ struct ipa; struct ipa_interrupt; - -/** - * typedef ipa_irq_handler_t - IPA interrupt handler function type - * @ipa: IPA pointer - * @irq_id: interrupt type - * - * Callback function registered by ipa_interrupt_add() to handle a specific - * IPA interrupt type - */ -typedef void (*ipa_irq_handler_t)(struct ipa *ipa, enum ipa_irq_id irq_id); - -/** - * ipa_interrupt_add() - Register a handler for an IPA interrupt type - * @interrupt: IPA interrupt structure - * @irq_id: IPA interrupt type - * @handler: Handler function for the interrupt - * - * Add a handler for an IPA interrupt and enable it. IPA interrupt - * handlers are run in threaded interrupt context, so are allowed to - * block. 
- */ -void ipa_interrupt_add(struct ipa_interrupt *interrupt, enum ipa_irq_id irq_id, - ipa_irq_handler_t handler); - -/** - * ipa_interrupt_remove() - Remove the handler for an IPA interrupt type - * @interrupt: IPA interrupt structure - * @irq_id: IPA interrupt type - * - * Remove an IPA interrupt handler and disable it. - */ -void ipa_interrupt_remove(struct ipa_interrupt *interrupt, - enum ipa_irq_id irq_id); +enum ipa_irq_id; /** * ipa_interrupt_suspend_enable - Enable TX_SUSPEND for an endpoint @@ -86,6 +54,20 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt); void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt); /** + * ipa_interrupt_enable() - Enable an IPA interrupt type + * @ipa: IPA pointer + * @ipa_irq: IPA interrupt ID + */ +void ipa_interrupt_enable(struct ipa *ipa, enum ipa_irq_id ipa_irq); + +/** + * ipa_interrupt_disable() - Disable an IPA interrupt type + * @ipa: IPA pointer + * @ipa_irq: IPA interrupt ID + */ +void ipa_interrupt_disable(struct ipa *ipa, enum ipa_irq_id ipa_irq); + +/** * ipa_interrupt_irq_enable() - Enable IPA interrupts * @ipa: IPA pointer * diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c index 8057be8cda80..921eecf3eff6 100644 --- a/drivers/net/ipa/ipa_power.c +++ b/drivers/net/ipa/ipa_power.c @@ -219,17 +219,7 @@ u32 ipa_core_clock_rate(struct ipa *ipa) return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0; } -/** - * ipa_suspend_handler() - Handle the suspend IPA interrupt - * @ipa: IPA pointer - * @irq_id: IPA interrupt type (unused) - * - * If an RX endpoint is suspended, and the IPA has a packet destined for - * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP - * that it should resume the endpoint. If we get one of these interrupts - * we just wake up the system. - */ -static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id) +void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id) { /* To handle an IPA interrupt we will have resumed the hardware * just to handle the interrupt, so we're done. 
If we are in a @@ -352,12 +342,11 @@ int ipa_power_setup(struct ipa *ipa) { int ret; - ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND, - ipa_suspend_handler); + ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND); ret = device_init_wakeup(&ipa->pdev->dev, true); if (ret) - ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); + ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND); return ret; } @@ -365,7 +354,7 @@ int ipa_power_setup(struct ipa *ipa) void ipa_power_teardown(struct ipa *ipa) { (void)device_init_wakeup(&ipa->pdev->dev, false); - ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); + ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND); } /* Initialize IPA power management */ diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h index 896f052e51a1..3a4c59ea1222 100644 --- a/drivers/net/ipa/ipa_power.h +++ b/drivers/net/ipa/ipa_power.h @@ -10,6 +10,7 @@ struct device; struct ipa; struct ipa_power_data; +enum ipa_irq_id; /* IPA device power management function block */ extern const struct dev_pm_ops ipa_pm_ops; @@ -48,6 +49,17 @@ void ipa_power_modem_queue_active(struct ipa *ipa); void ipa_power_retention(struct ipa *ipa, bool enable); /** + * ipa_power_suspend_handler() - Handler for SUSPEND IPA interrupts + * @ipa: IPA pointer + * @irq_id: IPA interrupt ID (unused) + * + * If an RX endpoint is suspended, and the IPA has a packet destined for + * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP + * that it should resume the endpoint. + */ +void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id); + +/** * ipa_power_setup() - Set up IPA power management * @ipa: IPA pointer * diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c index f0ee47281015..cb8a76a75f21 100644 --- a/drivers/net/ipa/ipa_uc.c +++ b/drivers/net/ipa/ipa_uc.c @@ -124,7 +124,7 @@ static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa) } /* Microcontroller event IPA interrupt handler */ -static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id) +static void ipa_uc_event_handler(struct ipa *ipa) { struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa); struct device *dev = &ipa->pdev->dev; @@ -138,7 +138,7 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id) } /* Microcontroller response IPA interrupt handler */ -static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id) +static void ipa_uc_response_hdlr(struct ipa *ipa) { struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa); struct device *dev = &ipa->pdev->dev; @@ -170,13 +170,22 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id) } } +void ipa_uc_interrupt_handler(struct ipa *ipa, enum ipa_irq_id irq_id) +{ + /* Silently ignore anything unrecognized */ + if (irq_id == IPA_IRQ_UC_0) + ipa_uc_event_handler(ipa); + else if (irq_id == IPA_IRQ_UC_1) + ipa_uc_response_hdlr(ipa); +} + /* Configure the IPA microcontroller subsystem */ void ipa_uc_config(struct ipa *ipa) { ipa->uc_powered = false; ipa->uc_loaded = false; - ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, ipa_uc_event_handler); - ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_1, ipa_uc_response_hdlr); + ipa_interrupt_enable(ipa, IPA_IRQ_UC_0); + ipa_interrupt_enable(ipa, IPA_IRQ_UC_1); } /* Inverse of ipa_uc_config() */ @@ -184,8 +193,8 @@ void ipa_uc_deconfig(struct ipa *ipa) { struct device *dev = &ipa->pdev->dev; - ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1); - ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0); + ipa_interrupt_disable(ipa, IPA_IRQ_UC_1); 
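
The ipa_interrupt rework in these hunks removes per-IRQ handler registration (ipa_interrupt_add/ipa_interrupt_remove) in favor of a fixed dispatcher plus simple mask updates. The sketch below gathers the resulting flow from the ipa_interrupt.c, ipa_uc.c and ipa_power.c changes into one place; register I/O, locking and error handling are elided, and the example_* function names are hypothetical, so treat it as an illustration of the pattern rather than the literal driver code.

#include "ipa.h"
#include "ipa_interrupt.h"
#include "ipa_uc.h"
#include "ipa_power.h"

/* Consumers no longer pass callbacks; they only toggle the enable mask. */
static void example_uc_setup(struct ipa *ipa)
{
	ipa_interrupt_enable(ipa, IPA_IRQ_UC_0);
	ipa_interrupt_enable(ipa, IPA_IRQ_UC_1);
}

/* One dispatcher routes each interrupt ID to its owning subsystem. */
static void example_dispatch(struct ipa *ipa, u32 irq_id)
{
	switch (irq_id) {
	case IPA_IRQ_UC_0:
	case IPA_IRQ_UC_1:
		/* microcontroller IRQs are cleared first, then handled */
		ipa_uc_interrupt_handler(ipa, irq_id);
		break;
	case IPA_IRQ_TX_SUSPEND:
		/* SUSPEND is cleared only after the handler has run */
		ipa_power_suspend_handler(ipa, irq_id);
		break;
	default:
		/* anything else is silently cleared and ignored */
		break;
	}
}
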
+ ipa_interrupt_disable(ipa, IPA_IRQ_UC_0); if (ipa->uc_loaded) ipa_power_retention(ipa, false); diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h index 8514096e6f36..85aa0df818c2 100644 --- a/drivers/net/ipa/ipa_uc.h +++ b/drivers/net/ipa/ipa_uc.h @@ -7,6 +7,14 @@ #define _IPA_UC_H_ struct ipa; +enum ipa_irq_id; + +/** + * ipa_uc_interrupt_handler() - Handler for microcontroller IPA interrupts + * @ipa: IPA pointer + * @irq_id: IPA interrupt ID + */ +void ipa_uc_interrupt_handler(struct ipa *ipa, enum ipa_irq_id irq_id); /** * ipa_uc_config() - Configure the IPA microcontroller subsystem diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index bf8ac7a3ded7..17452f818b0d 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -2583,16 +2583,56 @@ static bool macsec_is_configured(struct macsec_dev *macsec) return false; } +static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload) +{ + enum macsec_offload prev_offload; + const struct macsec_ops *ops; + struct macsec_context ctx; + struct macsec_dev *macsec; + int ret = 0; + + macsec = macsec_priv(dev); + + /* Check if the offloading mode is supported by the underlying layers */ + if (offload != MACSEC_OFFLOAD_OFF && + !macsec_check_offload(offload, macsec)) + return -EOPNOTSUPP; + + /* Check if the net device is busy. */ + if (netif_running(dev)) + return -EBUSY; + + /* Check if the device already has rules configured: we do not support + * rules migration. + */ + if (macsec_is_configured(macsec)) + return -EBUSY; + + prev_offload = macsec->offload; + + ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload, + macsec, &ctx); + if (!ops) + return -EOPNOTSUPP; + + macsec->offload = offload; + + ctx.secy = &macsec->secy; + ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx) + : macsec_offload(ops->mdo_add_secy, &ctx); + if (ret) + macsec->offload = prev_offload; + + return ret; +} + static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1]; - enum macsec_offload offload, prev_offload; - int (*func)(struct macsec_context *ctx); struct nlattr **attrs = info->attrs; - struct net_device *dev; - const struct macsec_ops *ops; - struct macsec_context ctx; + enum macsec_offload offload; struct macsec_dev *macsec; + struct net_device *dev; int ret = 0; if (!attrs[MACSEC_ATTR_IFINDEX]) @@ -2621,55 +2661,9 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) } offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); - if (macsec->offload == offload) - goto out; - - /* Check if the offloading mode is supported by the underlying layers */ - if (offload != MACSEC_OFFLOAD_OFF && - !macsec_check_offload(offload, macsec)) { - ret = -EOPNOTSUPP; - goto out; - } - - /* Check if the net device is busy. */ - if (netif_running(dev)) { - ret = -EBUSY; - goto out; - } - - prev_offload = macsec->offload; - macsec->offload = offload; - /* Check if the device already has rules configured: we do not support - * rules migration. - */ - if (macsec_is_configured(macsec)) { - ret = -EBUSY; - goto rollback; - } - - ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? 
prev_offload : offload, - macsec, &ctx); - if (!ops) { - ret = -EOPNOTSUPP; - goto rollback; - } - - if (prev_offload == MACSEC_OFFLOAD_OFF) - func = ops->mdo_add_secy; - else - func = ops->mdo_del_secy; - - ctx.secy = &macsec->secy; - ret = macsec_offload(func, &ctx); - if (ret) - goto rollback; - - rtnl_unlock(); - return 0; - -rollback: - macsec->offload = prev_offload; + if (macsec->offload != offload) + ret = macsec_update_offload(dev, offload); out: rtnl_unlock(); return ret; @@ -3817,6 +3811,8 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct macsec_dev *macsec = macsec_priv(dev); + bool macsec_offload_state_change = false; + enum macsec_offload offload; struct macsec_tx_sc tx_sc; struct macsec_secy secy; int ret; @@ -3840,8 +3836,18 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], if (ret) goto cleanup; + if (data[IFLA_MACSEC_OFFLOAD]) { + offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]); + if (macsec->offload != offload) { + macsec_offload_state_change = true; + ret = macsec_update_offload(dev, offload); + if (ret) + goto cleanup; + } + } + /* If h/w offloading is available, propagate to the device */ - if (macsec_is_offloaded(macsec)) { + if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; @@ -4240,16 +4246,22 @@ static size_t macsec_get_size(const struct net_device *dev) nla_total_size(1) + /* IFLA_MACSEC_SCB */ nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */ nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */ + nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */ 0; } static int macsec_fill_info(struct sk_buff *skb, const struct net_device *dev) { - struct macsec_secy *secy = &macsec_priv(dev)->secy; - struct macsec_tx_sc *tx_sc = &secy->tx_sc; + struct macsec_tx_sc *tx_sc; + struct macsec_dev *macsec; + struct macsec_secy *secy; u64 csid; + macsec = macsec_priv(dev); + secy = &macsec->secy; + tx_sc = &secy->tx_sc; + switch (secy->key_len) { case MACSEC_GCM_AES_128_SAK_LEN: csid = secy->xpn ? 
MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID; @@ -4274,6 +4286,7 @@ static int macsec_fill_info(struct sk_buff *skb, nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) || nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) || nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) || + nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) || 0) goto nla_put_failure; diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index 944d005d2bd1..c727103c8b05 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -104,61 +104,36 @@ static int aspeed_mdio_write_c22(struct mii_bus *bus, int addr, int regnum, addr, regnum, val); } -static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int regnum) +static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int devad, + int regnum) { - u8 c45_dev = (regnum >> 16) & 0x1F; - u16 c45_addr = regnum & 0xFFFF; int rc; rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR, - addr, c45_dev, c45_addr); + addr, devad, regnum); if (rc < 0) return rc; rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_READ, - addr, c45_dev, 0); + addr, devad, 0); if (rc < 0) return rc; return aspeed_mdio_get_data(bus); } -static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int regnum, - u16 val) +static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int devad, + int regnum, u16 val) { - u8 c45_dev = (regnum >> 16) & 0x1F; - u16 c45_addr = regnum & 0xFFFF; int rc; rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR, - addr, c45_dev, c45_addr); + addr, devad, regnum); if (rc < 0) return rc; return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_WRITE, - addr, c45_dev, val); -} - -static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) -{ - dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr, - regnum); - - if (regnum & MII_ADDR_C45) - return aspeed_mdio_read_c45(bus, addr, regnum); - - return aspeed_mdio_read_c22(bus, addr, regnum); -} - -static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) -{ - dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n", - __func__, addr, regnum, val); - - if (regnum & MII_ADDR_C45) - return aspeed_mdio_write_c45(bus, addr, regnum, val); - - return aspeed_mdio_write_c22(bus, addr, regnum, val); + addr, devad, val); } static int aspeed_mdio_probe(struct platform_device *pdev) @@ -185,9 +160,10 @@ static int aspeed_mdio_probe(struct platform_device *pdev) bus->name = DRV_NAME; snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id); bus->parent = &pdev->dev; - bus->read = aspeed_mdio_read; - bus->write = aspeed_mdio_write; - bus->probe_capabilities = MDIOBUS_C22_C45; + bus->read = aspeed_mdio_read_c22; + bus->write = aspeed_mdio_write_c22; + bus->read_c45 = aspeed_mdio_read_c45; + bus->write_c45 = aspeed_mdio_write_c45; rc = of_mdiobus_register(bus, pdev->dev.of_node); if (rc) { diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c index 07609114a26b..b83932562be2 100644 --- a/drivers/net/mdio/mdio-bitbang.c +++ b/drivers/net/mdio/mdio-bitbang.c @@ -127,14 +127,12 @@ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg) /* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the lower 16 bits of the 21 bit address. This transfer is done identically to a - MDIO_WRITE except for a different code. To enable clause 45 mode or - MII_ADDR_C45 into the address. 
Theoretically clause 45 and normal devices - can exist on the same bus. Normal devices should ignore the MDIO_ADDR + MDIO_WRITE except for a different code. Theoretically clause 45 and normal + devices can exist on the same bus. Normal devices should ignore the MDIO_ADDR phase. */ -static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr) +static void mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, int dev_addr, + int reg) { - unsigned int dev_addr = (addr >> 16) & 0x1F; - unsigned int reg = addr & 0xFFFF; mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr); /* send the turnaround (10) */ @@ -145,21 +143,13 @@ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr) ctrl->ops->set_mdio_dir(ctrl, 0); mdiobb_get_bit(ctrl); - - return dev_addr; } -int mdiobb_read(struct mii_bus *bus, int phy, int reg) +static int mdiobb_read_common(struct mii_bus *bus, int phy) { struct mdiobb_ctrl *ctrl = bus->priv; int ret, i; - if (reg & MII_ADDR_C45) { - reg = mdiobb_cmd_addr(ctrl, phy, reg); - mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg); - } else - mdiobb_cmd(ctrl, ctrl->op_c22_read, phy, reg); - ctrl->ops->set_mdio_dir(ctrl, 0); /* check the turnaround bit: the PHY should be driving it to zero, if this @@ -180,17 +170,31 @@ int mdiobb_read(struct mii_bus *bus, int phy, int reg) mdiobb_get_bit(ctrl); return ret; } -EXPORT_SYMBOL(mdiobb_read); -int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) +int mdiobb_read_c22(struct mii_bus *bus, int phy, int reg) { struct mdiobb_ctrl *ctrl = bus->priv; - if (reg & MII_ADDR_C45) { - reg = mdiobb_cmd_addr(ctrl, phy, reg); - mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg); - } else - mdiobb_cmd(ctrl, ctrl->op_c22_write, phy, reg); + mdiobb_cmd(ctrl, ctrl->op_c22_read, phy, reg); + + return mdiobb_read_common(bus, phy); +} +EXPORT_SYMBOL(mdiobb_read_c22); + +int mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, int reg) +{ + struct mdiobb_ctrl *ctrl = bus->priv; + + mdiobb_cmd_addr(ctrl, phy, devad, reg); + mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg); + + return mdiobb_read_common(bus, phy); +} +EXPORT_SYMBOL(mdiobb_read_c45); + +static int mdiobb_write_common(struct mii_bus *bus, u16 val) +{ + struct mdiobb_ctrl *ctrl = bus->priv; /* send the turnaround (10) */ mdiobb_send_bit(ctrl, 1); @@ -202,7 +206,27 @@ int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) mdiobb_get_bit(ctrl); return 0; } -EXPORT_SYMBOL(mdiobb_write); + +int mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val) +{ + struct mdiobb_ctrl *ctrl = bus->priv; + + mdiobb_cmd(ctrl, ctrl->op_c22_write, phy, reg); + + return mdiobb_write_common(bus, val); +} +EXPORT_SYMBOL(mdiobb_write_c22); + +int mdiobb_write_c45(struct mii_bus *bus, int phy, int devad, int reg, u16 val) +{ + struct mdiobb_ctrl *ctrl = bus->priv; + + mdiobb_cmd_addr(ctrl, phy, devad, reg); + mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg); + + return mdiobb_write_common(bus, val); +} +EXPORT_SYMBOL(mdiobb_write_c45); struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) { @@ -214,8 +238,11 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) __module_get(ctrl->ops->owner); - bus->read = mdiobb_read; - bus->write = mdiobb_write; + bus->read = mdiobb_read_c22; + bus->write = mdiobb_write_c22; + bus->read_c45 = mdiobb_read_c45; + bus->write_c45 = mdiobb_write_c45; + bus->priv = ctrl; if (!ctrl->override_op_c22) { ctrl->op_c22_read = MDIO_READ; diff --git a/drivers/net/mdio/mdio-cavium.c b/drivers/net/mdio/mdio-cavium.c index 95ce274c1be1..100e46a702ee 100644 --- 
a/drivers/net/mdio/mdio-cavium.c +++ b/drivers/net/mdio/mdio-cavium.c @@ -26,7 +26,7 @@ static void cavium_mdiobus_set_mode(struct cavium_mdiobus *p, } static int cavium_mdiobus_c45_addr(struct cavium_mdiobus *p, - int phy_id, int regnum) + int phy_id, int devad, int regnum) { union cvmx_smix_cmd smi_cmd; union cvmx_smix_wr_dat smi_wr; @@ -38,12 +38,10 @@ static int cavium_mdiobus_c45_addr(struct cavium_mdiobus *p, smi_wr.s.dat = regnum & 0xffff; oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); - regnum = (regnum >> 16) & 0x1f; - smi_cmd.u64 = 0; smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */ smi_cmd.s.phy_adr = phy_id; - smi_cmd.s.reg_adr = regnum; + smi_cmd.s.reg_adr = devad; oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); do { @@ -59,28 +57,51 @@ static int cavium_mdiobus_c45_addr(struct cavium_mdiobus *p, return 0; } -int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) +int cavium_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int regnum) { struct cavium_mdiobus *p = bus->priv; union cvmx_smix_cmd smi_cmd; union cvmx_smix_rd_dat smi_rd; - unsigned int op = 1; /* MDIO_CLAUSE_22_READ */ int timeout = 1000; - if (regnum & MII_ADDR_C45) { - int r = cavium_mdiobus_c45_addr(p, phy_id, regnum); + cavium_mdiobus_set_mode(p, C22); + + smi_cmd.u64 = 0; + smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */ + smi_cmd.s.phy_adr = phy_id; + smi_cmd.s.reg_adr = regnum; + oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); + + do { + /* Wait 1000 clocks so we don't saturate the RSL bus + * doing reads. + */ + __delay(1000); + smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT); + } while (smi_rd.s.pending && --timeout); + + if (smi_rd.s.val) + return smi_rd.s.dat; + else + return -EIO; +} +EXPORT_SYMBOL(cavium_mdiobus_read_c22); - if (r < 0) - return r; +int cavium_mdiobus_read_c45(struct mii_bus *bus, int phy_id, int devad, + int regnum) +{ + struct cavium_mdiobus *p = bus->priv; + union cvmx_smix_cmd smi_cmd; + union cvmx_smix_rd_dat smi_rd; + int timeout = 1000; + int r; - regnum = (regnum >> 16) & 0x1f; - op = 3; /* MDIO_CLAUSE_45_READ */ - } else { - cavium_mdiobus_set_mode(p, C22); - } + r = cavium_mdiobus_c45_addr(p, phy_id, devad, regnum); + if (r < 0) + return r; smi_cmd.u64 = 0; - smi_cmd.s.phy_op = op; + smi_cmd.s.phy_op = 3; /* MDIO_CLAUSE_45_READ */ smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = regnum; oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); @@ -98,36 +119,64 @@ int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) else return -EIO; } -EXPORT_SYMBOL(cavium_mdiobus_read); +EXPORT_SYMBOL(cavium_mdiobus_read_c45); -int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val) +int cavium_mdiobus_write_c22(struct mii_bus *bus, int phy_id, int regnum, + u16 val) { struct cavium_mdiobus *p = bus->priv; union cvmx_smix_cmd smi_cmd; union cvmx_smix_wr_dat smi_wr; - unsigned int op = 0; /* MDIO_CLAUSE_22_WRITE */ int timeout = 1000; - if (regnum & MII_ADDR_C45) { - int r = cavium_mdiobus_c45_addr(p, phy_id, regnum); + cavium_mdiobus_set_mode(p, C22); - if (r < 0) - return r; + smi_wr.u64 = 0; + smi_wr.s.dat = val; + oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); - regnum = (regnum >> 16) & 0x1f; - op = 1; /* MDIO_CLAUSE_45_WRITE */ - } else { - cavium_mdiobus_set_mode(p, C22); - } + smi_cmd.u64 = 0; + smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */ + smi_cmd.s.phy_adr = phy_id; + smi_cmd.s.reg_adr = regnum; + oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); + + do { + /* 
Wait 1000 clocks so we don't saturate the RSL bus + * doing reads. + */ + __delay(1000); + smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT); + } while (smi_wr.s.pending && --timeout); + + if (timeout <= 0) + return -EIO; + + return 0; +} +EXPORT_SYMBOL(cavium_mdiobus_write_c22); + +int cavium_mdiobus_write_c45(struct mii_bus *bus, int phy_id, int devad, + int regnum, u16 val) +{ + struct cavium_mdiobus *p = bus->priv; + union cvmx_smix_cmd smi_cmd; + union cvmx_smix_wr_dat smi_wr; + int timeout = 1000; + int r; + + r = cavium_mdiobus_c45_addr(p, phy_id, devad, regnum); + if (r < 0) + return r; smi_wr.u64 = 0; smi_wr.s.dat = val; oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); smi_cmd.u64 = 0; - smi_cmd.s.phy_op = op; + smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_45_WRITE */ smi_cmd.s.phy_adr = phy_id; - smi_cmd.s.reg_adr = regnum; + smi_cmd.s.reg_adr = devad; oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); do { @@ -143,7 +192,7 @@ int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val) return 0; } -EXPORT_SYMBOL(cavium_mdiobus_write); +EXPORT_SYMBOL(cavium_mdiobus_write_c45); MODULE_DESCRIPTION("Common code for OCTEON and Thunder MDIO bus drivers"); MODULE_AUTHOR("David Daney"); diff --git a/drivers/net/mdio/mdio-cavium.h b/drivers/net/mdio/mdio-cavium.h index a2245d436f5d..71b8e20cd664 100644 --- a/drivers/net/mdio/mdio-cavium.h +++ b/drivers/net/mdio/mdio-cavium.h @@ -114,5 +114,10 @@ static inline u64 oct_mdio_readq(void __iomem *addr) #define oct_mdio_readq(addr) readq(addr) #endif -int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum); -int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val); +int cavium_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int regnum); +int cavium_mdiobus_write_c22(struct mii_bus *bus, int phy_id, int regnum, + u16 val); +int cavium_mdiobus_read_c45(struct mii_bus *bus, int phy_id, int devad, + int regnum); +int cavium_mdiobus_write_c45(struct mii_bus *bus, int phy_id, int devad, + int regnum, u16 val); diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c index bf8bf5e20faf..9577a1842997 100644 --- a/drivers/net/mdio/mdio-i2c.c +++ b/drivers/net/mdio/mdio-i2c.c @@ -30,7 +30,8 @@ static unsigned int i2c_mii_phy_addr(int phy_id) return phy_id + 0x40; } -static int i2c_mii_read_default(struct mii_bus *bus, int phy_id, int reg) +static int i2c_mii_read_default_c45(struct mii_bus *bus, int phy_id, int devad, + int reg) { struct i2c_adapter *i2c = bus->priv; struct i2c_msg msgs[2]; @@ -41,8 +42,8 @@ static int i2c_mii_read_default(struct mii_bus *bus, int phy_id, int reg) return 0xffff; p = addr; - if (reg & MII_ADDR_C45) { - *p++ = 0x20 | ((reg >> 16) & 31); + if (devad >= 0) { + *p++ = 0x20 | devad; *p++ = reg >> 8; } *p++ = reg; @@ -64,8 +65,8 @@ static int i2c_mii_read_default(struct mii_bus *bus, int phy_id, int reg) return data[0] << 8 | data[1]; } -static int i2c_mii_write_default(struct mii_bus *bus, int phy_id, int reg, - u16 val) +static int i2c_mii_write_default_c45(struct mii_bus *bus, int phy_id, + int devad, int reg, u16 val) { struct i2c_adapter *i2c = bus->priv; struct i2c_msg msg; @@ -76,8 +77,8 @@ static int i2c_mii_write_default(struct mii_bus *bus, int phy_id, int reg, return 0; p = data; - if (reg & MII_ADDR_C45) { - *p++ = (reg >> 16) & 31; + if (devad >= 0) { + *p++ = devad; *p++ = reg >> 8; } *p++ = reg; @@ -94,6 +95,17 @@ static int i2c_mii_write_default(struct mii_bus *bus, int phy_id, int reg, return ret < 0 ? 
ret : 0; } +static int i2c_mii_read_default_c22(struct mii_bus *bus, int phy_id, int reg) +{ + return i2c_mii_read_default_c45(bus, phy_id, -1, reg); +} + +static int i2c_mii_write_default_c22(struct mii_bus *bus, int phy_id, int reg, + u16 val) +{ + return i2c_mii_write_default_c45(bus, phy_id, -1, reg, val); +} + /* RollBall SFPs do not access internal PHY via I2C address 0x56, but * instead via address 0x51, when SFP page is set to 0x03 and password to * 0xffffffff. @@ -403,8 +415,10 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c, mii->write = i2c_mii_write_rollball; break; default: - mii->read = i2c_mii_read_default; - mii->write = i2c_mii_write_default; + mii->read = i2c_mii_read_default_c22; + mii->write = i2c_mii_write_default_c22; + mii->read_c45 = i2c_mii_read_default_c45; + mii->write_c45 = i2c_mii_write_default_c45; break; } diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index 4eba5a91075c..78b93de636f5 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -53,7 +53,8 @@ static int ipq4019_mdio_wait_busy(struct mii_bus *bus) IPQ4019_MDIO_SLEEP, IPQ4019_MDIO_TIMEOUT); } -static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +static int ipq4019_mdio_read_c45(struct mii_bus *bus, int mii_id, int mmd, + int reg) { struct ipq4019_mdio_data *priv = bus->priv; unsigned int data; @@ -62,61 +63,71 @@ static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum) if (ipq4019_mdio_wait_busy(bus)) return -ETIMEDOUT; - /* Clause 45 support */ - if (regnum & MII_ADDR_C45) { - unsigned int mmd = (regnum >> 16) & 0x1F; - unsigned int reg = regnum & 0xFFFF; + data = readl(priv->membase + MDIO_MODE_REG); - /* Enter Clause 45 mode */ - data = readl(priv->membase + MDIO_MODE_REG); + data |= MDIO_MODE_C45; - data |= MDIO_MODE_C45; + writel(data, priv->membase + MDIO_MODE_REG); - writel(data, priv->membase + MDIO_MODE_REG); + /* issue the phy address and mmd */ + writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG); - /* issue the phy address and mmd */ - writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG); + /* issue reg */ + writel(reg, priv->membase + MDIO_DATA_WRITE_REG); - /* issue reg */ - writel(reg, priv->membase + MDIO_DATA_WRITE_REG); + cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR; - cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR; - } else { - /* Enter Clause 22 mode */ - data = readl(priv->membase + MDIO_MODE_REG); + /* issue read command */ + writel(cmd, priv->membase + MDIO_CMD_REG); - data &= ~MDIO_MODE_C45; + /* Wait read complete */ + if (ipq4019_mdio_wait_busy(bus)) + return -ETIMEDOUT; - writel(data, priv->membase + MDIO_MODE_REG); + cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_READ; - /* issue the phy address and reg */ - writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG); + writel(cmd, priv->membase + MDIO_CMD_REG); - cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ; - } + if (ipq4019_mdio_wait_busy(bus)) + return -ETIMEDOUT; - /* issue read command */ - writel(cmd, priv->membase + MDIO_CMD_REG); + /* Read and return data */ + return readl(priv->membase + MDIO_DATA_READ_REG); +} + +static int ipq4019_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) +{ + struct ipq4019_mdio_data *priv = bus->priv; + unsigned int data; + unsigned int cmd; - /* Wait read complete */ if (ipq4019_mdio_wait_busy(bus)) return -ETIMEDOUT; - if (regnum & MII_ADDR_C45) { - cmd = MDIO_CMD_ACCESS_START | 
MDIO_CMD_ACCESS_CODE_C45_READ; + data = readl(priv->membase + MDIO_MODE_REG); - writel(cmd, priv->membase + MDIO_CMD_REG); + data &= ~MDIO_MODE_C45; - if (ipq4019_mdio_wait_busy(bus)) - return -ETIMEDOUT; - } + writel(data, priv->membase + MDIO_MODE_REG); + + /* issue the phy address and reg */ + writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG); + + cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ; + + /* issue read command */ + writel(cmd, priv->membase + MDIO_CMD_REG); + + /* Wait read complete */ + if (ipq4019_mdio_wait_busy(bus)) + return -ETIMEDOUT; /* Read and return data */ return readl(priv->membase + MDIO_DATA_READ_REG); } -static int ipq4019_mdio_write(struct mii_bus *bus, int mii_id, int regnum, - u16 value) +static int ipq4019_mdio_write_c45(struct mii_bus *bus, int mii_id, int mmd, + int reg, u16 value) { struct ipq4019_mdio_data *priv = bus->priv; unsigned int data; @@ -125,50 +136,63 @@ static int ipq4019_mdio_write(struct mii_bus *bus, int mii_id, int regnum, if (ipq4019_mdio_wait_busy(bus)) return -ETIMEDOUT; - /* Clause 45 support */ - if (regnum & MII_ADDR_C45) { - unsigned int mmd = (regnum >> 16) & 0x1F; - unsigned int reg = regnum & 0xFFFF; + data = readl(priv->membase + MDIO_MODE_REG); - /* Enter Clause 45 mode */ - data = readl(priv->membase + MDIO_MODE_REG); + data |= MDIO_MODE_C45; - data |= MDIO_MODE_C45; + writel(data, priv->membase + MDIO_MODE_REG); - writel(data, priv->membase + MDIO_MODE_REG); + /* issue the phy address and mmd */ + writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG); - /* issue the phy address and mmd */ - writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG); + /* issue reg */ + writel(reg, priv->membase + MDIO_DATA_WRITE_REG); - /* issue reg */ - writel(reg, priv->membase + MDIO_DATA_WRITE_REG); + cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR; - cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR; + writel(cmd, priv->membase + MDIO_CMD_REG); - writel(cmd, priv->membase + MDIO_CMD_REG); + if (ipq4019_mdio_wait_busy(bus)) + return -ETIMEDOUT; - if (ipq4019_mdio_wait_busy(bus)) - return -ETIMEDOUT; - } else { - /* Enter Clause 22 mode */ - data = readl(priv->membase + MDIO_MODE_REG); + /* issue write data */ + writel(value, priv->membase + MDIO_DATA_WRITE_REG); - data &= ~MDIO_MODE_C45; + cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_WRITE; + writel(cmd, priv->membase + MDIO_CMD_REG); - writel(data, priv->membase + MDIO_MODE_REG); + /* Wait write complete */ + if (ipq4019_mdio_wait_busy(bus)) + return -ETIMEDOUT; - /* issue the phy address and reg */ - writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG); - } + return 0; +} + +static int ipq4019_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + struct ipq4019_mdio_data *priv = bus->priv; + unsigned int data; + unsigned int cmd; + + if (ipq4019_mdio_wait_busy(bus)) + return -ETIMEDOUT; + + /* Enter Clause 22 mode */ + data = readl(priv->membase + MDIO_MODE_REG); + + data &= ~MDIO_MODE_C45; + + writel(data, priv->membase + MDIO_MODE_REG); + + /* issue the phy address and reg */ + writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG); /* issue write data */ writel(value, priv->membase + MDIO_DATA_WRITE_REG); /* issue write command */ - if (regnum & MII_ADDR_C45) - cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_WRITE; - else - cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE; + cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE; writel(cmd, priv->membase + 
MDIO_CMD_REG); @@ -235,8 +259,10 @@ static int ipq4019_mdio_probe(struct platform_device *pdev) priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res); bus->name = "ipq4019_mdio"; - bus->read = ipq4019_mdio_read; - bus->write = ipq4019_mdio_write; + bus->read = ipq4019_mdio_read_c22; + bus->write = ipq4019_mdio_write_c22; + bus->read_c45 = ipq4019_mdio_read_c45; + bus->write_c45 = ipq4019_mdio_write_c45; bus->reset = ipq_mdio_reset; bus->parent = &pdev->dev; snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id); diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c index 014c0baedbd2..956d54846b62 100644 --- a/drivers/net/mdio/mdio-mux-bcm-iproc.c +++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c @@ -98,7 +98,7 @@ static int iproc_mdio_wait_for_idle(void __iomem *base, bool result) * Return value: Successful Read operation returns read reg values and write * operation returns 0. Failure operation returns negative error code. */ -static int start_miim_ops(void __iomem *base, +static int start_miim_ops(void __iomem *base, bool c45, u16 phyid, u32 reg, u16 val, u32 op) { u32 param; @@ -112,7 +112,7 @@ static int start_miim_ops(void __iomem *base, param = readl(base + MDIO_PARAM_OFFSET); param |= phyid << MDIO_PARAM_PHY_ID; param |= val << MDIO_PARAM_PHY_DATA; - if (reg & MII_ADDR_C45) + if (c45) param |= BIT(MDIO_PARAM_C45_SEL); writel(param, base + MDIO_PARAM_OFFSET); @@ -131,28 +131,58 @@ err: return ret; } -static int iproc_mdiomux_read(struct mii_bus *bus, int phyid, int reg) +static int iproc_mdiomux_read_c22(struct mii_bus *bus, int phyid, int reg) { struct iproc_mdiomux_desc *md = bus->priv; int ret; - ret = start_miim_ops(md->base, phyid, reg, 0, MDIO_CTRL_READ_OP); + ret = start_miim_ops(md->base, false, phyid, reg, 0, MDIO_CTRL_READ_OP); if (ret < 0) - dev_err(&bus->dev, "mdiomux read operation failed!!!"); + dev_err(&bus->dev, "mdiomux c22 read operation failed!!!"); return ret; } -static int iproc_mdiomux_write(struct mii_bus *bus, - int phyid, int reg, u16 val) +static int iproc_mdiomux_read_c45(struct mii_bus *bus, int phyid, int devad, + int reg) +{ + struct iproc_mdiomux_desc *md = bus->priv; + int ret; + + ret = start_miim_ops(md->base, true, phyid, reg | devad << 16, 0, + MDIO_CTRL_READ_OP); + if (ret < 0) + dev_err(&bus->dev, "mdiomux read c45 operation failed!!!"); + + return ret; +} + +static int iproc_mdiomux_write_c22(struct mii_bus *bus, + int phyid, int reg, u16 val) +{ + struct iproc_mdiomux_desc *md = bus->priv; + int ret; + + /* Write val at reg offset */ + ret = start_miim_ops(md->base, false, phyid, reg, val, + MDIO_CTRL_WRITE_OP); + if (ret < 0) + dev_err(&bus->dev, "mdiomux write c22 operation failed!!!"); + + return ret; +} + +static int iproc_mdiomux_write_c45(struct mii_bus *bus, + int phyid, int devad, int reg, u16 val) { struct iproc_mdiomux_desc *md = bus->priv; int ret; /* Write val at reg offset */ - ret = start_miim_ops(md->base, phyid, reg, val, MDIO_CTRL_WRITE_OP); + ret = start_miim_ops(md->base, true, phyid, reg | devad << 16, val, + MDIO_CTRL_WRITE_OP); if (ret < 0) - dev_err(&bus->dev, "mdiomux write operation failed!!!"); + dev_err(&bus->dev, "mdiomux write c45 operation failed!!!"); return ret; } @@ -223,8 +253,10 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) bus->name = "iProc MDIO mux bus"; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); bus->parent = &pdev->dev; - bus->read = iproc_mdiomux_read; - bus->write = iproc_mdiomux_write; + bus->read = 
iproc_mdiomux_read_c22; + bus->write = iproc_mdiomux_write_c22; + bus->read_c45 = iproc_mdiomux_read_c45; + bus->write_c45 = iproc_mdiomux_write_c45; bus->phy_mask = ~0; bus->dev.of_node = pdev->dev.of_node; diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c index e096e68ac667..7c65c547d377 100644 --- a/drivers/net/mdio/mdio-octeon.c +++ b/drivers/net/mdio/mdio-octeon.c @@ -58,8 +58,10 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%px", bus->register_base); bus->mii_bus->parent = &pdev->dev; - bus->mii_bus->read = cavium_mdiobus_read; - bus->mii_bus->write = cavium_mdiobus_write; + bus->mii_bus->read = cavium_mdiobus_read_c22; + bus->mii_bus->write = cavium_mdiobus_write_c22; + bus->mii_bus->read_c45 = cavium_mdiobus_read_c45; + bus->mii_bus->write_c45 = cavium_mdiobus_write_c45; platform_set_drvdata(pdev, bus); diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c index 822d2cdd2f35..3847ee92c109 100644 --- a/drivers/net/mdio/mdio-thunder.c +++ b/drivers/net/mdio/mdio-thunder.c @@ -93,8 +93,10 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev, bus->mii_bus->name = KBUILD_MODNAME; snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", r.start); bus->mii_bus->parent = &pdev->dev; - bus->mii_bus->read = cavium_mdiobus_read; - bus->mii_bus->write = cavium_mdiobus_write; + bus->mii_bus->read = cavium_mdiobus_read_c22; + bus->mii_bus->write = cavium_mdiobus_write_c22; + bus->mii_bus->read_c45 = cavium_mdiobus_read_c45; + bus->mii_bus->write_c45 = cavium_mdiobus_write_c45; err = of_mdiobus_register(bus->mii_bus, node); if (err) diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index b962fc8e1397..738784fda117 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -1556,14 +1556,18 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev) goto err_devlink_unlock; } - err = nsim_dev_resources_register(devlink); + err = devl_register(devlink); if (err) goto err_vfc_free; + err = nsim_dev_resources_register(devlink); + if (err) + goto err_dl_unregister; + err = devlink_params_register(devlink, nsim_devlink_params, ARRAY_SIZE(nsim_devlink_params)); if (err) - goto err_dl_unregister; + goto err_resource_unregister; nsim_devlink_set_params_init_values(nsim_dev, devlink); err = nsim_dev_dummy_region_init(nsim_dev, devlink); @@ -1607,7 +1611,6 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev) nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY; devlink_set_features(devlink, DEVLINK_F_RELOAD); devl_unlock(devlink); - devlink_register(devlink); return 0; err_hwstats_exit: @@ -1629,8 +1632,10 @@ err_dummy_region_exit: err_params_unregister: devlink_params_unregister(devlink, nsim_devlink_params, ARRAY_SIZE(nsim_devlink_params)); -err_dl_unregister: +err_resource_unregister: devl_resources_unregister(devlink); +err_dl_unregister: + devl_unregister(devlink); err_vfc_free: kfree(nsim_dev->vfconfigs); err_devlink_unlock: @@ -1668,7 +1673,6 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev) struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev); struct devlink *devlink = priv_to_devlink(nsim_dev); - devlink_unregister(devlink); devl_lock(devlink); nsim_dev_reload_destroy(nsim_dev); @@ -1677,6 +1681,7 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev) devlink_params_unregister(devlink, nsim_devlink_params, ARRAY_SIZE(nsim_devlink_params)); devl_resources_unregister(devlink); + devl_unregister(devlink); 
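
The netdevsim hunks move devlink instance registration from the tail of probe (devlink_register) to an early devl_register call made under the devlink lock, with devl_unregister now the last teardown step in the same locked section of remove. A minimal sketch of that ordering, with the netdevsim-specific setup steps collapsed into comments and the example_* function names invented for illustration:

static int example_probe(struct devlink *devlink)
{
	int err;

	devl_lock(devlink);

	err = devl_register(devlink);
	if (err)
		goto err_unlock;

	/* ... resources, params, regions, health reporters ... */

	devl_unlock(devlink);
	return 0;

err_unlock:
	devl_unlock(devlink);
	return err;
}

static void example_remove(struct devlink *devlink)
{
	devl_lock(devlink);

	/* ... tear everything down in reverse order ... */

	devl_unregister(devlink);
	devl_unlock(devlink);
}
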
kfree(nsim_dev->vfconfigs); kfree(nsim_dev->fa_cookie); devl_unlock(devlink); diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c index aa77af4a68df..eb04ed715d2d 100644 --- a/drivers/net/netdevsim/health.c +++ b/drivers/net/netdevsim/health.c @@ -233,16 +233,16 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink) int err; health->empty_reporter = - devlink_health_reporter_create(devlink, - &nsim_dev_empty_reporter_ops, - 0, health); + devl_health_reporter_create(devlink, + &nsim_dev_empty_reporter_ops, + 0, health); if (IS_ERR(health->empty_reporter)) return PTR_ERR(health->empty_reporter); health->dummy_reporter = - devlink_health_reporter_create(devlink, - &nsim_dev_dummy_reporter_ops, - 0, health); + devl_health_reporter_create(devlink, + &nsim_dev_dummy_reporter_ops, + 0, health); if (IS_ERR(health->dummy_reporter)) { err = PTR_ERR(health->dummy_reporter); goto err_empty_reporter_destroy; @@ -266,9 +266,9 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink) return 0; err_dummy_reporter_destroy: - devlink_health_reporter_destroy(health->dummy_reporter); + devl_health_reporter_destroy(health->dummy_reporter); err_empty_reporter_destroy: - devlink_health_reporter_destroy(health->empty_reporter); + devl_health_reporter_destroy(health->empty_reporter); return err; } @@ -278,6 +278,6 @@ void nsim_dev_health_exit(struct nsim_dev *nsim_dev) debugfs_remove_recursive(health->ddir); kfree(health->recovered_break_msg); - devlink_health_reporter_destroy(health->dummy_reporter); - devlink_health_reporter_destroy(health->empty_reporter); + devl_health_reporter_destroy(health->dummy_reporter); + devl_health_reporter_destroy(health->empty_reporter); } diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c index 7d5fc7f54b2f..3903f3baba2b 100644 --- a/drivers/net/pcs/pcs-lynx.c +++ b/drivers/net/pcs/pcs-lynx.c @@ -10,9 +10,6 @@ #define SGMII_CLOCK_PERIOD_NS 8 /* PCS is clocked at 125 MHz */ #define LINK_TIMER_VAL(ns) ((u32)((ns) / SGMII_CLOCK_PERIOD_NS)) -#define SGMII_AN_LINK_TIMER_NS 1600000 /* defined by SGMII spec */ -#define IEEE8023_LINK_TIMER_NS 10000000 - #define LINK_TIMER_LO 0x12 #define LINK_TIMER_HI 0x13 #define IF_MODE 0x14 @@ -126,26 +123,25 @@ static int lynx_pcs_config_giga(struct mdio_device *pcs, unsigned int mode, phy_interface_t interface, const unsigned long *advertising) { + int link_timer_ns; u32 link_timer; u16 if_mode; int err; - if (interface == PHY_INTERFACE_MODE_1000BASEX) { - link_timer = LINK_TIMER_VAL(IEEE8023_LINK_TIMER_NS); + link_timer_ns = phylink_get_link_timer_ns(interface); + if (link_timer_ns > 0) { + link_timer = LINK_TIMER_VAL(link_timer_ns); + mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff); mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16); + } + if (interface == PHY_INTERFACE_MODE_1000BASEX) { if_mode = 0; } else { if_mode = IF_MODE_SGMII_EN; - if (mode == MLO_AN_INBAND) { + if (mode == MLO_AN_INBAND) if_mode |= IF_MODE_USE_SGMII_AN; - - /* Adjust link timer for SGMII */ - link_timer = LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS); - mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff); - mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16); - } } err = mdiodev_modify(pcs, IF_MODE, diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index f6a038a1d51e..bc428a816719 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -199,9 +199,7 @@ int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val) static int 
xpcs_modify_changed(struct dw_xpcs *xpcs, int dev, u32 reg, u16 mask, u16 set) { - u32 reg_addr = mdiobus_c45_addr(dev, reg); - - return mdiodev_modify_changed(xpcs->mdiodev, reg_addr, mask, set); + return mdiodev_c45_modify_changed(xpcs->mdiodev, dev, reg, mask, set); } static int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 1327290decab..f5df2edc94a5 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -277,6 +277,13 @@ config NXP_TJA11XX_PHY help Currently supports the NXP TJA1100 and TJA1101 PHY. +config NCN26000_PHY + tristate "Onsemi 10BASE-T1S Ethernet PHY" + help + Adds support for the onsemi 10BASE-T1S Ethernet PHY. + Currently supports the NCN26000 10BASE-T1S Industrial PHY + with MII interface. + config AT803X_PHY tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs" depends on REGULATOR diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index f7138d3c896b..b5138066ba04 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -77,6 +77,7 @@ obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o obj-$(CONFIG_MICROSEMI_PHY) += mscc/ obj-$(CONFIG_MOTORCOMM_PHY) += motorcomm.o obj-$(CONFIG_NATIONAL_PHY) += national.o +obj-$(CONFIG_NCN26000_PHY) += ncn26000.o obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja11xx.o obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o obj-$(CONFIG_QSEMI_PHY) += qsemi.o diff --git a/drivers/net/phy/mdio-open-alliance.h b/drivers/net/phy/mdio-open-alliance.h new file mode 100644 index 000000000000..931e14660d75 --- /dev/null +++ b/drivers/net/phy/mdio-open-alliance.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mdio-open-alliance.h - definition of OPEN Alliance SIG standard registers + */ + +#ifndef __MDIO_OPEN_ALLIANCE__ +#define __MDIO_OPEN_ALLIANCE__ + +#include <linux/mdio.h> + +/* NOTE: all OATC14 registers are located in MDIO_MMD_VEND2 */ + +/* Open Alliance TC14 (10BASE-T1S) registers */ +#define MDIO_OATC14_PLCA_IDVER 0xca00 /* PLCA ID and version */ +#define MDIO_OATC14_PLCA_CTRL0 0xca01 /* PLCA Control register 0 */ +#define MDIO_OATC14_PLCA_CTRL1 0xca02 /* PLCA Control register 1 */ +#define MDIO_OATC14_PLCA_STATUS 0xca03 /* PLCA Status register */ +#define MDIO_OATC14_PLCA_TOTMR 0xca04 /* PLCA TO Timer register */ +#define MDIO_OATC14_PLCA_BURST 0xca05 /* PLCA BURST mode register */ + +/* Open Alliance TC14 PLCA IDVER register */ +#define MDIO_OATC14_PLCA_IDM 0xff00 /* PLCA MAP ID */ +#define MDIO_OATC14_PLCA_VER 0x00ff /* PLCA MAP version */ + +/* Open Alliance TC14 PLCA CTRL0 register */ +#define MDIO_OATC14_PLCA_EN BIT(15) /* PLCA enable */ +#define MDIO_OATC14_PLCA_RST BIT(14) /* PLCA reset */ + +/* Open Alliance TC14 PLCA CTRL1 register */ +#define MDIO_OATC14_PLCA_NCNT 0xff00 /* PLCA node count */ +#define MDIO_OATC14_PLCA_ID 0x00ff /* PLCA local node ID */ + +/* Open Alliance TC14 PLCA STATUS register */ +#define MDIO_OATC14_PLCA_PST BIT(15) /* PLCA status indication */ + +/* Open Alliance TC14 PLCA TOTMR register */ +#define MDIO_OATC14_PLCA_TOT 0x00ff + +/* Open Alliance TC14 PLCA BURST register */ +#define MDIO_OATC14_PLCA_MAXBC 0xff00 +#define MDIO_OATC14_PLCA_BTMR 0x00ff + +/* Version Identifiers */ +#define OATC14_IDM 0x0a00 + +#endif /* __MDIO_OPEN_ALLIANCE__ */ diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 16e021b477f0..fb633fa3e872 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -19,6 +19,7 @@ #include <linux/interrupt.h> #include 
<linux/io.h> #include <linux/kernel.h> +#include <linux/micrel_phy.h> #include <linux/mii.h> #include <linux/mm.h> #include <linux/module.h> @@ -511,6 +512,126 @@ static int mdiobus_create_device(struct mii_bus *bus, return ret; } +static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45) +{ + struct phy_device *phydev = ERR_PTR(-ENODEV); + int err; + + phydev = get_phy_device(bus, addr, c45); + if (IS_ERR(phydev)) + return phydev; + + /* For DT, see if the auto-probed phy has a corresponding child + * in the bus node, and set the of_node pointer in this case. + */ + of_mdiobus_link_mdiodev(bus, &phydev->mdio); + + err = phy_device_register(phydev); + if (err) { + phy_device_free(phydev); + return ERR_PTR(-ENODEV); + } + + return phydev; +} + +/** + * mdiobus_scan_c22 - scan one address on a bus for C22 MDIO devices. + * @bus: mii_bus to scan + * @addr: address on bus to scan + * + * This function scans one address on the MDIO bus, looking for + * devices which can be identified using a vendor/product ID in + * registers 2 and 3. Not all MDIO devices have such registers, but + * PHY devices typically do. Hence this function assumes anything + * found is a PHY, or can be treated as a PHY. Other MDIO devices, + * such as switches, will probably not be found during the scan. + */ +struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr) +{ + return mdiobus_scan(bus, addr, false); +} +EXPORT_SYMBOL(mdiobus_scan_c22); + +/** + * mdiobus_scan_c45 - scan one address on a bus for C45 MDIO devices. + * @bus: mii_bus to scan + * @addr: address on bus to scan + * + * This function scans one address on the MDIO bus, looking for + * devices which can be identified using a vendor/product ID in + * registers 2 and 3. Not all MDIO devices have such registers, but + * PHY devices typically do. Hence this function assumes anything + * found is a PHY, or can be treated as a PHY. Other MDIO devices, + * such as switches, will probably not be found during the scan. + */ +static struct phy_device *mdiobus_scan_c45(struct mii_bus *bus, int addr) +{ + return mdiobus_scan(bus, addr, true); +} + +static int mdiobus_scan_bus_c22(struct mii_bus *bus) +{ + int i; + + for (i = 0; i < PHY_MAX_ADDR; i++) { + if ((bus->phy_mask & BIT(i)) == 0) { + struct phy_device *phydev; + + phydev = mdiobus_scan_c22(bus, i); + if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV)) + return PTR_ERR(phydev); + } + } + return 0; +} + +static int mdiobus_scan_bus_c45(struct mii_bus *bus) +{ + int i; + + for (i = 0; i < PHY_MAX_ADDR; i++) { + if ((bus->phy_mask & BIT(i)) == 0) { + struct phy_device *phydev; + + /* Don't scan C45 if we already have a C22 device */ + if (bus->mdio_map[i]) + continue; + + phydev = mdiobus_scan_c45(bus, i); + if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV)) + return PTR_ERR(phydev); + } + } + return 0; +} + +/* There are some C22 PHYs which do bad things when where is a C45 + * transaction on the bus, like accepting a read themselves, and + * stomping over the true devices reply, to performing a write to + * themselves which was intended for another device. Now that C22 + * devices have been found, see if any of them are bad for C45, and if we + * should skip the C45 scan. 
+ */ +static bool mdiobus_prevent_c45_scan(struct mii_bus *bus) +{ + int i; + + for (i = 0; i < PHY_MAX_ADDR; i++) { + struct phy_device *phydev; + u32 oui; + + phydev = mdiobus_get_phy(bus, i); + if (!phydev) + continue; + oui = phydev->phy_id >> 10; + + if (oui == MICREL_OUI) + return true; + } + return false; +} + /** * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus * @bus: target mii_bus @@ -528,11 +649,19 @@ static int mdiobus_create_device(struct mii_bus *bus, int __mdiobus_register(struct mii_bus *bus, struct module *owner) { struct mdio_device *mdiodev; - int i, err; struct gpio_desc *gpiod; + bool prevent_c45_scan; + int i, err; + + if (!bus || !bus->name) + return -EINVAL; - if (NULL == bus || NULL == bus->name || - NULL == bus->read || NULL == bus->write) + /* An access method always needs both read and write operations */ + if (!!bus->read != !!bus->write || !!bus->read_c45 != !!bus->write_c45) + return -EINVAL; + + /* At least one method is mandatory */ + if (!bus->read && !bus->read_c45) return -EINVAL; if (bus->parent && bus->parent->of_node) @@ -587,16 +716,18 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) goto error_reset_gpiod; } - for (i = 0; i < PHY_MAX_ADDR; i++) { - if ((bus->phy_mask & BIT(i)) == 0) { - struct phy_device *phydev; + if (bus->read) { + err = mdiobus_scan_bus_c22(bus); + if (err) + goto error; + } - phydev = mdiobus_scan(bus, i); - if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV)) { - err = PTR_ERR(phydev); - goto error; - } - } + prevent_c45_scan = mdiobus_prevent_c45_scan(bus); + + if (!prevent_c45_scan && bus->read_c45) { + err = mdiobus_scan_bus_c45(bus); + if (err) + goto error; } mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device); @@ -606,7 +737,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) return 0; error: - while (--i >= 0) { + for (i = 0; i < PHY_MAX_ADDR; i++) { mdiodev = bus->mdio_map[i]; if (!mdiodev) continue; @@ -677,57 +808,6 @@ void mdiobus_free(struct mii_bus *bus) } EXPORT_SYMBOL(mdiobus_free); -/** - * mdiobus_scan - scan a bus for MDIO devices. - * @bus: mii_bus to scan - * @addr: address on bus to scan - * - * This function scans the MDIO bus, looking for devices which can be - * identified using a vendor/product ID in registers 2 and 3. Not all - * MDIO devices have such registers, but PHY devices typically - * do. Hence this function assumes anything found is a PHY, or can be - * treated as a PHY. Other MDIO devices, such as switches, will - * probably not be found during the scan. - */ -struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) -{ - struct phy_device *phydev = ERR_PTR(-ENODEV); - int err; - - switch (bus->probe_capabilities) { - case MDIOBUS_NO_CAP: - case MDIOBUS_C22: - phydev = get_phy_device(bus, addr, false); - break; - case MDIOBUS_C45: - phydev = get_phy_device(bus, addr, true); - break; - case MDIOBUS_C22_C45: - phydev = get_phy_device(bus, addr, false); - if (IS_ERR(phydev)) - phydev = get_phy_device(bus, addr, true); - break; - } - - if (IS_ERR(phydev)) - return phydev; - - /* - * For DT, see if the auto-probed phy has a correspoding child - * in the bus node, and set the of_node pointer in this case. 
- */ - of_mdiobus_link_mdiodev(bus, &phydev->mdio); - - err = phy_device_register(phydev); - if (err) { - phy_device_free(phydev); - return ERR_PTR(-ENODEV); - } - - return phydev; -} -EXPORT_SYMBOL(mdiobus_scan); - static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret) { preempt_disable(); @@ -764,7 +844,10 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) lockdep_assert_held_once(&bus->mdio_lock); - retval = bus->read(bus, addr, regnum); + if (bus->read) + retval = bus->read(bus, addr, regnum); + else + retval = -EOPNOTSUPP; trace_mdio_access(bus, 1, addr, regnum, retval, retval); mdiobus_stats_acct(&bus->stats[addr], true, retval); @@ -790,7 +873,10 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) lockdep_assert_held_once(&bus->mdio_lock); - err = bus->write(bus, addr, regnum, val); + if (bus->write) + err = bus->write(bus, addr, regnum, val); + else + err = -EOPNOTSUPP; trace_mdio_access(bus, 0, addr, regnum, val, err); mdiobus_stats_acct(&bus->stats[addr], false, err); @@ -831,6 +917,105 @@ int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum, } EXPORT_SYMBOL_GPL(__mdiobus_modify_changed); +static u32 mdiobus_c45_addr(int devad, u16 regnum) +{ + return MII_ADDR_C45 | devad << MII_DEVADDR_C45_SHIFT | regnum; +} + +/** + * __mdiobus_c45_read - Unlocked version of the mdiobus_c45_read function + * @bus: the mii_bus struct + * @addr: the phy address + * @devad: device address to read + * @regnum: register number to read + * + * Read a MDIO bus register. Caller must hold the mdio bus lock. + * + * NOTE: MUST NOT be called from interrupt context. + */ +int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum) +{ + int retval; + + lockdep_assert_held_once(&bus->mdio_lock); + + if (bus->read_c45) + retval = bus->read_c45(bus, addr, devad, regnum); + else + retval = bus->read(bus, addr, mdiobus_c45_addr(devad, regnum)); + + trace_mdio_access(bus, 1, addr, regnum, retval, retval); + mdiobus_stats_acct(&bus->stats[addr], true, retval); + + return retval; +} +EXPORT_SYMBOL(__mdiobus_c45_read); + +/** + * __mdiobus_c45_write - Unlocked version of the mdiobus_write function + * @bus: the mii_bus struct + * @addr: the phy address + * @devad: device address to read + * @regnum: register number to write + * @val: value to write to @regnum + * + * Write a MDIO bus register. Caller must hold the mdio bus lock. + * + * NOTE: MUST NOT be called from interrupt context. + */ +int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum, + u16 val) +{ + int err; + + lockdep_assert_held_once(&bus->mdio_lock); + + if (bus->write_c45) + err = bus->write_c45(bus, addr, devad, regnum, val); + else + err = bus->write(bus, addr, mdiobus_c45_addr(devad, regnum), + val); + + trace_mdio_access(bus, 0, addr, regnum, val, err); + mdiobus_stats_acct(&bus->stats[addr], false, err); + + return err; +} +EXPORT_SYMBOL(__mdiobus_c45_write); + +/** + * __mdiobus_c45_modify_changed - Unlocked version of the mdiobus_modify function + * @bus: the mii_bus struct + * @addr: the phy address + * @devad: device address to read + * @regnum: register number to modify + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + * + * Read, modify, and if any change, write the register value back to the + * device. Any error returns a negative number. + * + * NOTE: MUST NOT be called from interrupt context. 
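For readers following the MDIO rework above: a controller driver now publishes each access method through its own callback pair, and __mdiobus_register() only accepts a bus whose read/write and read_c45/write_c45 callbacks come in matching pairs, with at least one pair present. A minimal sketch of a probe function under the new scheme follows; the driver name, callbacks and stub bodies are hypothetical, and the usual <linux/phy.h> and <linux/platform_device.h> includes are assumed.

static int foo_mdio_read_c22(struct mii_bus *bus, int addr, int regnum)
{
	return 0;	/* real hardware access goes here; return value or -errno */
}

static int foo_mdio_write_c22(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	return 0;
}

static int foo_mdio_read_c45(struct mii_bus *bus, int addr, int devad, int regnum)
{
	return 0;	/* a true Clause 45 frame; no MII_ADDR_C45 encoding involved */
}

static int foo_mdio_write_c45(struct mii_bus *bus, int addr, int devad,
			      int regnum, u16 val)
{
	return 0;
}

static int foo_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus)
		return -ENOMEM;

	bus->name = "foo-mdio";
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));
	bus->parent = &pdev->dev;
	bus->read = foo_mdio_read_c22;		/* C22 pair */
	bus->write = foo_mdio_write_c22;
	bus->read_c45 = foo_mdio_read_c45;	/* C45 pair */
	bus->write_c45 = foo_mdio_write_c45;

	return devm_mdiobus_register(&pdev->dev, bus);
}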
+ */ +static int __mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, + int devad, u32 regnum, u16 mask, + u16 set) +{ + int new, ret; + + ret = __mdiobus_c45_read(bus, addr, devad, regnum); + if (ret < 0) + return ret; + + new = (ret & ~mask) | set; + if (new == ret) + return 0; + + ret = __mdiobus_c45_write(bus, addr, devad, regnum, new); + + return ret < 0 ? ret : 1; +} + /** * mdiobus_read_nested - Nested version of the mdiobus_read function * @bus: the mii_bus struct @@ -879,6 +1064,56 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) EXPORT_SYMBOL(mdiobus_read); /** + * mdiobus_c45_read - Convenience function for reading a given MII mgmt register + * @bus: the mii_bus struct + * @addr: the phy address + * @devad: device address to read + * @regnum: register number to read + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +int mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum) +{ + int retval; + + mutex_lock(&bus->mdio_lock); + retval = __mdiobus_c45_read(bus, addr, devad, regnum); + mutex_unlock(&bus->mdio_lock); + + return retval; +} +EXPORT_SYMBOL(mdiobus_c45_read); + +/** + * mdiobus_c45_read_nested - Nested version of the mdiobus_c45_read function + * @bus: the mii_bus struct + * @addr: the phy address + * @devad: device address to read + * @regnum: register number to read + * + * In case of nested MDIO bus access avoid lockdep false positives by + * using mutex_lock_nested(). + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +int mdiobus_c45_read_nested(struct mii_bus *bus, int addr, int devad, + u32 regnum) +{ + int retval; + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + retval = __mdiobus_c45_read(bus, addr, devad, regnum); + mutex_unlock(&bus->mdio_lock); + + return retval; +} +EXPORT_SYMBOL(mdiobus_c45_read_nested); + +/** * mdiobus_write_nested - Nested version of the mdiobus_write function * @bus: the mii_bus struct * @addr: the phy address @@ -928,6 +1163,59 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) EXPORT_SYMBOL(mdiobus_write); /** + * mdiobus_c45_write - Convenience function for writing a given MII mgmt register + * @bus: the mii_bus struct + * @addr: the phy address + * @devad: device address to read + * @regnum: register number to write + * @val: value to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +int mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum, + u16 val) +{ + int err; + + mutex_lock(&bus->mdio_lock); + err = __mdiobus_c45_write(bus, addr, devad, regnum, val); + mutex_unlock(&bus->mdio_lock); + + return err; +} +EXPORT_SYMBOL(mdiobus_c45_write); + +/** + * mdiobus_c45_write_nested - Nested version of the mdiobus_c45_write function + * @bus: the mii_bus struct + * @addr: the phy address + * @devad: device address to read + * @regnum: register number to write + * @val: value to write to @regnum + * + * In case of nested MDIO bus access avoid lockdep false positives by + * using mutex_lock_nested(). + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. 
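For consumers, the practical change is that a Clause 45 register is now addressed by explicit (addr, devad, regnum) arguments rather than a MII_ADDR_C45-encoded register number. A hedged usage sketch: MDIO address 4 and the vendor register/value below are made up for illustration, while the helper names and standard MDIO constants are real.

static int foo_c45_poke(struct mii_bus *bus)
{
	int devs1;

	/* devices-in-package (low word) of the PMA/PMD MMD at MDIO address 4 */
	devs1 = mdiobus_c45_read(bus, 4, MDIO_MMD_PMAPMD, MDIO_DEVS1);
	if (devs1 < 0)
		return devs1;

	/* write an illustrative register in the vendor MMD */
	return mdiobus_c45_write(bus, 4, MDIO_MMD_VEND1, 0x0010, 0x1234);
}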
+ */ +int mdiobus_c45_write_nested(struct mii_bus *bus, int addr, int devad, + u32 regnum, u16 val) +{ + int err; + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + err = __mdiobus_c45_write(bus, addr, devad, regnum, val); + mutex_unlock(&bus->mdio_lock); + + return err; +} +EXPORT_SYMBOL(mdiobus_c45_write_nested); + +/** * mdiobus_modify - Convenience function for modifying a given mdio device * register * @bus: the mii_bus struct @@ -949,6 +1237,30 @@ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set) EXPORT_SYMBOL_GPL(mdiobus_modify); /** + * mdiobus_c45_modify - Convenience function for modifying a given mdio device + * register + * @bus: the mii_bus struct + * @addr: the phy address + * @devad: device address to read + * @regnum: register number to write + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + */ +int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum, + u16 mask, u16 set) +{ + int err; + + mutex_lock(&bus->mdio_lock); + err = __mdiobus_c45_modify_changed(bus, addr, devad, regnum, + mask, set); + mutex_unlock(&bus->mdio_lock); + + return err < 0 ? err : 0; +} +EXPORT_SYMBOL_GPL(mdiobus_c45_modify); + +/** * mdiobus_modify_changed - Convenience function for modifying a given mdio * device register and returning if it changed * @bus: the mii_bus struct @@ -971,6 +1283,29 @@ int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum, EXPORT_SYMBOL_GPL(mdiobus_modify_changed); /** + * mdiobus_c45_modify_changed - Convenience function for modifying a given mdio + * device register and returning if it changed + * @bus: the mii_bus struct + * @addr: the phy address + * @devad: device address to read + * @regnum: register number to write + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + */ +int mdiobus_c45_modify_changed(struct mii_bus *bus, int devad, int addr, + u32 regnum, u16 mask, u16 set) +{ + int err; + + mutex_lock(&bus->mdio_lock); + err = __mdiobus_c45_modify_changed(bus, addr, devad, regnum, mask, set); + mutex_unlock(&bus->mdio_lock); + + return err; +} +EXPORT_SYMBOL_GPL(mdiobus_c45_modify_changed); + +/** * mdio_bus_match - determine if given MDIO driver supports the given * MDIO device * @dev: target MDIO device diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 26ce0c5defcd..d5b80c31ab91 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -2088,7 +2088,8 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev, const struct kszphy_type *type = phydev->drv->driver_data; unsigned long pair_mask = type->pair_mask; int retries = 20; - int pair, ret; + int ret = 0; + int pair; *finished = false; @@ -2794,13 +2795,11 @@ static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv) } while (PTP_CAP_INFO_RX_TS_CNT_GET_(reg) > 0); } -static void lan8814_handle_ptp_interrupt(struct phy_device *phydev) +static void lan8814_handle_ptp_interrupt(struct phy_device *phydev, u16 status) { struct kszphy_priv *priv = phydev->priv; struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv; - u16 status; - status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS); if (status & PTP_TSU_INT_STS_PTP_TX_TS_EN_) lan8814_get_tx_ts(ptp_priv); @@ -2899,8 +2898,8 @@ static int lan8804_config_intr(struct phy_device *phydev) static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev) { - int irq_status, tsu_irq_status; int ret = IRQ_NONE; + int irq_status; irq_status = phy_read(phydev, LAN8814_INTS); if (irq_status < 0) { @@ 
-2913,20 +2912,13 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev) ret = IRQ_HANDLED; } - while (1) { - tsu_irq_status = lanphy_read_page_reg(phydev, 4, - LAN8814_INTR_STS_REG); - - if (tsu_irq_status > 0 && - (tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ | - LAN8814_INTR_STS_REG_1588_TSU1_ | - LAN8814_INTR_STS_REG_1588_TSU2_ | - LAN8814_INTR_STS_REG_1588_TSU3_))) { - lan8814_handle_ptp_interrupt(phydev); - ret = IRQ_HANDLED; - } else { + while (true) { + irq_status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS); + if (!irq_status) break; - } + + lan8814_handle_ptp_interrupt(phydev, irq_status); + ret = IRQ_HANDLED; } return ret; @@ -3016,10 +3008,6 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev) { struct lan8814_shared_priv *shared = phydev->shared->priv; - if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) || - !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) - return 0; - /* Initialise shared lock for clock*/ mutex_init(&shared->shared_lock); @@ -3039,12 +3027,16 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev) shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info, &phydev->mdio.dev); - if (IS_ERR_OR_NULL(shared->ptp_clock)) { + if (IS_ERR(shared->ptp_clock)) { phydev_err(phydev, "ptp_clock_register failed %lu\n", PTR_ERR(shared->ptp_clock)); return -EINVAL; } + /* Check if PHC support is missing at the configuration level */ + if (!shared->ptp_clock) + return 0; + phydev_dbg(phydev, "successfully registered ptp clock\n"); shared->phydev = phydev; diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c index 147d7a5a9b35..e5972b4ef6e8 100644 --- a/drivers/net/phy/mxl-gpy.c +++ b/drivers/net/phy/mxl-gpy.c @@ -12,6 +12,7 @@ #include <linux/mutex.h> #include <linux/phy.h> #include <linux/polynomial.h> +#include <linux/property.h> #include <linux/netdevice.h> /* PHY ID */ @@ -292,6 +293,10 @@ static int gpy_probe(struct phy_device *phydev) phydev->priv = priv; mutex_init(&priv->mbox_lock); + if (gpy_has_broken_mdint(phydev) && + !device_property_present(dev, "maxlinear,use-broken-interrupts")) + phydev->dev_flags |= PHY_F_NO_IRQ; + fw_version = phy_read(phydev, PHY_FWV); if (fw_version < 0) return fw_version; diff --git a/drivers/net/phy/ncn26000.c b/drivers/net/phy/ncn26000.c new file mode 100644 index 000000000000..5680584f659e --- /dev/null +++ b/drivers/net/phy/ncn26000.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Driver for the onsemi 10BASE-T1S NCN26000 PHYs family. 
+ * + * Copyright 2022 onsemi + */ +#include <linux/kernel.h> +#include <linux/bitfield.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mii.h> +#include <linux/phy.h> + +#include "mdio-open-alliance.h" + +#define PHY_ID_NCN26000 0x180FF5A1 + +#define NCN26000_REG_IRQ_CTL 16 +#define NCN26000_REG_IRQ_STATUS 17 + +// the NCN26000 maps link_ctrl to BMCR_ANENABLE +#define NCN26000_BCMR_LINK_CTRL_BIT BMCR_ANENABLE + +// the NCN26000 maps link_status to BMSR_ANEGCOMPLETE +#define NCN26000_BMSR_LINK_STATUS_BIT BMSR_ANEGCOMPLETE + +#define NCN26000_IRQ_LINKST_BIT BIT(0) +#define NCN26000_IRQ_PLCAST_BIT BIT(1) +#define NCN26000_IRQ_LJABBER_BIT BIT(2) +#define NCN26000_IRQ_RJABBER_BIT BIT(3) +#define NCN26000_IRQ_PLCAREC_BIT BIT(4) +#define NCN26000_IRQ_PHYSCOL_BIT BIT(5) +#define NCN26000_IRQ_RESET_BIT BIT(15) + +#define TO_TMR_DEFAULT 32 + +static int ncn26000_config_init(struct phy_device *phydev) +{ + /* HW bug workaround: the default value of the PLCA TO_TIMER should be + * 32, where the current version of NCN26000 reports 24. This will be + * fixed in future PHY versions. For the time being, we force the + * correct default here. + */ + return phy_write_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_TOTMR, + TO_TMR_DEFAULT); +} + +static int ncn26000_config_aneg(struct phy_device *phydev) +{ + /* Note: the NCN26000 supports only P2MP link mode. Therefore, AN is not + * supported. However, this function is invoked by phylib to enable the + * PHY, regardless of the AN support. + */ + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + phydev->mdix = ETH_TP_MDI; + + // bring up the link + return phy_write(phydev, MII_BMCR, NCN26000_BCMR_LINK_CTRL_BIT); +} + +static int ncn26000_read_status(struct phy_device *phydev) +{ + /* The NCN26000 reports NCN26000_LINK_STATUS_BIT if the link status of + * the PHY is up. It further reports the logical AND of the link status + * and the PLCA status in the BMSR_LSTATUS bit. + */ + int ret; + + /* The link state is latched low so that momentary link + * drops can be detected. Do not double-read the status + * in polling mode to detect such short link drops except + * the link was already down. 
+ */ + if (!phy_polling_mode(phydev) || !phydev->link) { + ret = phy_read(phydev, MII_BMSR); + if (ret < 0) + return ret; + else if (ret & NCN26000_BMSR_LINK_STATUS_BIT) + goto upd_link; + } + + ret = phy_read(phydev, MII_BMSR); + if (ret < 0) + return ret; + +upd_link: + // update link status + if (ret & NCN26000_BMSR_LINK_STATUS_BIT) { + phydev->link = 1; + phydev->pause = 0; + phydev->duplex = DUPLEX_HALF; + phydev->speed = SPEED_10; + } else { + phydev->link = 0; + phydev->duplex = DUPLEX_UNKNOWN; + phydev->speed = SPEED_UNKNOWN; + } + + return 0; +} + +static irqreturn_t ncn26000_handle_interrupt(struct phy_device *phydev) +{ + int ret; + + // read and aknowledge the IRQ status register + ret = phy_read(phydev, NCN26000_REG_IRQ_STATUS); + + // check only link status changes + if (ret < 0 || (ret & NCN26000_REG_IRQ_STATUS) == 0) + return IRQ_NONE; + + phy_trigger_machine(phydev); + return IRQ_HANDLED; +} + +static int ncn26000_config_intr(struct phy_device *phydev) +{ + int ret; + u16 irqe; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + // acknowledge IRQs + ret = phy_read(phydev, NCN26000_REG_IRQ_STATUS); + if (ret < 0) + return ret; + + // get link status notifications + irqe = NCN26000_IRQ_LINKST_BIT; + } else { + // disable all IRQs + irqe = 0; + } + + ret = phy_write(phydev, NCN26000_REG_IRQ_CTL, irqe); + if (ret != 0) + return ret; + + return 0; +} + +static struct phy_driver ncn26000_driver[] = { + { + PHY_ID_MATCH_MODEL(PHY_ID_NCN26000), + .name = "NCN26000", + .features = PHY_BASIC_T1S_P2MP_FEATURES, + .config_init = ncn26000_config_init, + .config_intr = ncn26000_config_intr, + .config_aneg = ncn26000_config_aneg, + .read_status = ncn26000_read_status, + .handle_interrupt = ncn26000_handle_interrupt, + .get_plca_cfg = genphy_c45_plca_get_cfg, + .set_plca_cfg = genphy_c45_plca_set_cfg, + .get_plca_status = genphy_c45_plca_get_status, + .soft_reset = genphy_soft_reset, + }, +}; + +module_phy_driver(ncn26000_driver); + +static struct mdio_device_id __maybe_unused ncn26000_tbl[] = { + { PHY_ID_MATCH_MODEL(PHY_ID_NCN26000) }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, ncn26000_tbl); + +MODULE_AUTHOR("Piergiorgio Beruto"); +MODULE_DESCRIPTION("onsemi 10BASE-T1S PHY driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index a87a4b3ffce4..9f9565a4819d 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -8,6 +8,8 @@ #include <linux/mii.h> #include <linux/phy.h> +#include "mdio-open-alliance.h" + /** * genphy_c45_baset1_able - checks if the PMA has BASE-T1 extended abilities * @phydev: target phy_device struct @@ -931,6 +933,197 @@ int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable) } EXPORT_SYMBOL_GPL(genphy_c45_fast_retrain); +/** + * genphy_c45_plca_get_cfg - get PLCA configuration from standard registers + * @phydev: target phy_device struct + * @plca_cfg: output structure to store the PLCA configuration + * + * Description: if the PHY complies to the Open Alliance TC14 10BASE-T1S PLCA + * Management Registers specifications, this function can be used to retrieve + * the current PLCA configuration from the standard registers in MMD 31. 
+ */ +int genphy_c45_plca_get_cfg(struct phy_device *phydev, + struct phy_plca_cfg *plca_cfg) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_IDVER); + if (ret < 0) + return ret; + + if ((ret & MDIO_OATC14_PLCA_IDM) != OATC14_IDM) + return -ENODEV; + + plca_cfg->version = ret & ~MDIO_OATC14_PLCA_IDM; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_CTRL0); + if (ret < 0) + return ret; + + plca_cfg->enabled = !!(ret & MDIO_OATC14_PLCA_EN); + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_CTRL1); + if (ret < 0) + return ret; + + plca_cfg->node_cnt = (ret & MDIO_OATC14_PLCA_NCNT) >> 8; + plca_cfg->node_id = (ret & MDIO_OATC14_PLCA_ID); + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_TOTMR); + if (ret < 0) + return ret; + + plca_cfg->to_tmr = ret & MDIO_OATC14_PLCA_TOT; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_BURST); + if (ret < 0) + return ret; + + plca_cfg->burst_cnt = (ret & MDIO_OATC14_PLCA_MAXBC) >> 8; + plca_cfg->burst_tmr = (ret & MDIO_OATC14_PLCA_BTMR); + + return 0; +} +EXPORT_SYMBOL_GPL(genphy_c45_plca_get_cfg); + +/** + * genphy_c45_plca_set_cfg - set PLCA configuration using standard registers + * @phydev: target phy_device struct + * @plca_cfg: structure containing the PLCA configuration. Fields set to -1 are + * not to be changed. + * + * Description: if the PHY complies to the Open Alliance TC14 10BASE-T1S PLCA + * Management Registers specifications, this function can be used to modify + * the PLCA configuration using the standard registers in MMD 31. + */ +int genphy_c45_plca_set_cfg(struct phy_device *phydev, + const struct phy_plca_cfg *plca_cfg) +{ + u16 val = 0; + int ret; + + // PLCA IDVER is read-only + if (plca_cfg->version >= 0) + return -EINVAL; + + // first of all, disable PLCA if required + if (plca_cfg->enabled == 0) { + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, + MDIO_OATC14_PLCA_CTRL0, + MDIO_OATC14_PLCA_EN); + + if (ret < 0) + return ret; + } + + // check if we need to set the PLCA node count, node ID, or both + if (plca_cfg->node_cnt >= 0 || plca_cfg->node_id >= 0) { + /* if one between node count and node ID is -not- to be + * changed, read the register to later perform merge/purge of + * the configuration as appropriate + */ + if (plca_cfg->node_cnt < 0 || plca_cfg->node_id < 0) { + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, + MDIO_OATC14_PLCA_CTRL1); + + if (ret < 0) + return ret; + + val = ret; + } + + if (plca_cfg->node_cnt >= 0) + val = (val & ~MDIO_OATC14_PLCA_NCNT) | + (plca_cfg->node_cnt << 8); + + if (plca_cfg->node_id >= 0) + val = (val & ~MDIO_OATC14_PLCA_ID) | + (plca_cfg->node_id); + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + MDIO_OATC14_PLCA_CTRL1, val); + + if (ret < 0) + return ret; + } + + if (plca_cfg->to_tmr >= 0) { + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + MDIO_OATC14_PLCA_TOTMR, + plca_cfg->to_tmr); + + if (ret < 0) + return ret; + } + + // check if we need to set the PLCA burst count, burst timer, or both + if (plca_cfg->burst_cnt >= 0 || plca_cfg->burst_tmr >= 0) { + /* if one between burst count and burst timer is -not- to be + * changed, read the register to later perform merge/purge of + * the configuration as appropriate + */ + if (plca_cfg->burst_cnt < 0 || plca_cfg->burst_tmr < 0) { + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, + MDIO_OATC14_PLCA_BURST); + + if (ret < 0) + return ret; + + val = ret; + } + + if (plca_cfg->burst_cnt >= 0) + val = (val & ~MDIO_OATC14_PLCA_MAXBC) | + (plca_cfg->burst_cnt << 8); + + if 
(plca_cfg->burst_tmr >= 0) + val = (val & ~MDIO_OATC14_PLCA_BTMR) | + (plca_cfg->burst_tmr); + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + MDIO_OATC14_PLCA_BURST, val); + + if (ret < 0) + return ret; + } + + // if we need to enable PLCA, do it at the end + if (plca_cfg->enabled > 0) { + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, + MDIO_OATC14_PLCA_CTRL0, + MDIO_OATC14_PLCA_EN); + + if (ret < 0) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(genphy_c45_plca_set_cfg); + +/** + * genphy_c45_plca_get_status - get PLCA status from standard registers + * @phydev: target phy_device struct + * @plca_st: output structure to store the PLCA status + * + * Description: if the PHY complies to the Open Alliance TC14 10BASE-T1S PLCA + * Management Registers specifications, this function can be used to retrieve + * the current PLCA status information from the standard registers in MMD 31. + */ +int genphy_c45_plca_get_status(struct phy_device *phydev, + struct phy_plca_status *plca_st) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_STATUS); + if (ret < 0) + return ret; + + plca_st->pst = !!(ret & MDIO_OATC14_PLCA_PST); + return 0; +} +EXPORT_SYMBOL_GPL(genphy_c45_plca_get_status); + struct phy_driver genphy_c45_driver = { .phy_id = 0xffffffff, .phy_id_mask = 0xffffffff, diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 5d08c627a516..a64186dc53f8 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -13,7 +13,7 @@ */ const char *phy_speed_to_str(int speed) { - BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 99, + BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 102, "Enum ethtool_link_mode_bit_indices and phylib are out of sync. " "If a speed or mode has been added please update phy_speed_to_str " "and the PHY settings array.\n"); @@ -260,6 +260,9 @@ static const struct phy_setting settings[] = { PHY_SETTING( 10, FULL, 10baseT_Full ), PHY_SETTING( 10, HALF, 10baseT_Half ), PHY_SETTING( 10, FULL, 10baseT1L_Full ), + PHY_SETTING( 10, FULL, 10baseT1S_Full ), + PHY_SETTING( 10, HALF, 10baseT1S_Half ), + PHY_SETTING( 10, HALF, 10baseT1S_P2MP_Half ), }; #undef PHY_SETTING diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index e5b6cb1a77f9..3378ca4f49b6 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -544,6 +544,198 @@ int phy_ethtool_get_stats(struct phy_device *phydev, EXPORT_SYMBOL(phy_ethtool_get_stats); /** + * phy_ethtool_get_plca_cfg - Get PLCA RS configuration + * @phydev: the phy_device struct + * @plca_cfg: where to store the retrieved configuration + * + * Retrieve the PLCA configuration from the PHY. Return 0 on success or a + * negative value if an error occurred. + */ +int phy_ethtool_get_plca_cfg(struct phy_device *phydev, + struct phy_plca_cfg *plca_cfg) +{ + int ret; + + if (!phydev->drv) { + ret = -EIO; + goto out; + } + + if (!phydev->drv->get_plca_cfg) { + ret = -EOPNOTSUPP; + goto out; + } + + mutex_lock(&phydev->lock); + ret = phydev->drv->get_plca_cfg(phydev, plca_cfg); + + mutex_unlock(&phydev->lock); +out: + return ret; +} + +/** + * plca_check_valid - Check PLCA configuration before enabling + * @phydev: the phy_device struct + * @plca_cfg: current PLCA configuration + * @extack: extack for reporting useful error messages + * + * Checks whether the PLCA and PHY configuration are consistent and it is safe + * to enable PLCA. Returns 0 on success or a negative value if the PLCA or PHY + * configuration is not consistent. 
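Note the convention the PLCA get/set plumbing relies on: a negative field means "not supported" on the get side and "do not change" on the set side. As a rough caller-side illustration (values chosen arbitrarily, with phydev and extack coming from the surrounding ethtool context), enabling PLCA as node 1 of an 8-node mixing segment while leaving the timers untouched would look like this:

/* Illustrative only: -1 marks fields that must not be modified. */
struct phy_plca_cfg cfg = {
	.version	= -1,
	.enabled	= 1,
	.node_id	= 1,
	.node_cnt	= 8,
	.to_tmr		= -1,
	.burst_cnt	= -1,
	.burst_tmr	= -1,
};
int err = phy_ethtool_set_plca_cfg(phydev, &cfg, extack);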
+ */ +static int plca_check_valid(struct phy_device *phydev, + const struct phy_plca_cfg *plca_cfg, + struct netlink_ext_ack *extack) +{ + int ret = 0; + + if (!linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT, + phydev->advertising)) { + ret = -EOPNOTSUPP; + NL_SET_ERR_MSG(extack, + "Point to Multi-Point mode is not enabled"); + } else if (plca_cfg->node_id >= 255) { + NL_SET_ERR_MSG(extack, "PLCA node ID is not set"); + ret = -EINVAL; + } + + return ret; +} + +/** + * phy_ethtool_set_plca_cfg - Set PLCA RS configuration + * @phydev: the phy_device struct + * @plca_cfg: new PLCA configuration to apply + * @extack: extack for reporting useful error messages + * + * Sets the PLCA configuration in the PHY. Return 0 on success or a + * negative value if an error occurred. + */ +int phy_ethtool_set_plca_cfg(struct phy_device *phydev, + const struct phy_plca_cfg *plca_cfg, + struct netlink_ext_ack *extack) +{ + struct phy_plca_cfg *curr_plca_cfg; + int ret; + + if (!phydev->drv) { + ret = -EIO; + goto out; + } + + if (!phydev->drv->set_plca_cfg || + !phydev->drv->get_plca_cfg) { + ret = -EOPNOTSUPP; + goto out; + } + + curr_plca_cfg = kmalloc(sizeof(*curr_plca_cfg), GFP_KERNEL); + if (!curr_plca_cfg) { + ret = -ENOMEM; + goto out; + } + + mutex_lock(&phydev->lock); + + ret = phydev->drv->get_plca_cfg(phydev, curr_plca_cfg); + if (ret) + goto out_drv; + + if (curr_plca_cfg->enabled < 0 && plca_cfg->enabled >= 0) { + NL_SET_ERR_MSG(extack, + "PHY does not support changing the PLCA 'enable' attribute"); + ret = -EINVAL; + goto out_drv; + } + + if (curr_plca_cfg->node_id < 0 && plca_cfg->node_id >= 0) { + NL_SET_ERR_MSG(extack, + "PHY does not support changing the PLCA 'local node ID' attribute"); + ret = -EINVAL; + goto out_drv; + } + + if (curr_plca_cfg->node_cnt < 0 && plca_cfg->node_cnt >= 0) { + NL_SET_ERR_MSG(extack, + "PHY does not support changing the PLCA 'node count' attribute"); + ret = -EINVAL; + goto out_drv; + } + + if (curr_plca_cfg->to_tmr < 0 && plca_cfg->to_tmr >= 0) { + NL_SET_ERR_MSG(extack, + "PHY does not support changing the PLCA 'TO timer' attribute"); + ret = -EINVAL; + goto out_drv; + } + + if (curr_plca_cfg->burst_cnt < 0 && plca_cfg->burst_cnt >= 0) { + NL_SET_ERR_MSG(extack, + "PHY does not support changing the PLCA 'burst count' attribute"); + ret = -EINVAL; + goto out_drv; + } + + if (curr_plca_cfg->burst_tmr < 0 && plca_cfg->burst_tmr >= 0) { + NL_SET_ERR_MSG(extack, + "PHY does not support changing the PLCA 'burst timer' attribute"); + ret = -EINVAL; + goto out_drv; + } + + // if enabling PLCA, perform a few sanity checks + if (plca_cfg->enabled > 0) { + // allow setting node_id concurrently with enabled + if (plca_cfg->node_id >= 0) + curr_plca_cfg->node_id = plca_cfg->node_id; + + ret = plca_check_valid(phydev, curr_plca_cfg, extack); + if (ret) + goto out_drv; + } + + ret = phydev->drv->set_plca_cfg(phydev, plca_cfg); + +out_drv: + kfree(curr_plca_cfg); + mutex_unlock(&phydev->lock); +out: + return ret; +} + +/** + * phy_ethtool_get_plca_status - Get PLCA RS status information + * @phydev: the phy_device struct + * @plca_st: where to store the retrieved status information + * + * Retrieve the PLCA status information from the PHY. Return 0 on success or a + * negative value if an error occurred. 
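Once a PHY driver fills in these callbacks (directly or via the genphy_c45_plca_* helpers), the PLCA reconciliation sublayer becomes configurable from userspace through ethtool's netlink PLCA commands; with a sufficiently recent ethtool this is roughly "ethtool --get-plca-cfg eth0", "ethtool --set-plca-cfg eth0 enable on node-id 1 node-cnt 8" and "ethtool --get-plca-status eth0", though the exact option spelling depends on the ethtool version in use.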
+ */ +int phy_ethtool_get_plca_status(struct phy_device *phydev, + struct phy_plca_status *plca_st) +{ + int ret; + + if (!phydev->drv) { + ret = -EIO; + goto out; + } + + if (!phydev->drv->get_plca_status) { + ret = -EOPNOTSUPP; + goto out; + } + + mutex_lock(&phydev->lock); + ret = phydev->drv->get_plca_status(phydev, plca_st); + + mutex_unlock(&phydev->lock); +out: + return ret; +} + +/** * phy_start_cable_test - Start a cable test * * @phydev: the phy_device struct diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 716870a4499c..9ba8f973f26f 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -45,6 +45,9 @@ EXPORT_SYMBOL_GPL(phy_basic_features); __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init; EXPORT_SYMBOL_GPL(phy_basic_t1_features); +__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init; +EXPORT_SYMBOL_GPL(phy_basic_t1s_p2mp_features); + __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init; EXPORT_SYMBOL_GPL(phy_gbit_features); @@ -98,6 +101,12 @@ const int phy_basic_t1_features_array[3] = { }; EXPORT_SYMBOL_GPL(phy_basic_t1_features_array); +const int phy_basic_t1s_p2mp_features_array[2] = { + ETHTOOL_LINK_MODE_TP_BIT, + ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT, +}; +EXPORT_SYMBOL_GPL(phy_basic_t1s_p2mp_features_array); + const int phy_gbit_features_array[2] = { ETHTOOL_LINK_MODE_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT, @@ -138,6 +147,11 @@ static void features_init(void) ARRAY_SIZE(phy_basic_t1_features_array), phy_basic_t1_features); + /* 10 half, P2MP, TP */ + linkmode_set_bit_array(phy_basic_t1s_p2mp_features_array, + ARRAY_SIZE(phy_basic_t1s_p2mp_features_array), + phy_basic_t1s_p2mp_features); + /* 10/100 half/full + 1000 half/full */ linkmode_set_bit_array(phy_basic_ports_array, ARRAY_SIZE(phy_basic_ports_array), @@ -932,7 +946,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) * probe with C45 to see if we're able to get a valid PHY ID in the C45 * space, if successful, create the C45 PHY device. */ - if (!is_c45 && phy_id == 0 && bus->probe_capabilities >= MDIOBUS_C45) { + if (!is_c45 && phy_id == 0 && bus->read_c45) { r = get_phy_c45_ids(bus, addr, &c45_ids); if (!r) return phy_device_create(bus, addr, phy_id, @@ -1487,6 +1501,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, phydev->interrupts = PHY_INTERRUPT_DISABLED; + /* PHYs can request to use poll mode even though they have an + * associated interrupt line. This could be the case if they + * detect a broken interrupt handling. + */ + if (phydev->dev_flags & PHY_F_NO_IRQ) + phydev->irq = PHY_POLL; + /* Port is set to PORT_TP by default and the actual PHY driver will set * it to different value depending on the PHY configuration. 
If we have * the generic PHY driver we can't figure it out, thus set the old @@ -3262,6 +3283,9 @@ static const struct ethtool_phy_ops phy_ethtool_phy_ops = { .get_sset_count = phy_ethtool_get_sset_count, .get_strings = phy_ethtool_get_strings, .get_stats = phy_ethtool_get_stats, + .get_plca_cfg = phy_ethtool_get_plca_cfg, + .set_plca_cfg = phy_ethtool_set_plca_cfg, + .get_plca_status = phy_ethtool_get_plca_status, .start_cable_test = phy_start_cable_test, .start_cable_test_tdr = phy_start_cable_test_tdr, }; diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 09cc65c0da93..319790221d7f 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -241,12 +241,16 @@ void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps) if (caps & MAC_ASYM_PAUSE) __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes); - if (caps & MAC_10HD) + if (caps & MAC_10HD) { __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Half_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT, linkmodes); + } if (caps & MAC_10FD) { __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes); __set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Full_BIT, linkmodes); } if (caps & MAC_100HD) { diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 83b99d95b278..c02cad6478a8 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -1,6 +1,4 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/acpi.h> -#include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> @@ -144,7 +142,7 @@ static const char *sm_state_to_str(unsigned short sm_state) return sm_state_strings[sm_state]; } -static const char *gpio_of_names[] = { +static const char *gpio_names[] = { "mod-def0", "los", "tx-fault", @@ -2563,7 +2561,7 @@ static void sfp_check_state(struct sfp *sfp) for (i = 0; i < GPIO_MAX; i++) if (changed & BIT(i)) - dev_dbg(sfp->dev, "%s %u -> %u\n", gpio_of_names[i], + dev_dbg(sfp->dev, "%s %u -> %u\n", gpio_names[i], !!(sfp->state & BIT(i)), !!(state & BIT(i))); state |= sfp->state & (SFP_F_TX_DISABLE | SFP_F_RATE_SELECT); @@ -2644,10 +2642,8 @@ static void sfp_cleanup(void *data) static int sfp_i2c_get(struct sfp *sfp) { - struct acpi_handle *acpi_handle; struct fwnode_handle *h; struct i2c_adapter *i2c; - struct device_node *np; int err; h = fwnode_find_reference(dev_fwnode(sfp->dev), "i2c-bus", 0); @@ -2656,16 +2652,7 @@ static int sfp_i2c_get(struct sfp *sfp) return -ENODEV; } - if (is_acpi_device_node(h)) { - acpi_handle = ACPI_HANDLE_FWNODE(h); - i2c = i2c_acpi_find_adapter_by_handle(acpi_handle); - } else if ((np = to_of_node(h)) != NULL) { - i2c = of_find_i2c_adapter_by_node(np); - } else { - err = -EINVAL; - goto put; - } - + i2c = i2c_get_adapter_by_fwnode(h); if (!i2c) { err = -EPROBE_DEFER; goto put; @@ -2696,19 +2683,11 @@ static int sfp_probe(struct platform_device *pdev) if (err < 0) return err; - sff = sfp->type = &sfp_data; - - if (pdev->dev.of_node) { - const struct of_device_id *id; + sff = device_get_match_data(sfp->dev); + if (!sff) + sff = &sfp_data; - id = of_match_node(sfp_of_match, pdev->dev.of_node); - if (WARN_ON(!id)) - return -EINVAL; - - sff = sfp->type = id->data; - } else if (!has_acpi_companion(&pdev->dev)) { - return -EINVAL; - } + sfp->type = sff; err = sfp_i2c_get(sfp); if (err) @@ -2717,7 +2696,7 @@ static int sfp_probe(struct platform_device *pdev) for (i = 0; i < GPIO_MAX; 
i++) if (sff->gpios & BIT(i)) { sfp->gpio[i] = devm_gpiod_get_optional(sfp->dev, - gpio_of_names[i], gpio_flags[i]); + gpio_names[i], gpio_flags[i]); if (IS_ERR(sfp->gpio[i])) return PTR_ERR(sfp->gpio[i]); } @@ -2772,7 +2751,7 @@ static int sfp_probe(struct platform_device *pdev) sfp_irq_name = devm_kasprintf(sfp->dev, GFP_KERNEL, "%s-%s", dev_name(sfp->dev), - gpio_of_names[i]); + gpio_names[i]); if (!sfp_irq_name) return -ENOMEM; diff --git a/drivers/net/thunderbolt/Kconfig b/drivers/net/thunderbolt/Kconfig new file mode 100644 index 000000000000..e127848c8cbd --- /dev/null +++ b/drivers/net/thunderbolt/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config USB4_NET + tristate "Networking over USB4 and Thunderbolt cables" + depends on USB4 && INET + help + Select this if you want to create network between two computers + over a USB4 and Thunderbolt cables. The driver supports Apple + ThunderboltIP protocol and allows communication with any host + supporting the same protocol including Windows and macOS. + + To compile this driver a module, choose M here. The module will be + called thunderbolt_net. diff --git a/drivers/net/thunderbolt/Makefile b/drivers/net/thunderbolt/Makefile new file mode 100644 index 000000000000..e81c2a4849f0 --- /dev/null +++ b/drivers/net/thunderbolt/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_USB4_NET) := thunderbolt_net.o +thunderbolt_net-objs := main.o trace.o + +# Tracepoints need to know where to find trace.h +CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt/main.c index 990484776f2d..26ef3706445e 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt/main.c @@ -23,6 +23,8 @@ #include <net/ip6_checksum.h> +#include "trace.h" + /* Protocol timeouts in ms */ #define TBNET_LOGIN_DELAY 4500 #define TBNET_LOGIN_TIMEOUT 500 @@ -305,6 +307,8 @@ static int tbnet_logout_request(struct tbnet *net) static void start_login(struct tbnet *net) { + netdev_dbg(net->dev, "login started\n"); + mutex_lock(&net->connection_lock); net->login_sent = false; net->login_received = false; @@ -318,6 +322,8 @@ static void stop_login(struct tbnet *net) { cancel_delayed_work_sync(&net->login_work); cancel_work_sync(&net->connected_work); + + netdev_dbg(net->dev, "login stopped\n"); } static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf) @@ -349,6 +355,8 @@ static void tbnet_free_buffers(struct tbnet_ring *ring) size = TBNET_RX_PAGE_SIZE; } + trace_tbnet_free_frame(i, tf->page, tf->frame.buffer_phy, dir); + if (tf->frame.buffer_phy) dma_unmap_page(dma_dev, tf->frame.buffer_phy, size, dir); @@ -374,6 +382,8 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout) int ret, retries = TBNET_LOGOUT_RETRIES; while (send_logout && retries-- > 0) { + netdev_dbg(net->dev, "sending logout request %u\n", + retries); ret = tbnet_logout_request(net); if (ret != -ETIMEDOUT) break; @@ -400,6 +410,8 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout) net->login_sent = false; net->login_received = false; + netdev_dbg(net->dev, "network traffic stopped\n"); + mutex_unlock(&net->connection_lock); } @@ -431,12 +443,15 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data) switch (pkg->hdr.type) { case TBIP_LOGIN: + netdev_dbg(net->dev, "remote login request received\n"); if (!netif_running(net->dev)) break; ret = tbnet_login_response(net, route, sequence, pkg->hdr.command_id); if (!ret) { + netdev_dbg(net->dev, "remote login response 
sent\n"); + mutex_lock(&net->connection_lock); net->login_received = true; net->remote_transmit_path = pkg->transmit_path; @@ -458,9 +473,12 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data) break; case TBIP_LOGOUT: + netdev_dbg(net->dev, "remote logout request received\n"); ret = tbnet_logout_response(net, route, sequence, command_id); - if (!ret) + if (!ret) { + netdev_dbg(net->dev, "remote logout response sent\n"); queue_work(system_long_wq, &net->disconnect_work); + } break; default: @@ -512,6 +530,9 @@ static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers) tf->frame.buffer_phy = dma_addr; tf->dev = net->dev; + trace_tbnet_alloc_rx_frame(index, tf->page, dma_addr, + DMA_FROM_DEVICE); + tb_ring_rx(ring->ring, &tf->frame); ring->prod++; @@ -588,6 +609,8 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net) tf->frame.callback = tbnet_tx_callback; tf->frame.sof = TBIP_PDF_FRAME_START; tf->frame.eof = TBIP_PDF_FRAME_END; + + trace_tbnet_alloc_tx_frame(i, tf->page, dma_addr, DMA_TO_DEVICE); } ring->cons = 0; @@ -612,6 +635,8 @@ static void tbnet_connected_work(struct work_struct *work) if (!connected) return; + netdev_dbg(net->dev, "login successful, enabling paths\n"); + ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path); if (ret != net->remote_transmit_path) { netdev_err(net->dev, "failed to allocate Rx HopID\n"); @@ -647,6 +672,8 @@ static void tbnet_connected_work(struct work_struct *work) netif_carrier_on(net->dev); netif_start_queue(net->dev); + + netdev_dbg(net->dev, "network traffic started\n"); return; err_free_tx_buffers: @@ -668,8 +695,13 @@ static void tbnet_login_work(struct work_struct *work) if (netif_carrier_ok(net->dev)) return; + netdev_dbg(net->dev, "sending login request, retries=%u\n", + net->login_retries); + ret = tbnet_login_request(net, net->login_retries % 4); if (ret) { + netdev_dbg(net->dev, "sending login request failed, ret=%d\n", + ret); if (net->login_retries++ < TBNET_LOGIN_RETRIES) { queue_delayed_work(system_long_wq, &net->login_work, delay); @@ -677,6 +709,8 @@ static void tbnet_login_work(struct work_struct *work) netdev_info(net->dev, "ThunderboltIP login timed out\n"); } } else { + netdev_dbg(net->dev, "received login reply\n"); + net->login_retries = 0; mutex_lock(&net->connection_lock); @@ -807,12 +841,16 @@ static int tbnet_poll(struct napi_struct *napi, int budget) hdr = page_address(page); if (!tbnet_check_frame(net, tf, hdr)) { + trace_tbnet_invalid_rx_ip_frame(hdr->frame_size, + hdr->frame_id, hdr->frame_index, hdr->frame_count); __free_pages(page, TBNET_RX_PAGE_ORDER); dev_kfree_skb_any(net->skb); net->skb = NULL; continue; } + trace_tbnet_rx_ip_frame(hdr->frame_size, hdr->frame_id, + hdr->frame_index, hdr->frame_count); frame_size = le32_to_cpu(hdr->frame_size); skb = net->skb; @@ -846,6 +884,7 @@ static int tbnet_poll(struct napi_struct *napi, int budget) if (last) { skb->protocol = eth_type_trans(skb, net->dev); + trace_tbnet_rx_skb(skb); napi_gro_receive(&net->napi, skb); net->skb = NULL; } @@ -965,6 +1004,8 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb, for (i = 0; i < frame_count; i++) { hdr = page_address(frames[i]->page); hdr->frame_count = cpu_to_le32(frame_count); + trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id, + hdr->frame_index, hdr->frame_count); dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy, tbnet_frame_size(frames[i]), DMA_TO_DEVICE); @@ -1029,6 +1070,8 @@ static bool tbnet_xmit_csum_and_map(struct tbnet 
*net, struct sk_buff *skb, len = le32_to_cpu(hdr->frame_size) - offset; wsum = csum_partial(dest, len, wsum); hdr->frame_count = cpu_to_le32(frame_count); + trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id, + hdr->frame_index, hdr->frame_count); offset = 0; } @@ -1071,6 +1114,8 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb, bool unmap = false; void *dest; + trace_tbnet_tx_skb(skb); + nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE); if (tbnet_available_buffers(&net->tx_ring) < nframes) { netif_stop_queue(net->dev); @@ -1177,6 +1222,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb, net->stats.tx_packets++; net->stats.tx_bytes += skb->len; + trace_tbnet_consume_skb(skb); dev_consume_skb_any(skb); return NETDEV_TX_OK; diff --git a/drivers/net/thunderbolt/trace.c b/drivers/net/thunderbolt/trace.c new file mode 100644 index 000000000000..1b1499520a44 --- /dev/null +++ b/drivers/net/thunderbolt/trace.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Tracepoints for Thunderbolt/USB4 networking driver + * + * Copyright (C) 2023, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#define CREATE_TRACE_POINTS +#include "trace.h" diff --git a/drivers/net/thunderbolt/trace.h b/drivers/net/thunderbolt/trace.h new file mode 100644 index 000000000000..9626eadaebb9 --- /dev/null +++ b/drivers/net/thunderbolt/trace.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Tracepoints for Thunderbolt/USB4 networking driver + * + * Copyright (C) 2023, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM thunderbolt_net + +#if !defined(__TRACE_THUNDERBOLT_NET_H) || defined(TRACE_HEADER_MULTI_READ) +#define __TRACE_THUNDERBOLT_NET_H + +#include <linux/dma-direction.h> +#include <linux/skbuff.h> +#include <linux/tracepoint.h> + +#define DMA_DATA_DIRECTION_NAMES \ + { DMA_BIDIRECTIONAL, "DMA_BIDIRECTIONAL" }, \ + { DMA_TO_DEVICE, "DMA_TO_DEVICE" }, \ + { DMA_FROM_DEVICE, "DMA_FROM_DEVICE" }, \ + { DMA_NONE, "DMA_NONE" } + +DECLARE_EVENT_CLASS(tbnet_frame, + TP_PROTO(unsigned int index, const void *page, dma_addr_t phys, + enum dma_data_direction dir), + TP_ARGS(index, page, phys, dir), + TP_STRUCT__entry( + __field(unsigned int, index) + __field(const void *, page) + __field(dma_addr_t, phys) + __field(enum dma_data_direction, dir) + ), + TP_fast_assign( + __entry->index = index; + __entry->page = page; + __entry->phys = phys; + __entry->dir = dir; + ), + TP_printk("index=%u page=%p phys=%pad dir=%s", + __entry->index, __entry->page, &__entry->phys, + __print_symbolic(__entry->dir, DMA_DATA_DIRECTION_NAMES)) +); + +DEFINE_EVENT(tbnet_frame, tbnet_alloc_rx_frame, + TP_PROTO(unsigned int index, const void *page, dma_addr_t phys, + enum dma_data_direction dir), + TP_ARGS(index, page, phys, dir) +); + +DEFINE_EVENT(tbnet_frame, tbnet_alloc_tx_frame, + TP_PROTO(unsigned int index, const void *page, dma_addr_t phys, + enum dma_data_direction dir), + TP_ARGS(index, page, phys, dir) +); + +DEFINE_EVENT(tbnet_frame, tbnet_free_frame, + TP_PROTO(unsigned int index, const void *page, dma_addr_t phys, + enum dma_data_direction dir), + TP_ARGS(index, page, phys, dir) +); + +DECLARE_EVENT_CLASS(tbnet_ip_frame, + TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count), + TP_ARGS(size, id, index, count), + TP_STRUCT__entry( + __field(u32, size) + __field(u16, id) + __field(u16, index) + __field(u32, count) + ), + TP_fast_assign( + __entry->size = 
le32_to_cpu(size); + __entry->id = le16_to_cpu(id); + __entry->index = le16_to_cpu(index); + __entry->count = le32_to_cpu(count); + ), + TP_printk("id=%u size=%u index=%u count=%u", + __entry->id, __entry->size, __entry->index, __entry->count) +); + +DEFINE_EVENT(tbnet_ip_frame, tbnet_rx_ip_frame, + TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count), + TP_ARGS(size, id, index, count) +); + +DEFINE_EVENT(tbnet_ip_frame, tbnet_invalid_rx_ip_frame, + TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count), + TP_ARGS(size, id, index, count) +); + +DEFINE_EVENT(tbnet_ip_frame, tbnet_tx_ip_frame, + TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count), + TP_ARGS(size, id, index, count) +); + +DECLARE_EVENT_CLASS(tbnet_skb, + TP_PROTO(const struct sk_buff *skb), + TP_ARGS(skb), + TP_STRUCT__entry( + __field(const void *, addr) + __field(unsigned int, len) + __field(unsigned int, data_len) + __field(unsigned int, nr_frags) + ), + TP_fast_assign( + __entry->addr = skb; + __entry->len = skb->len; + __entry->data_len = skb->data_len; + __entry->nr_frags = skb_shinfo(skb)->nr_frags; + ), + TP_printk("skb=%p len=%u data_len=%u nr_frags=%u", + __entry->addr, __entry->len, __entry->data_len, + __entry->nr_frags) +); + +DEFINE_EVENT(tbnet_skb, tbnet_rx_skb, + TP_PROTO(const struct sk_buff *skb), + TP_ARGS(skb) +); + +DEFINE_EVENT(tbnet_skb, tbnet_tx_skb, + TP_PROTO(const struct sk_buff *skb), + TP_ARGS(skb) +); + +DEFINE_EVENT(tbnet_skb, tbnet_consume_skb, + TP_PROTO(const struct sk_buff *skb), + TP_ARGS(skb) +); + +#endif /* _TRACE_THUNDERBOLT_NET_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +#include <trace/define_trace.h> diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index c140edb4b648..80849d115e5d 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -747,13 +747,6 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, -/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */ -{ - USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - /* Realtek RTL8153 Based USB 3.0 Ethernet Adapters */ { USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM, @@ -761,71 +754,6 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, -/* Samsung USB Ethernet Adapters */ -{ - USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 0xa101, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - -#if IS_ENABLED(CONFIG_USB_RTL8152) -/* Linksys USB3GIGV1 Ethernet Adapter */ -{ - USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, -#endif - -/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */ -{ - USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - -/* ThinkPad USB-C Dock (based on Realtek RTL8153) */ -{ - USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - -/* ThinkPad Thunderbolt 3 Dock (based on Realtek RTL8153) */ -{ - USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3069, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - -/* ThinkPad Thunderbolt 3 Dock Gen 2 
(based on Realtek RTL8153) */ -{ - USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3082, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - -/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ -{ - USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - -/* Lenovo USB C to Ethernet Adapter (based on Realtek RTL8153) */ -{ - USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x720c, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - -/* Lenovo USB-C Travel Hub (based on Realtek RTL8153) */ -{ - USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7214, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - /* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM, @@ -833,48 +761,6 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, -/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */ -{ - USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - -/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ -{ - USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - -/* Microsoft Surface 2 dock (based on Realtek RTL8152) */ -{ - USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07ab, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - -/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */ -{ - USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - -/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */ -{ - USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - -/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ -{ - USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, -}, - /* Aquantia AQtion USB to 5GbE Controller (based on AQC111U) */ { USB_DEVICE_AND_INTERFACE_INFO(AQUANTIA_VENDOR_ID, 0xc101, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 23da1d9dafd1..4c5c1df5d7a4 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -9500,9 +9500,8 @@ static int rtl_fw_init(struct r8152 *tp) return 0; } -u8 rtl8152_get_version(struct usb_interface *intf) +static u8 __rtl_get_hw_ver(struct usb_device *udev) { - struct usb_device *udev = interface_to_usbdev(intf); u32 ocp_data = 0; __le32 *tmp; u8 version; @@ -9571,10 +9570,19 @@ u8 rtl8152_get_version(struct usb_interface *intf) break; default: version = RTL_VER_UNKNOWN; - dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data); + dev_info(&udev->dev, "Unknown version 0x%04x\n", ocp_data); break; } + return version; +} + +u8 rtl8152_get_version(struct usb_interface *intf) +{ + u8 version; + + version = __rtl_get_hw_ver(interface_to_usbdev(intf)); + dev_dbg(&intf->dev, "Detected version 0x%04x\n", version); return version; @@ -9618,6 +9626,9 @@ static int rtl8152_probe(struct usb_interface *intf, if (version == 
RTL_VER_UNKNOWN) return -ENODEV; + if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) + return -ENODEV; + if (!rtl_vendor_mode(intf)) return -ENODEV; @@ -9814,43 +9825,35 @@ static void rtl8152_disconnect(struct usb_interface *intf) } } -#define REALTEK_USB_DEVICE(vend, prod) { \ - USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC), \ -}, \ -{ \ - USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_COMM, \ - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), \ -} - /* table of devices that work with this driver */ static const struct usb_device_id rtl8152_table[] = { /* Realtek */ - REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050), - REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8053), - REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152), - REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153), - REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8155), - REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8156), + { USB_DEVICE(VENDOR_ID_REALTEK, 0x8050) }, + { USB_DEVICE(VENDOR_ID_REALTEK, 0x8053) }, + { USB_DEVICE(VENDOR_ID_REALTEK, 0x8152) }, + { USB_DEVICE(VENDOR_ID_REALTEK, 0x8153) }, + { USB_DEVICE(VENDOR_ID_REALTEK, 0x8155) }, + { USB_DEVICE(VENDOR_ID_REALTEK, 0x8156) }, /* Microsoft */ - REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab), - REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6), - REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927), - REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e), - REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387), - REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041), - REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff), - REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601), + { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab) }, + { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6) }, + { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) }, + { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) }, + { USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x304f) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x3054) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x3069) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x3082) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x7205) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x721e) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0xa387) }, + { USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) }, + { USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) }, + { USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) }, {} }; @@ -9870,7 +9873,67 @@ static struct usb_driver rtl8152_driver = { .disable_hub_initiated_lpm = 1, }; -module_usb_driver(rtl8152_driver); +static int rtl8152_cfgselector_probe(struct usb_device *udev) +{ + struct usb_host_config *c; + int i, num_configs; + + /* Switch the device to vendor mode, if and only if the vendor mode + * driver supports it. + */ + if (__rtl_get_hw_ver(udev) == RTL_VER_UNKNOWN) + return 0; + + /* The vendor mode is not always config #1, so to find it out. 
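In other words, the probe scans every configuration for one whose first cached interface is vendor class and activates it. Condensed to its essentials (a sketch only, with a hypothetical helper name; it reuses the structures from this hunk and trims the error reporting):

	#include <linux/usb.h>

	/* Sketch (not a drop-in): activate the first configuration whose
	 * first interface descriptor is vendor-specific. */
	static int pick_vendor_config(struct usb_device *udev)
	{
		struct usb_host_config *c = udev->config;
		int i;

		for (i = 0; i < udev->descriptor.bNumConfigurations; i++, c++) {
			if (!c->desc.bNumInterfaces)
				continue;
			if (c->intf_cache[0]->altsetting->desc.bInterfaceClass ==
			    USB_CLASS_VENDOR_SPEC)
				return usb_set_configuration(udev,
						c->desc.bConfigurationValue);
		}
		return -ENODEV;	/* no vendor-mode configuration found */
	}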
*/ + c = udev->config; + num_configs = udev->descriptor.bNumConfigurations; + for (i = 0; i < num_configs; (i++, c++)) { + struct usb_interface_descriptor *desc = NULL; + + if (!c->desc.bNumInterfaces) + continue; + desc = &c->intf_cache[0]->altsetting->desc; + if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC) + break; + } + + if (i == num_configs) + return -ENODEV; + + if (usb_set_configuration(udev, c->desc.bConfigurationValue)) { + dev_err(&udev->dev, "Failed to set configuration %d\n", + c->desc.bConfigurationValue); + return -ENODEV; + } + + return 0; +} + +static struct usb_device_driver rtl8152_cfgselector_driver = { + .name = MODULENAME "-cfgselector", + .probe = rtl8152_cfgselector_probe, + .id_table = rtl8152_table, + .generic_subclass = 1, +}; + +static int __init rtl8152_driver_init(void) +{ + int ret; + + ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE); + if (ret) + return ret; + return usb_register(&rtl8152_driver); +} + +static void __exit rtl8152_driver_exit(void) +{ + usb_deregister(&rtl8152_driver); + usb_deregister_device_driver(&rtl8152_cfgselector_driver); +} + +module_init(rtl8152_driver_init); +module_exit(rtl8152_driver_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 64a9a80b2309..283ffddda821 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -555,32 +555,30 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) /*-------------------------------------------------------------------------*/ -static inline void rx_process (struct usbnet *dev, struct sk_buff *skb) +static inline int rx_process(struct usbnet *dev, struct sk_buff *skb) { if (dev->driver_info->rx_fixup && !dev->driver_info->rx_fixup (dev, skb)) { /* With RX_ASSEMBLE, rx_fixup() must update counters */ if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE)) dev->net->stats.rx_errors++; - goto done; + return -EPROTO; } // else network stack removes extra byte if we forced a short packet /* all data was already cloned from skb inside the driver */ if (dev->driver_info->flags & FLAG_MULTI_PACKET) - goto done; + return -EALREADY; if (skb->len < ETH_HLEN) { dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); - } else { - usbnet_skb_return(dev, skb); - return; + return -EPROTO; } -done: - skb_queue_tail(&dev->done, skb); + usbnet_skb_return(dev, skb); + return 0; } /*-------------------------------------------------------------------------*/ @@ -1514,6 +1512,14 @@ err: return ret; } +static inline void usb_free_skb(struct sk_buff *skb) +{ + struct skb_data *entry = (struct skb_data *)skb->cb; + + usb_free_urb(entry->urb); + dev_kfree_skb(skb); +} + /*-------------------------------------------------------------------------*/ // tasklet (work deferred from completions, in_irq) or timer @@ -1528,15 +1534,14 @@ static void usbnet_bh (struct timer_list *t) entry = (struct skb_data *) skb->cb; switch (entry->state) { case rx_done: - entry->state = rx_cleanup; - rx_process (dev, skb); + if (rx_process(dev, skb)) + usb_free_skb(skb); continue; case tx_done: kfree(entry->urb->sg); fallthrough; case rx_cleanup: - usb_free_urb (entry->urb); - dev_kfree_skb (skb); + usb_free_skb(skb); continue; default: netdev_dbg(dev->net, "bogus skb state %d\n", entry->state); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 18b3de854aeb..a170b0075dcf 100644 --- 
a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -446,9 +446,7 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, struct page *page, unsigned int offset, - unsigned int len, unsigned int truesize, - bool hdr_valid, unsigned int metasize, - unsigned int headroom) + unsigned int len, unsigned int truesize) { struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; @@ -466,21 +464,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, else hdr_padded_len = sizeof(struct padded_vnet_hdr); - /* If headroom is not 0, there is an offset between the beginning of the - * data and the allocated space, otherwise the data and the allocated - * space are aligned. - * - * Buffers with headroom use PAGE_SIZE as alloc size, see - * add_recvbuf_mergeable() + get_mergeable_buf_len() - */ - truesize = headroom ? PAGE_SIZE : truesize; - tailroom = truesize - headroom; - buf = p - headroom; - + buf = p; len -= hdr_len; offset += hdr_padded_len; p += hdr_padded_len; - tailroom -= hdr_padded_len + len; + tailroom = truesize - hdr_padded_len - len; shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); @@ -510,7 +498,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, if (len <= skb_tailroom(skb)) copy = len; else - copy = ETH_HLEN + metasize; + copy = ETH_HLEN; skb_put_data(skb, p, copy); len -= copy; @@ -549,19 +537,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, give_pages(rq, page); ok: - /* hdr_valid means no XDP, so we can copy the vnet header */ - if (hdr_valid) { - hdr = skb_vnet_hdr(skb); - memcpy(hdr, hdr_p, hdr_len); - } + hdr = skb_vnet_hdr(skb); + memcpy(hdr, hdr_p, hdr_len); if (page_to_free) put_page(page_to_free); - if (metasize) { - __skb_pull(skb, metasize); - skb_metadata_set(skb, metasize); - } - return skb; } @@ -570,22 +550,43 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, struct xdp_frame *xdpf) { struct virtio_net_hdr_mrg_rxbuf *hdr; - int err; + struct skb_shared_info *shinfo; + u8 nr_frags = 0; + int err, i; if (unlikely(xdpf->headroom < vi->hdr_len)) return -EOVERFLOW; - /* Make room for virtqueue hdr (also change xdpf->headroom?) */ + if (unlikely(xdp_frame_has_frags(xdpf))) { + shinfo = xdp_get_shared_info_from_frame(xdpf); + nr_frags = shinfo->nr_frags; + } + + /* In wrapping function virtnet_xdp_xmit(), we need to free + * up the pending old buffers, where we need to calculate the + * position of skb_shared_info in xdp_get_frame_len() and + * xdp_return_frame(), which will involve to xdpf->data and + * xdpf->headroom. Therefore, we need to update the value of + * headroom synchronously here. 
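The TX completion paths below size each frame with xdp_get_frame_len(), and locating the shared info involves xdpf->data and xdpf->headroom, which is why the headroom has to be updated here. A simplified sketch of what that helper computes (an approximation for illustration, not copied from the in-tree inline):

	#include <net/xdp.h>

	static unsigned int frame_total_len(struct xdp_frame *xdpf)
	{
		unsigned int len = xdpf->len;		/* linear part */
		struct skb_shared_info *sinfo;

		if (xdp_frame_has_frags(xdpf)) {
			/* shared info is found relative to data/headroom */
			sinfo = xdp_get_shared_info_from_frame(xdpf);
			len += sinfo->xdp_frags_size;	/* all fragments */
		}
		return len;
	}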
+ */ + xdpf->headroom -= vi->hdr_len; xdpf->data -= vi->hdr_len; /* Zero header and leave csum up to XDP layers */ hdr = xdpf->data; memset(hdr, 0, vi->hdr_len); xdpf->len += vi->hdr_len; - sg_init_one(sq->sg, xdpf->data, xdpf->len); + sg_init_table(sq->sg, nr_frags + 1); + sg_set_buf(sq->sg, xdpf->data, xdpf->len); + for (i = 0; i < nr_frags; i++) { + skb_frag_t *frag = &shinfo->frags[i]; + + sg_set_page(&sq->sg[i + 1], skb_frag_page(frag), + skb_frag_size(frag), skb_frag_off(frag)); + } - err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), - GFP_ATOMIC); + err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1, + xdp_to_ptr(xdpf), GFP_ATOMIC); if (unlikely(err)) return -ENOSPC; /* Caller handle free/refcnt */ @@ -665,7 +666,7 @@ static int virtnet_xdp_xmit(struct net_device *dev, if (likely(is_xdp_frame(ptr))) { struct xdp_frame *frame = ptr_to_xdp(ptr); - bytes += frame->len; + bytes += xdp_get_frame_len(frame); xdp_return_frame(frame); } else { struct sk_buff *skb = ptr; @@ -924,7 +925,7 @@ static struct sk_buff *receive_big(struct net_device *dev, { struct page *page = buf; struct sk_buff *skb = - page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0); + page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); stats->bytes += len - vi->hdr_len; if (unlikely(!skb)) @@ -938,6 +939,140 @@ err: return NULL; } +/* Why not use xdp_build_skb_from_frame() ? + * XDP core assumes that xdp frags are PAGE_SIZE in length, while in + * virtio-net there are 2 points that do not match its requirements: + * 1. The size of the prefilled buffer is not fixed before xdp is set. + * 2. xdp_build_skb_from_frame() does more checks that we don't need, + * like eth_type_trans() (which virtio-net does in receive_buf()). + */ +static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev, + struct virtnet_info *vi, + struct xdp_buff *xdp, + unsigned int xdp_frags_truesz) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + unsigned int headroom, data_len; + struct sk_buff *skb; + int metasize; + u8 nr_frags; + + if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { + pr_debug("Error building skb as missing reserved tailroom for xdp"); + return NULL; + } + + if (unlikely(xdp_buff_has_frags(xdp))) + nr_frags = sinfo->nr_frags; + + skb = build_skb(xdp->data_hard_start, xdp->frame_sz); + if (unlikely(!skb)) + return NULL; + + headroom = xdp->data - xdp->data_hard_start; + data_len = xdp->data_end - xdp->data; + skb_reserve(skb, headroom); + __skb_put(skb, data_len); + + metasize = xdp->data - xdp->data_meta; + metasize = metasize > 0 ? 
metasize : 0; + if (metasize) + skb_metadata_set(skb, metasize); + + if (unlikely(xdp_buff_has_frags(xdp))) + xdp_update_skb_shared_info(skb, nr_frags, + sinfo->xdp_frags_size, + xdp_frags_truesz, + xdp_buff_is_frag_pfmemalloc(xdp)); + + return skb; +} + +/* TODO: build xdp in big mode */ +static int virtnet_build_xdp_buff_mrg(struct net_device *dev, + struct virtnet_info *vi, + struct receive_queue *rq, + struct xdp_buff *xdp, + void *buf, + unsigned int len, + unsigned int frame_sz, + u16 *num_buf, + unsigned int *xdp_frags_truesize, + struct virtnet_rq_stats *stats) +{ + struct virtio_net_hdr_mrg_rxbuf *hdr = buf; + unsigned int headroom, tailroom, room; + unsigned int truesize, cur_frag_size; + struct skb_shared_info *shinfo; + unsigned int xdp_frags_truesz = 0; + struct page *page; + skb_frag_t *frag; + int offset; + void *ctx; + + xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); + xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM, + VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); + + if (*num_buf > 1) { + /* If we want to build multi-buffer xdp, we need + * to specify that the flags of xdp_buff have the + * XDP_FLAGS_HAS_FRAG bit. + */ + if (!xdp_buff_has_frags(xdp)) + xdp_buff_set_frags_flag(xdp); + + shinfo = xdp_get_shared_info_from_buff(xdp); + shinfo->nr_frags = 0; + shinfo->xdp_frags_size = 0; + } + + if ((*num_buf - 1) > MAX_SKB_FRAGS) + return -EINVAL; + + while ((--*num_buf) >= 1) { + buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); + if (unlikely(!buf)) { + pr_debug("%s: rx error: %d buffers out of %d missing\n", + dev->name, *num_buf, + virtio16_to_cpu(vi->vdev, hdr->num_buffers)); + dev->stats.rx_length_errors++; + return -EINVAL; + } + + stats->bytes += len; + page = virt_to_head_page(buf); + offset = buf - page_address(page); + + truesize = mergeable_ctx_to_truesize(ctx); + headroom = mergeable_ctx_to_headroom(ctx); + tailroom = headroom ? sizeof(struct skb_shared_info) : 0; + room = SKB_DATA_ALIGN(headroom + tailroom); + + cur_frag_size = truesize; + xdp_frags_truesz += cur_frag_size; + if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) { + put_page(page); + pr_debug("%s: rx error: len %u exceeds truesize %lu\n", + dev->name, len, (unsigned long)(truesize - room)); + dev->stats.rx_length_errors++; + return -EINVAL; + } + + frag = &shinfo->frags[shinfo->nr_frags++]; + __skb_frag_set_page(frag, page); + skb_frag_off_set(frag, offset); + skb_frag_size_set(frag, len); + if (page_is_pfmemalloc(page)) + xdp_buff_set_frag_pfmemalloc(xdp); + + shinfo->xdp_frags_size += len; + } + + *xdp_frags_truesize = xdp_frags_truesz; + return 0; +} + static struct sk_buff *receive_mergeable(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, @@ -955,16 +1090,17 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, struct bpf_prog *xdp_prog; unsigned int truesize = mergeable_ctx_to_truesize(ctx); unsigned int headroom = mergeable_ctx_to_headroom(ctx); - unsigned int metasize = 0; - unsigned int frame_sz; + unsigned int tailroom = headroom ? 
sizeof(struct skb_shared_info) : 0; + unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); + unsigned int frame_sz, xdp_room; int err; head_skb = NULL; stats->bytes += len - vi->hdr_len; - if (unlikely(len > truesize)) { + if (unlikely(len > truesize - room)) { pr_debug("%s: rx error: len %u exceeds truesize %lu\n", - dev->name, len, (unsigned long)ctx); + dev->name, len, (unsigned long)(truesize - room)); dev->stats.rx_length_errors++; goto err_skb; } @@ -977,11 +1113,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { + unsigned int xdp_frags_truesz = 0; + struct skb_shared_info *shinfo; struct xdp_frame *xdpf; struct page *xdp_page; struct xdp_buff xdp; void *data; u32 act; + int i; /* Transient failure which in theory could occur if * in-flight packets from before XDP was enabled reach @@ -990,19 +1129,23 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, if (unlikely(hdr->hdr.gso_type)) goto err_xdp; - /* Buffers with headroom use PAGE_SIZE as alloc size, - * see add_recvbuf_mergeable() + get_mergeable_buf_len() + /* Now XDP core assumes frag size is PAGE_SIZE, but buffers + * with headroom may add hole in truesize, which + * make their length exceed PAGE_SIZE. So we disabled the + * hole mechanism for xdp. See add_recvbuf_mergeable(). */ - frame_sz = headroom ? PAGE_SIZE : truesize; - - /* This happens when rx buffer size is underestimated - * or headroom is not enough because of the buffer - * was refilled before XDP is set. This should only - * happen for the first several packets, so we don't - * care much about its performance. + frame_sz = truesize; + + /* This happens when headroom is not enough because + * of the buffer was prefilled before XDP is set. + * This should only happen for the first several packets. + * In fact, vq reset can be used here to help us clean up + * the prefilled buffers, but many existing devices do not + * support it, and we don't want to bother users who are + * using xdp normally. */ - if (unlikely(num_buf > 1 || - headroom < virtnet_get_headroom(vi))) { + if (!xdp_prog->aux->xdp_has_frags && + (num_buf > 1 || headroom < virtnet_get_headroom(vi))) { /* linearize data for XDP */ xdp_page = xdp_linearize_page(rq, &num_buf, page, offset, @@ -1013,82 +1156,53 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, if (!xdp_page) goto err_xdp; offset = VIRTIO_XDP_HEADROOM; + } else if (unlikely(headroom < virtnet_get_headroom(vi))) { + xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + + sizeof(struct skb_shared_info)); + if (len + xdp_room > PAGE_SIZE) + goto err_xdp; + + xdp_page = alloc_page(GFP_ATOMIC); + if (!xdp_page) + goto err_xdp; + + memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM, + page_address(page) + offset, len); + frame_sz = PAGE_SIZE; + offset = VIRTIO_XDP_HEADROOM; } else { xdp_page = page; } - /* Allow consuming headroom but reserve enough space to push - * the descriptor on if we get an XDP_TX return code. 
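Because frame_sz now has to match the buffer's real truesize (the hole mechanism is disabled for XDP, see add_recvbuf_mergeable() further down), the geometry that virtnet_build_xdp_buff_mrg() hands to the XDP core for the head buffer looks roughly like this (offsets symbolic, names reused from the hunk above):

	/*  buf - VIRTIO_XDP_HEADROOM               buf - VIRTIO_XDP_HEADROOM + frame_sz
	 *  |<-- headroom + vnet hdr -->|<-- payload -->|<-- tail (skb_shared_info) -->|
	 *  ^ data_hard_start           ^ data          ^ data_end
	 */
	xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, buf - VIRTIO_XDP_HEADROOM,
			 VIRTIO_XDP_HEADROOM + vi->hdr_len,	/* headroom */
			 len - vi->hdr_len,			/* data length */
			 true);					/* metadata allowed */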
- */ data = page_address(xdp_page) + offset; - xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq); - xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len, - VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true); + err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz, + &num_buf, &xdp_frags_truesz, stats); + if (unlikely(err)) + goto err_xdp_frags; act = bpf_prog_run_xdp(xdp_prog, &xdp); stats->xdp_packets++; switch (act) { case XDP_PASS: - metasize = xdp.data - xdp.data_meta; - - /* recalculate offset to account for any header - * adjustments and minus the metasize to copy the - * metadata in page_to_skb(). Note other cases do not - * build an skb and avoid using offset - */ - offset = xdp.data - page_address(xdp_page) - - vi->hdr_len - metasize; - - /* recalculate len if xdp.data, xdp.data_end or - * xdp.data_meta were adjusted - */ - len = xdp.data_end - xdp.data + vi->hdr_len + metasize; - - /* recalculate headroom if xdp.data or xdp_data_meta - * were adjusted, note that offset should always point - * to the start of the reserved bytes for virtio_net - * header which are followed by xdp.data, that means - * that offset is equal to the headroom (when buf is - * starting at the beginning of the page, otherwise - * there is a base offset inside the page) but it's used - * with a different starting point (buf start) than - * xdp.data (buf start + vnet hdr size). If xdp.data or - * data_meta were adjusted by the xdp prog then the - * headroom size has changed and so has the offset, we - * can use data_hard_start, which points at buf start + - * vnet hdr size, to calculate the new headroom and use - * it later to compute buf start in page_to_skb() - */ - headroom = xdp.data - xdp.data_hard_start - metasize; - - /* We can only create skb based on xdp_page. 
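The metasize bookkeeping that used to be open-coded in this XDP_PASS branch now lives in build_skb_from_xdp_buff(), which turns whatever sits between xdp->data_meta and xdp->data into skb metadata. For context, a minimal, hypothetical XDP program that produces such metadata could look like this (illustrative only; not part of the patch):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_tag(struct xdp_md *ctx)
	{
		void *data, *data_meta;
		__u32 *tag;

		/* Grow the metadata area by 4 bytes in front of the packet. */
		if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*tag)))
			return XDP_PASS;

		data      = (void *)(long)ctx->data;
		data_meta = (void *)(long)ctx->data_meta;
		tag = data_meta;
		if ((void *)(tag + 1) > data)	/* bounds check for the verifier */
			return XDP_PASS;

		*tag = 0x1234;	/* visible to the stack as skb metadata on XDP_PASS */
		return XDP_PASS;
	}

	char LICENSE[] SEC("license") = "GPL";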
*/ - if (unlikely(xdp_page != page)) { - rcu_read_unlock(); + if (unlikely(xdp_page != page)) put_page(page); - head_skb = page_to_skb(vi, rq, xdp_page, offset, - len, PAGE_SIZE, false, - metasize, - headroom); - return head_skb; - } - break; + head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); + rcu_read_unlock(); + return head_skb; case XDP_TX: stats->xdp_tx++; xdpf = xdp_convert_buff_to_frame(&xdp); if (unlikely(!xdpf)) { - if (unlikely(xdp_page != page)) - put_page(xdp_page); - goto err_xdp; + netdev_dbg(dev, "convert buff to frame failed for xdp\n"); + goto err_xdp_frags; } err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); if (unlikely(!err)) { xdp_return_frame_rx_napi(xdpf); } else if (unlikely(err < 0)) { trace_xdp_exception(vi->dev, xdp_prog, act); - if (unlikely(xdp_page != page)) - put_page(xdp_page); - goto err_xdp; + goto err_xdp_frags; } *xdp_xmit |= VIRTIO_XDP_TX; if (unlikely(xdp_page != page)) @@ -1098,11 +1212,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, case XDP_REDIRECT: stats->xdp_redirects++; err = xdp_do_redirect(dev, &xdp, xdp_prog); - if (err) { - if (unlikely(xdp_page != page)) - put_page(xdp_page); - goto err_xdp; - } + if (err) + goto err_xdp_frags; *xdp_xmit |= VIRTIO_XDP_REDIR; if (unlikely(xdp_page != page)) put_page(page); @@ -1115,16 +1226,26 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, trace_xdp_exception(vi->dev, xdp_prog, act); fallthrough; case XDP_DROP: - if (unlikely(xdp_page != page)) - __free_pages(xdp_page, 0); - goto err_xdp; + goto err_xdp_frags; + } +err_xdp_frags: + if (unlikely(xdp_page != page)) + __free_pages(xdp_page, 0); + + if (xdp_buff_has_frags(&xdp)) { + shinfo = xdp_get_shared_info_from_buff(&xdp); + for (i = 0; i < shinfo->nr_frags; i++) { + xdp_page = skb_frag_page(&shinfo->frags[i]); + put_page(xdp_page); + } } + + goto err_xdp; } rcu_read_unlock(); skip_xdp: - head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog, - metasize, headroom); + head_skb = page_to_skb(vi, rq, page, offset, len, truesize); curr_skb = head_skb; if (unlikely(!curr_skb)) @@ -1146,9 +1267,12 @@ skip_xdp: page = virt_to_head_page(buf); truesize = mergeable_ctx_to_truesize(ctx); - if (unlikely(len > truesize)) { + headroom = mergeable_ctx_to_headroom(ctx); + tailroom = headroom ? sizeof(struct skb_shared_info) : 0; + room = SKB_DATA_ALIGN(headroom + tailroom); + if (unlikely(len > truesize - room)) { pr_debug("%s: rx error: len %u exceeds truesize %lu\n", - dev->name, len, (unsigned long)ctx); + dev->name, len, (unsigned long)(truesize - room)); dev->stats.rx_length_errors++; goto err_skb; } @@ -1251,13 +1375,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, if (unlikely(len < vi->hdr_len + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); dev->stats.rx_length_errors++; - if (vi->mergeable_rx_bufs) { - put_page(virt_to_head_page(buf)); - } else if (vi->big_packets) { - give_pages(rq, buf); - } else { - put_page(virt_to_head_page(buf)); - } + virtnet_rq_free_unused_buf(rq->vq, buf); return; } @@ -1426,13 +1544,16 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, /* To avoid internal fragmentation, if there is very likely not * enough space for another buffer, add the remaining space to * the current buffer. + * XDP core assumes that frame_size of xdp_buff and the length + * of the frag are PAGE_SIZE, so we disable the hole mechanism. 
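With the hole no longer folded into the buffer length when XDP headroom is reserved, each mergeable buffer (payload plus the reserved room) stays within one page, which is what the XDP core expects of fragments. A rough back-of-the-envelope calculation, as standalone C with assumed x86-64 values (the real numbers depend on the kernel configuration):

	#include <stdio.h>

	/* Assumed values for illustration only; not taken from the patch. */
	#define PAGE_SIZE		4096u
	#define SMP_CACHE_BYTES		64u
	#define SKB_DATA_ALIGN(x)	(((x) + (SMP_CACHE_BYTES - 1)) & \
					 ~(SMP_CACHE_BYTES - 1))
	#define VIRTIO_XDP_HEADROOM	256u
	#define SHINFO_SIZE		320u	/* ~sizeof(struct skb_shared_info) */

	int main(void)
	{
		unsigned int headroom = VIRTIO_XDP_HEADROOM;	/* XDP enabled */
		unsigned int tailroom = headroom ? SHINFO_SIZE : 0;
		unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);

		/* Largest payload one PAGE_SIZE buffer can carry once the
		 * headroom and shared-info tail are reserved. */
		printf("room = %u, max buffer len = %u\n", room, PAGE_SIZE - room);
		return 0;
	}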
*/ - len += hole; + if (!headroom) + len += hole; alloc_frag->offset += hole; } sg_init_one(rq->sg, buf, len); - ctx = mergeable_len_to_ctx(len, headroom); + ctx = mergeable_len_to_ctx(len + room, headroom); err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) put_page(virt_to_head_page(buf)); @@ -1608,7 +1729,7 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) } else { struct xdp_frame *frame = ptr_to_xdp(ptr); - bytes += frame->len; + bytes += xdp_get_frame_len(frame); xdp_return_frame(frame); } packets++; @@ -3080,7 +3201,9 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi) static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { - unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); + unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + + sizeof(struct skb_shared_info)); + unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; struct virtnet_info *vi = netdev_priv(dev); struct bpf_prog *old_prog; u16 xdp_qp = 0, curr_qp; @@ -3103,9 +3226,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, return -EINVAL; } - if (dev->mtu > max_sz) { - NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP"); - netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); + if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) { + NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags"); + netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz); return -EINVAL; }
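The relaxed MTU check above only applies when the program declares multi-buffer support (prog->aux->xdp_has_frags). With libbpf that is conventionally expressed through the section name, roughly as in this minimal, hypothetical skeleton (illustrative only):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* "xdp.frags" makes libbpf load the program with BPF_F_XDP_HAS_FRAGS,
	 * so it may attach to virtio-net even when the MTU exceeds the
	 * single-buffer limit computed from PAGE_SIZE above. */
	SEC("xdp.frags")
	int xdp_pass_mb(struct xdp_md *ctx)
	{
		return XDP_PASS;
	}

	char LICENSE[] SEC("license") = "GPL";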