Diffstat (limited to 'drivers/net')
926 files changed, 34974 insertions, 25352 deletions
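One theme in the diffs below is the move of the legacy "netdev=" boot-option parser out of net/core and into drivers/net/Space.c, gated by the new NETDEV_LEGACY_INIT symbol. For orientation, here is a userspace sketch (hypothetical names; strtoul() stands in for the kernel's get_options()) of how a "netdev=irq,base_addr,mem_start,mem_end,name" string maps onto the ifmap fields, mirroring the netdev_boot_setup() hunk below:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ifmap_sketch {
    unsigned long base_addr, mem_start, mem_end;
    int irq;
};

/* Parse up to four comma-separated numbers, then treat the rest as the name. */
static const char *parse_netdev(const char *str, struct ifmap_sketch *map)
{
    unsigned long v[4];
    char *end;
    int n = 0;

    memset(map, 0, sizeof(*map));
    while (n < 4) {
        v[n] = strtoul(str, &end, 0);
        if (end == str)
            break;          /* not a number: remainder is the name */
        n++;
        str = end;
        if (*str != ',')
            break;
        str++;
    }
    if (n > 0) map->irq = (int)v[0];
    if (n > 1) map->base_addr = v[1];
    if (n > 2) map->mem_start = v[2];
    if (n > 3) map->mem_end = v[3];
    return str;             /* device name, e.g. "eth0" */
}

int main(void)
{
    struct ifmap_sketch map;
    const char *name = parse_netdev("9,0x300,0,0,eth0", &map);

    printf("irq=%d io=0x%lx name=%s\n", map.irq, map.base_addr, name);
    return 0;
}

For the sample string this prints irq=9 io=0x300 name=eth0, matching the ints[] assignments in the netdev_boot_setup() hunk.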
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 6977f8248df7..f37b1c56f7c4 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -431,10 +431,10 @@ config VSOCKMON config MHI_NET tristate "MHI network driver" depends on MHI_BUS - select WWAN help This is the network driver for MHI bus. It can be used with - QCOM based WWAN modems (like SDX55). Say Y or M. + QCOM based WWAN modems for IP or QMAP/rmnet protocol (like SDX55). + Say Y or M. endif # NET_CORE @@ -483,6 +483,8 @@ config NET_SB1000 source "drivers/net/phy/Kconfig" +source "drivers/net/mctp/Kconfig" + source "drivers/net/mdio/Kconfig" source "drivers/net/pcs/Kconfig" @@ -549,8 +551,8 @@ config VMXNET3 tristate "VMware VMXNET3 ethernet driver" depends on PCI && INET depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \ - IA64_PAGE_SIZE_64KB || MICROBLAZE_64K_PAGES || \ - PARISC_PAGE_SIZE_64KB || PPC_64K_PAGES) + IA64_PAGE_SIZE_64KB || PARISC_PAGE_SIZE_64KB || \ + PPC_64K_PAGES) help This driver supports VMware's vmxnet3 virtual ethernet NIC. To compile this driver as a module, choose M here: the @@ -604,4 +606,11 @@ config NET_FAILOVER a VM with direct attached VF by failing over to the paravirtual datapath when the VF is unplugged. +config NETDEV_LEGACY_INIT + bool + depends on ISA + help + Drivers that call netdev_boot_setup_check() should select this + symbol, everything else no longer needs it. + endif # NETDEVICES diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 7ffd2d03efaf..739838623cf6 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -18,7 +18,8 @@ obj-$(CONFIG_MACVLAN) += macvlan.o obj-$(CONFIG_MACVTAP) += macvtap.o obj-$(CONFIG_MII) += mii.o obj-$(CONFIG_MDIO) += mdio.o -obj-$(CONFIG_NET) += Space.o loopback.o +obj-$(CONFIG_NET) += loopback.o +obj-$(CONFIG_NETDEV_LEGACY_INIT) += Space.o obj-$(CONFIG_NETCONSOLE) += netconsole.o obj-y += phy/ obj-y += mdio/ @@ -36,7 +37,7 @@ obj-$(CONFIG_GTP) += gtp.o obj-$(CONFIG_NLMON) += nlmon.o obj-$(CONFIG_NET_VRF) += vrf.o obj-$(CONFIG_VSOCKMON) += vsockmon.o -obj-$(CONFIG_MHI_NET) += mhi/ +obj-$(CONFIG_MHI_NET) += mhi_net.o # # Networking Drivers @@ -69,6 +70,7 @@ obj-$(CONFIG_WAN) += wan/ obj-$(CONFIG_WLAN) += wireless/ obj-$(CONFIG_IEEE802154) += ieee802154/ obj-$(CONFIG_WWAN) += wwan/ +obj-$(CONFIG_MCTP) += mctp/ obj-$(CONFIG_VMXNET3) += vmxnet3/ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o diff --git a/drivers/net/Space.c b/drivers/net/Space.c index df79e7370bcc..49e67c9fb5a4 100644 --- a/drivers/net/Space.c +++ b/drivers/net/Space.c @@ -30,6 +30,148 @@ #include <linux/netlink.h> #include <net/Space.h> +/* + * This structure holds boot-time configured netdevice settings. They + * are then used in the device probing. + */ +struct netdev_boot_setup { + char name[IFNAMSIZ]; + struct ifmap map; +}; +#define NETDEV_BOOT_SETUP_MAX 8 + + +/****************************************************************************** + * + * Device Boot-time Settings Routines + * + ******************************************************************************/ + +/* Boot time configuration table */ +static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; + +/** + * netdev_boot_setup_add - add new setup entry + * @name: name of the device + * @map: configured settings for the device + * + * Adds new setup entry to the dev_boot_setup list. The function + * returns 0 on error and 1 on success. This is a generic routine to + * all netdevices. 
+ */ +static int netdev_boot_setup_add(char *name, struct ifmap *map) +{ + struct netdev_boot_setup *s; + int i; + + s = dev_boot_setup; + for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { + if (s[i].name[0] == '\0' || s[i].name[0] == ' ') { + memset(s[i].name, 0, sizeof(s[i].name)); + strlcpy(s[i].name, name, IFNAMSIZ); + memcpy(&s[i].map, map, sizeof(s[i].map)); + break; + } + } + + return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1; +} + +/** + * netdev_boot_setup_check - check boot time settings + * @dev: the netdevice + * + * Check boot time settings for the device. + * The found settings are set for the device to be used + * later in the device probing. + * Returns 0 if no settings found, 1 if they are. + */ +int netdev_boot_setup_check(struct net_device *dev) +{ + struct netdev_boot_setup *s = dev_boot_setup; + int i; + + for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { + if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && + !strcmp(dev->name, s[i].name)) { + dev->irq = s[i].map.irq; + dev->base_addr = s[i].map.base_addr; + dev->mem_start = s[i].map.mem_start; + dev->mem_end = s[i].map.mem_end; + return 1; + } + } + return 0; +} +EXPORT_SYMBOL(netdev_boot_setup_check); + +/** + * netdev_boot_base - get address from boot time settings + * @prefix: prefix for network device + * @unit: id for network device + * + * Check boot time settings for the base address of device. + * The found settings are set for the device to be used + * later in the device probing. + * Returns 0 if no settings found. + */ +static unsigned long netdev_boot_base(const char *prefix, int unit) +{ + const struct netdev_boot_setup *s = dev_boot_setup; + char name[IFNAMSIZ]; + int i; + + sprintf(name, "%s%d", prefix, unit); + + /* + * If device already registered then return base of 1 + * to indicate not to probe for this interface + */ + if (__dev_get_by_name(&init_net, name)) + return 1; + + for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) + if (!strcmp(name, s[i].name)) + return s[i].map.base_addr; + return 0; +} + +/* + * Saves at boot time configured settings for any netdevice. + */ +static int __init netdev_boot_setup(char *str) +{ + int ints[5]; + struct ifmap map; + + str = get_options(str, ARRAY_SIZE(ints), ints); + if (!str || !*str) + return 0; + + /* Save settings */ + memset(&map, 0, sizeof(map)); + if (ints[0] > 0) + map.irq = ints[1]; + if (ints[0] > 1) + map.base_addr = ints[2]; + if (ints[0] > 2) + map.mem_start = ints[3]; + if (ints[0] > 3) + map.mem_end = ints[4]; + + /* Add new entry to the list */ + return netdev_boot_setup_add(str, &map); +} + +__setup("netdev=", netdev_boot_setup); + +static int __init ether_boot_setup(char *str) +{ + return netdev_boot_setup(str); +} +__setup("ether=", ether_boot_setup); + + /* A unified ethernet device probe. This is the easiest way to have every * ethernet adaptor have the name "eth[0123...]". 
*/ @@ -77,39 +219,15 @@ static struct devprobe2 isa_probes[] __initdata = { #ifdef CONFIG_SMC9194 {smc_init, 0}, #endif -#ifdef CONFIG_CS89x0 -#ifndef CONFIG_CS89x0_PLATFORM +#ifdef CONFIG_CS89x0_ISA {cs89x0_probe, 0}, #endif -#endif -#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel */ - {i82596_probe, 0}, /* I82596 */ -#endif #ifdef CONFIG_NI65 {ni65_probe, 0}, #endif {NULL, 0}, }; -static struct devprobe2 m68k_probes[] __initdata = { -#ifdef CONFIG_ATARILANCE /* Lance-based Atari ethernet boards */ - {atarilance_probe, 0}, -#endif -#ifdef CONFIG_SUN3LANCE /* sun3 onboard Lance chip */ - {sun3lance_probe, 0}, -#endif -#ifdef CONFIG_SUN3_82586 /* sun3 onboard Intel 82586 chip */ - {sun3_82586_probe, 0}, -#endif -#ifdef CONFIG_APNE /* A1200 PCMCIA NE2000 */ - {apne_probe, 0}, -#endif -#ifdef CONFIG_MVME147_NET /* MVME147 internal Ethernet */ - {mvme147lance_probe, 0}, -#endif - {NULL, 0}, -}; - /* Unified ethernet device probe, segmented per architecture and * per bus interface. This drives the legacy devices only for now. */ @@ -121,8 +239,7 @@ static void __init ethif_probe2(int unit) if (base_addr == 1) return; - (void)(probe_list2(unit, m68k_probes, base_addr == 0) && - probe_list2(unit, isa_probes, base_addr == 0)); + probe_list2(unit, isa_probes, base_addr == 0); } /* Statically configured drivers -- order matters here. */ @@ -130,10 +247,6 @@ static int __init net_olddevs_init(void) { int num; -#ifdef CONFIG_SBNI - for (num = 0; num < 8; ++num) - sbni_probe(num); -#endif for (num = 0; num < 8; ++num) ethif_probe2(num); @@ -142,9 +255,6 @@ static int __init net_olddevs_init(void) cops_probe(1); cops_probe(2); #endif -#ifdef CONFIG_LTPC - ltpc_probe(); -#endif return 0; } diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig index 43918398f0d3..90b9f1d6eda9 100644 --- a/drivers/net/appletalk/Kconfig +++ b/drivers/net/appletalk/Kconfig @@ -52,7 +52,9 @@ config LTPC config COPS tristate "COPS LocalTalk PC support" - depends on DEV_APPLETALK && (ISA || EISA) + depends on DEV_APPLETALK && ISA + depends on NETDEVICES + select NETDEV_LEGACY_INIT help This allows you to use COPS AppleTalk cards to connect to LocalTalk networks. 
You also need version 1.3.3 or later of the netatalk diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index 51cf5eca9c7f..5566daefbff4 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -54,11 +54,12 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, static int ipddp_create(struct ipddp_route *new_rt); static int ipddp_delete(struct ipddp_route *rt); static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt); -static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd); static const struct net_device_ops ipddp_netdev_ops = { .ndo_start_xmit = ipddp_xmit, - .ndo_do_ioctl = ipddp_ioctl, + .ndo_siocdevprivate = ipddp_siocdevprivate, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -268,15 +269,18 @@ static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt) return NULL; } -static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) { - struct ipddp_route __user *rt = ifr->ifr_data; struct ipddp_route rcp, rcp2, *rp; + if (in_compat_syscall()) + return -EOPNOTSUPP; + if(!capable(CAP_NET_ADMIN)) return -EPERM; - if(copy_from_user(&rcp, rt, sizeof(rcp))) + if (copy_from_user(&rcp, data, sizeof(rcp))) return -EFAULT; switch(cmd) @@ -296,7 +300,7 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) spin_unlock_bh(&ipddp_route_lock); if (rp) { - if (copy_to_user(rt, &rcp2, + if (copy_to_user(data, &rcp2, sizeof(struct ipddp_route))) return -EFAULT; return 0; diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 69c270885ff0..1f8925e75b3f 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -1015,7 +1015,7 @@ static const struct net_device_ops ltpc_netdev = { .ndo_set_rx_mode = set_multicast_list, }; -struct net_device * __init ltpc_probe(void) +static struct net_device * __init ltpc_probe(void) { struct net_device *dev; int err = -ENOMEM; @@ -1221,12 +1221,10 @@ static int __init ltpc_setup(char *str) } __setup("ltpc=", ltpc_setup); -#endif /* MODULE */ +#endif static struct net_device *dev_ltpc; -#ifdef MODULE - MODULE_LICENSE("GPL"); module_param(debug, int, 0); module_param_hw(io, int, ioport, 0); @@ -1244,7 +1242,6 @@ static int __init ltpc_module_init(void) return PTR_ERR_OR_ZERO(dev_ltpc); } module_init(ltpc_module_init); -#endif static void __exit ltpc_cleanup(void) { diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 6908822d9773..a4a202b9a0a2 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -96,7 +96,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker); static void ad_mux_machine(struct port *port, bool *update_slave_arr); static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port); static void ad_tx_machine(struct port *port); -static void ad_periodic_machine(struct port *port); +static void ad_periodic_machine(struct port *port, struct bond_params bond_params); static void ad_port_selection_logic(struct port *port, bool *update_slave_arr); static void ad_agg_selection_logic(struct aggregator *aggregator, bool *update_slave_arr); @@ -1294,10 +1294,11 @@ static void ad_tx_machine(struct port *port) /** * ad_periodic_machine - handle a port's periodic state machine 
* @port: the port we're looking at + * @bond_params: bond parameters we will use * * Turn ntt flag on priodically to perform periodic transmission of lacpdu's. */ -static void ad_periodic_machine(struct port *port) +static void ad_periodic_machine(struct port *port, struct bond_params bond_params) { periodic_states_t last_state; @@ -1306,8 +1307,8 @@ static void ad_periodic_machine(struct port *port) /* check if port was reinitialized */ if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) || - (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) - ) { + (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) || + !bond_params.lacp_active) { port->sm_periodic_state = AD_NO_PERIODIC; } /* check if state machine should change state */ @@ -2341,7 +2342,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) } ad_rx_machine(NULL, port); - ad_periodic_machine(port); + ad_periodic_machine(port, bond->params); ad_port_selection_logic(port, &update_slave_arr); ad_mux_machine(port, &update_slave_arr); ad_tx_machine(port); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 22e5632089ac..7d3752cbf761 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -17,7 +17,6 @@ #include <linux/if_bonding.h> #include <linux/if_vlan.h> #include <linux/in.h> -#include <net/ipx.h> #include <net/arp.h> #include <net/ipv6.h> #include <asm/byteorder.h> @@ -1351,8 +1350,6 @@ struct slave *bond_xmit_tlb_slave_get(struct bonding *bond, if (!is_multicast_ether_addr(eth_data->h_dest)) { switch (skb->protocol) { case htons(ETH_P_IP): - case htons(ETH_P_IPX): - /* In case of IPX, it will falback to L2 hash */ case htons(ETH_P_IPV6): hash_index = bond_xmit_hash(bond, skb); if (bond->params.tlb_dynamic_lb) { @@ -1454,35 +1451,6 @@ struct slave *bond_xmit_alb_slave_get(struct bonding *bond, hash_size = sizeof(ip6hdr->daddr); break; } - case ETH_P_IPX: { - const struct ipxhdr *ipxhdr; - - if (pskb_network_may_pull(skb, sizeof(*ipxhdr))) { - do_tx_balance = false; - break; - } - ipxhdr = (struct ipxhdr *)skb_network_header(skb); - - if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) { - /* something is wrong with this packet */ - do_tx_balance = false; - break; - } - - if (ipxhdr->ipx_type != IPX_TYPE_NCP) { - /* The only protocol worth balancing in - * this family since it has an "ARP" like - * mechanism - */ - do_tx_balance = false; - break; - } - - eth_data = eth_hdr(skb); - hash_start = (char *)eth_data->h_dest; - hash_size = ETH_ALEN; - break; - } case ETH_P_ARP: do_tx_balance = false; if (bond_info->rlb_enabled) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 31730efa7538..b0966e733926 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -317,6 +317,25 @@ bool bond_sk_check(struct bonding *bond) } } +static bool bond_xdp_check(struct bonding *bond) +{ + switch (BOND_MODE(bond)) { + case BOND_MODE_ROUNDROBIN: + case BOND_MODE_ACTIVEBACKUP: + return true; + case BOND_MODE_8023AD: + case BOND_MODE_XOR: + /* vlan+srcmac is not supported with XDP as in most cases the 802.1q + * payload is not in the packet due to hardware offload. 
+ */ + if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC) + return true; + fallthrough; + default: + return false; + } +} + /*---------------------------------- VLAN -----------------------------------*/ /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid, @@ -732,7 +751,7 @@ static int bond_check_dev_link(struct bonding *bond, BMSR_LSTATUS : 0; /* Ethtool can't be used, fallback to MII ioctls. */ - ioctl = slave_ops->ndo_do_ioctl; + ioctl = slave_ops->ndo_eth_ioctl; if (ioctl) { /* TODO: set pointer to correct ioctl on a per team member * bases to make this more efficient. that is, once @@ -756,7 +775,7 @@ static int bond_check_dev_link(struct bonding *bond, } } - /* If reporting, report that either there's no dev->do_ioctl, + /* If reporting, report that either there's no ndo_eth_ioctl, * or both SIOCGMIIREG and get_link failed (meaning that we * cannot report link status). If not reporting, pretend * we're ok. @@ -1712,6 +1731,20 @@ void bond_lower_state_changed(struct slave *slave) netdev_lower_state_changed(slave->dev, &info); } +#define BOND_NL_ERR(bond_dev, extack, errmsg) do { \ + if (extack) \ + NL_SET_ERR_MSG(extack, errmsg); \ + else \ + netdev_err(bond_dev, "Error: %s\n", errmsg); \ +} while (0) + +#define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do { \ + if (extack) \ + NL_SET_ERR_MSG(extack, errmsg); \ + else \ + slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg); \ +} while (0) + /* enslave device <slave> to bond device <master> */ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, struct netlink_ext_ack *extack) @@ -1725,29 +1758,26 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, if (slave_dev->flags & IFF_MASTER && !netif_is_bond_master(slave_dev)) { - NL_SET_ERR_MSG(extack, "Device with IFF_MASTER cannot be enslaved"); - netdev_err(bond_dev, - "Error: Device with IFF_MASTER cannot be enslaved\n"); + BOND_NL_ERR(bond_dev, extack, + "Device type (master device) cannot be enslaved"); return -EPERM; } if (!bond->params.use_carrier && slave_dev->ethtool_ops->get_link == NULL && - slave_ops->ndo_do_ioctl == NULL) { + slave_ops->ndo_eth_ioctl == NULL) { slave_warn(bond_dev, slave_dev, "no link monitoring support\n"); } /* already in-use? */ if (netdev_is_rx_handler_busy(slave_dev)) { - NL_SET_ERR_MSG(extack, "Device is in use and cannot be enslaved"); - slave_err(bond_dev, slave_dev, - "Error: Device is in use and cannot be enslaved\n"); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Device is in use and cannot be enslaved"); return -EBUSY; } if (bond_dev == slave_dev) { - NL_SET_ERR_MSG(extack, "Cannot enslave bond to itself."); - netdev_err(bond_dev, "cannot enslave bond to itself.\n"); + BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself."); return -EPERM; } @@ -1756,8 +1786,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n"); if (vlan_uses_dev(bond_dev)) { - NL_SET_ERR_MSG(extack, "Can not enslave VLAN challenged device to VLAN enabled bond"); - slave_err(bond_dev, slave_dev, "Error: cannot enslave VLAN challenged slave on VLAN enabled bond\n"); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Can not enslave VLAN challenged device to VLAN enabled bond"); return -EPERM; } else { slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. 
Adding VLANs will be blocked as long as it is part of bond.\n"); @@ -1775,8 +1805,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, * enslaving it; the old ifenslave will not. */ if (slave_dev->flags & IFF_UP) { - NL_SET_ERR_MSG(extack, "Device can not be enslaved while up"); - slave_err(bond_dev, slave_dev, "slave is up - this may be due to an out of date ifenslave\n"); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Device can not be enslaved while up"); return -EPERM; } @@ -1815,17 +1845,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, bond_dev); } } else if (bond_dev->type != slave_dev->type) { - NL_SET_ERR_MSG(extack, "Device type is different from other slaves"); - slave_err(bond_dev, slave_dev, "ether type (%d) is different from other slaves (%d), can not enslave it\n", - slave_dev->type, bond_dev->type); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Device type is different from other slaves"); return -EINVAL; } if (slave_dev->type == ARPHRD_INFINIBAND && BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { - NL_SET_ERR_MSG(extack, "Only active-backup mode is supported for infiniband slaves"); - slave_warn(bond_dev, slave_dev, "Type (%d) supports only active-backup mode\n", - slave_dev->type); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Only active-backup mode is supported for infiniband slaves"); res = -EOPNOTSUPP; goto err_undo_flags; } @@ -1839,8 +1867,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, bond->params.fail_over_mac = BOND_FOM_ACTIVE; slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n"); } else { - NL_SET_ERR_MSG(extack, "Slave device does not support setting the MAC address, but fail_over_mac is not set to active"); - slave_err(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n"); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Slave device does not support setting the MAC address, but fail_over_mac is not set to active"); res = -EOPNOTSUPP; goto err_undo_flags; } @@ -2133,6 +2161,39 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, bond_update_slave_arr(bond, NULL); + if (!slave_dev->netdev_ops->ndo_bpf || + !slave_dev->netdev_ops->ndo_xdp_xmit) { + if (bond->xdp_prog) { + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Slave does not support XDP"); + res = -EOPNOTSUPP; + goto err_sysfs_del; + } + } else { + struct netdev_bpf xdp = { + .command = XDP_SETUP_PROG, + .flags = 0, + .prog = bond->xdp_prog, + .extack = extack, + }; + + if (dev_xdp_prog_count(slave_dev) > 0) { + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Slave has XDP program loaded, please unload before enslaving"); + res = -EOPNOTSUPP; + goto err_sysfs_del; + } + + res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); + if (res < 0) { + /* ndo_bpf() sets extack error message */ + slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res); + goto err_sysfs_del; + } + if (bond->xdp_prog) + bpf_prog_inc(bond->xdp_prog); + } + slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n", bond_is_active_slave(new_slave) ? "an active" : "a backup", new_slave->link != BOND_LINK_DOWN ? 
"an up" : "a down"); @@ -2252,7 +2313,17 @@ static int __bond_release_one(struct net_device *bond_dev, /* recompute stats just before removing the slave */ bond_get_stats(bond->dev, &bond->bond_stats); - bond_upper_dev_unlink(bond, slave); + if (bond->xdp_prog) { + struct netdev_bpf xdp = { + .command = XDP_SETUP_PROG, + .flags = 0, + .prog = NULL, + .extack = NULL, + }; + if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp)) + slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n"); + } + /* unregister rx_handler early so bond_handle_frame wouldn't be called * for this slave anymore. */ @@ -2261,6 +2332,8 @@ static int __bond_release_one(struct net_device *bond_dev, if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_unbind_slave(slave); + bond_upper_dev_unlink(bond, slave); + if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, slave); @@ -3613,90 +3686,112 @@ static struct notifier_block bond_netdev_notifier = { /*---------------------------- Hashing Policies -----------------------------*/ +/* Helper to access data in a packet, with or without a backing skb. + * If skb is given the data is linearized if necessary via pskb_may_pull. + */ +static inline const void *bond_pull_data(struct sk_buff *skb, + const void *data, int hlen, int n) +{ + if (likely(n <= hlen)) + return data; + else if (skb && likely(pskb_may_pull(skb, n))) + return skb->head; + + return NULL; +} + /* L2 hash helper */ -static inline u32 bond_eth_hash(struct sk_buff *skb) +static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen) { - struct ethhdr *ep, hdr_tmp; + struct ethhdr *ep; - ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp); - if (ep) - return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto; - return 0; + data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr)); + if (!data) + return 0; + + ep = (struct ethhdr *)(data + mhoff); + return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto); } -static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, - int *noff, int *proto, bool l34) +static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data, + int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34) { const struct ipv6hdr *iph6; const struct iphdr *iph; - if (skb->protocol == htons(ETH_P_IP)) { - if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph)))) + if (l2_proto == htons(ETH_P_IP)) { + data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph)); + if (!data) return false; - iph = (const struct iphdr *)(skb->data + *noff); + + iph = (const struct iphdr *)(data + *nhoff); iph_to_flow_copy_v4addrs(fk, iph); - *noff += iph->ihl << 2; + *nhoff += iph->ihl << 2; if (!ip_is_fragment(iph)) - *proto = iph->protocol; - } else if (skb->protocol == htons(ETH_P_IPV6)) { - if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph6)))) + *ip_proto = iph->protocol; + } else if (l2_proto == htons(ETH_P_IPV6)) { + data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6)); + if (!data) return false; - iph6 = (const struct ipv6hdr *)(skb->data + *noff); + + iph6 = (const struct ipv6hdr *)(data + *nhoff); iph_to_flow_copy_v6addrs(fk, iph6); - *noff += sizeof(*iph6); - *proto = iph6->nexthdr; + *nhoff += sizeof(*iph6); + *ip_proto = iph6->nexthdr; } else { return false; } - if (l34 && *proto >= 0) - fk->ports.ports = skb_flow_get_ports(skb, *noff, *proto); + if (l34 && *ip_proto >= 0) + fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen); return true; } -static u32 
bond_vlan_srcmac_hash(struct sk_buff *skb) +static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen) { - struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb); u32 srcmac_vendor = 0, srcmac_dev = 0; - u16 vlan; + struct ethhdr *mac_hdr; + u16 vlan = 0; int i; + data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr)); + if (!data) + return 0; + mac_hdr = (struct ethhdr *)(data + mhoff); + for (i = 0; i < 3; i++) srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i]; for (i = 3; i < ETH_ALEN; i++) srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i]; - if (!skb_vlan_tag_present(skb)) - return srcmac_vendor ^ srcmac_dev; - - vlan = skb_vlan_tag_get(skb); + if (skb && skb_vlan_tag_present(skb)) + vlan = skb_vlan_tag_get(skb); return vlan ^ srcmac_vendor ^ srcmac_dev; } /* Extract the appropriate headers based on bond's xmit policy */ -static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, - struct flow_keys *fk) +static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data, + __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk) { bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34; - int noff, proto = -1; + int ip_proto = -1; switch (bond->params.xmit_policy) { case BOND_XMIT_POLICY_ENCAP23: case BOND_XMIT_POLICY_ENCAP34: memset(fk, 0, sizeof(*fk)); return __skb_flow_dissect(NULL, skb, &flow_keys_bonding, - fk, NULL, 0, 0, 0, 0); + fk, data, l2_proto, nhoff, hlen, 0); default: break; } fk->ports.ports = 0; memset(&fk->icmp, 0, sizeof(fk->icmp)); - noff = skb_network_offset(skb); - if (!bond_flow_ip(skb, fk, &noff, &proto, l34)) + if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34)) return false; /* ICMP error packets contains at least 8 bytes of the header @@ -3704,22 +3799,20 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, * to correlate ICMP error packets within the same flow which * generated the error. */ - if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) { - skb_flow_get_icmp_tci(skb, &fk->icmp, skb->data, - skb_transport_offset(skb), - skb_headlen(skb)); - if (proto == IPPROTO_ICMP) { + if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) { + skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen); + if (ip_proto == IPPROTO_ICMP) { if (!icmp_is_err(fk->icmp.type)) return true; - noff += sizeof(struct icmphdr); - } else if (proto == IPPROTO_ICMPV6) { + nhoff += sizeof(struct icmphdr); + } else if (ip_proto == IPPROTO_ICMPV6) { if (!icmpv6_is_err(fk->icmp.type)) return true; - noff += sizeof(struct icmp6hdr); + nhoff += sizeof(struct icmp6hdr); } - return bond_flow_ip(skb, fk, &noff, &proto, l34); + return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34); } return true; @@ -3735,33 +3828,26 @@ static u32 bond_ip_hash(u32 hash, struct flow_keys *flow) return hash >> 1; } -/** - * bond_xmit_hash - generate a hash value based on the xmit policy - * @bond: bonding device - * @skb: buffer to use for headers - * - * This function will extract the necessary headers from the skb buffer and use - * them to generate a hash based on the xmit_policy set in the bonding device +/* Generate hash based on xmit policy. If @skb is given it is used to linearize + * the data as required, but this function can be used without it if the data is + * known to be linear (e.g. with xdp_buff). 
*/ -u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) +static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data, + __be16 l2_proto, int mhoff, int nhoff, int hlen) { struct flow_keys flow; u32 hash; - if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 && - skb->l4_hash) - return skb->hash; - if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC) - return bond_vlan_srcmac_hash(skb); + return bond_vlan_srcmac_hash(skb, data, mhoff, hlen); if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 || - !bond_flow_dissect(bond, skb, &flow)) - return bond_eth_hash(skb); + !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow)) + return bond_eth_hash(skb, data, mhoff, hlen); if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 || bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) { - hash = bond_eth_hash(skb); + hash = bond_eth_hash(skb, data, mhoff, hlen); } else { if (flow.icmp.id) memcpy(&hash, &flow.icmp, sizeof(hash)); @@ -3772,6 +3858,45 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) return bond_ip_hash(hash, &flow); } +/** + * bond_xmit_hash - generate a hash value based on the xmit policy + * @bond: bonding device + * @skb: buffer to use for headers + * + * This function will extract the necessary headers from the skb buffer and use + * them to generate a hash based on the xmit_policy set in the bonding device + */ +u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) +{ + if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 && + skb->l4_hash) + return skb->hash; + + return __bond_xmit_hash(bond, skb, skb->head, skb->protocol, + skb->mac_header, skb->network_header, + skb_headlen(skb)); +} + +/** + * bond_xmit_hash_xdp - generate a hash value based on the xmit policy + * @bond: bonding device + * @xdp: buffer to use for headers + * + * The XDP variant of bond_xmit_hash. 
+ */ +static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp) +{ + struct ethhdr *eth; + + if (xdp->data + sizeof(struct ethhdr) > xdp->data_end) + return 0; + + eth = (struct ethhdr *)xdp->data; + + return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0, + sizeof(struct ethhdr), xdp->data_end - xdp->data); +} + /*-------------------------- Device entry points ----------------------------*/ void bond_work_init_all(struct bonding *bond) @@ -3962,20 +4087,13 @@ static void bond_get_stats(struct net_device *bond_dev, rcu_read_unlock(); } -static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) +static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) { struct bonding *bond = netdev_priv(bond_dev); - struct net_device *slave_dev = NULL; - struct ifbond k_binfo; - struct ifbond __user *u_binfo = NULL; - struct ifslave k_sinfo; - struct ifslave __user *u_sinfo = NULL; struct mii_ioctl_data *mii = NULL; - struct bond_opt_value newval; - struct net *net; - int res = 0; + int res; - netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd); + netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd); switch (cmd) { case SIOCGMIIPHY: @@ -4000,7 +4118,28 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd } return 0; - case BOND_INFO_QUERY_OLD: + default: + res = -EOPNOTSUPP; + } + + return res; +} + +static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) +{ + struct bonding *bond = netdev_priv(bond_dev); + struct net_device *slave_dev = NULL; + struct ifbond k_binfo; + struct ifbond __user *u_binfo = NULL; + struct ifslave k_sinfo; + struct ifslave __user *u_sinfo = NULL; + struct bond_opt_value newval; + struct net *net; + int res = 0; + + netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd); + + switch (cmd) { case SIOCBONDINFOQUERY: u_binfo = (struct ifbond __user *)ifr->ifr_data; @@ -4012,7 +4151,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd return -EFAULT; return 0; - case BOND_SLAVE_INFO_QUERY_OLD: case SIOCBONDSLAVEINFOQUERY: u_sinfo = (struct ifslave __user *)ifr->ifr_data; @@ -4042,19 +4180,15 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd return -ENODEV; switch (cmd) { - case BOND_ENSLAVE_OLD: case SIOCBONDENSLAVE: res = bond_enslave(bond_dev, slave_dev, NULL); break; - case BOND_RELEASE_OLD: case SIOCBONDRELEASE: res = bond_release(bond_dev, slave_dev); break; - case BOND_SETHWADDR_OLD: case SIOCBONDSETHWADDR: res = bond_set_dev_addr(bond_dev, slave_dev); break; - case BOND_CHANGE_ACTIVE_OLD: case SIOCBONDCHANGEACTIVE: bond_opt_initstr(&newval, slave_dev->name); res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE, @@ -4067,6 +4201,29 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd return res; } +static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr, + void __user *data, int cmd) +{ + struct ifreq ifrdata = { .ifr_data = data }; + + switch (cmd) { + case BOND_INFO_QUERY_OLD: + return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY); + case BOND_SLAVE_INFO_QUERY_OLD: + return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY); + case BOND_ENSLAVE_OLD: + return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE); + case BOND_RELEASE_OLD: + return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE); + case BOND_SETHWADDR_OLD: + return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR); + case BOND_CHANGE_ACTIVE_OLD: + return 
bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE); + } + + return -EOPNOTSUPP; +} + static void bond_change_rx_flags(struct net_device *bond_dev, int change) { struct bonding *bond = netdev_priv(bond_dev); @@ -4388,6 +4545,47 @@ non_igmp: return NULL; } +static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond, + struct xdp_buff *xdp) +{ + struct slave *slave; + int slave_cnt; + u32 slave_id; + const struct ethhdr *eth; + void *data = xdp->data; + + if (data + sizeof(struct ethhdr) > xdp->data_end) + goto non_igmp; + + eth = (struct ethhdr *)data; + data += sizeof(struct ethhdr); + + /* See comment on IGMP in bond_xmit_roundrobin_slave_get() */ + if (eth->h_proto == htons(ETH_P_IP)) { + const struct iphdr *iph; + + if (data + sizeof(struct iphdr) > xdp->data_end) + goto non_igmp; + + iph = (struct iphdr *)data; + + if (iph->protocol == IPPROTO_IGMP) { + slave = rcu_dereference(bond->curr_active_slave); + if (slave) + return slave; + return bond_get_slave_by_id(bond, 0); + } + } + +non_igmp: + slave_cnt = READ_ONCE(bond->slave_cnt); + if (likely(slave_cnt)) { + slave_id = bond_rr_gen_slave_id(bond) % slave_cnt; + return bond_get_slave_by_id(bond, slave_id); + } + return NULL; +} + static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev) { @@ -4401,8 +4599,7 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb, return bond_tx_drop(bond_dev, skb); } -static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond, - struct sk_buff *skb) +static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond) { return rcu_dereference(bond->curr_active_slave); } @@ -4416,7 +4613,7 @@ static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb, struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; - slave = bond_xmit_activebackup_slave_get(bond, skb); + slave = bond_xmit_activebackup_slave_get(bond); if (slave) return bond_dev_queue_xmit(bond, skb, slave->dev); @@ -4604,6 +4801,22 @@ static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond, return slave; } +static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond, + struct xdp_buff *xdp) +{ + struct bond_up_slave *slaves; + unsigned int count; + u32 hash; + + hash = bond_xmit_hash_xdp(bond, xdp); + slaves = rcu_dereference(bond->usable_slaves); + count = slaves ? READ_ONCE(slaves->count) : 0; + if (unlikely(!count)) + return NULL; + + return slaves->arr[hash % count]; +} + /* Use this Xmit function for 3AD as well as XOR modes. The current * usable slave array is formed in the control path. The xmit function * just calculates hash and sends the packet out. 
@@ -4714,7 +4927,7 @@ static struct net_device *bond_xmit_get_slave(struct net_device *master_dev, slave = bond_xmit_roundrobin_slave_get(bond, skb); break; case BOND_MODE_ACTIVEBACKUP: - slave = bond_xmit_activebackup_slave_get(bond, skb); + slave = bond_xmit_activebackup_slave_get(bond); break; case BOND_MODE_8023AD: case BOND_MODE_XOR: @@ -4888,6 +5101,172 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } +static struct net_device * +bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp) +{ + struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave; + + /* Caller needs to hold rcu_read_lock() */ + + switch (BOND_MODE(bond)) { + case BOND_MODE_ROUNDROBIN: + slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp); + break; + + case BOND_MODE_ACTIVEBACKUP: + slave = bond_xmit_activebackup_slave_get(bond); + break; + + case BOND_MODE_8023AD: + case BOND_MODE_XOR: + slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp); + break; + + default: + /* Should never happen. Mode guarded by bond_xdp_check() */ + netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond)); + WARN_ON_ONCE(1); + return NULL; + } + + if (slave) + return slave->dev; + + return NULL; +} + +static int bond_xdp_xmit(struct net_device *bond_dev, + int n, struct xdp_frame **frames, u32 flags) +{ + int nxmit, err = -ENXIO; + + rcu_read_lock(); + + for (nxmit = 0; nxmit < n; nxmit++) { + struct xdp_frame *frame = frames[nxmit]; + struct xdp_frame *frames1[] = {frame}; + struct net_device *slave_dev; + struct xdp_buff xdp; + + xdp_convert_frame_to_buff(frame, &xdp); + + slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp); + if (!slave_dev) { + err = -ENXIO; + break; + } + + err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags); + if (err < 1) + break; + } + + rcu_read_unlock(); + + /* If error happened on the first frame then we can pass the error up, otherwise + * report the number of frames that were xmitted. + */ + if (err < 0) + return (nxmit == 0 ? 
err : nxmit); + + return nxmit; +} + +static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct bonding *bond = netdev_priv(dev); + struct list_head *iter; + struct slave *slave, *rollback_slave; + struct bpf_prog *old_prog; + struct netdev_bpf xdp = { + .command = XDP_SETUP_PROG, + .flags = 0, + .prog = prog, + .extack = extack, + }; + int err; + + ASSERT_RTNL(); + + if (!bond_xdp_check(bond)) + return -EOPNOTSUPP; + + old_prog = bond->xdp_prog; + bond->xdp_prog = prog; + + bond_for_each_slave(bond, slave, iter) { + struct net_device *slave_dev = slave->dev; + + if (!slave_dev->netdev_ops->ndo_bpf || + !slave_dev->netdev_ops->ndo_xdp_xmit) { + SLAVE_NL_ERR(dev, slave_dev, extack, + "Slave device does not support XDP"); + err = -EOPNOTSUPP; + goto err; + } + + if (dev_xdp_prog_count(slave_dev) > 0) { + SLAVE_NL_ERR(dev, slave_dev, extack, + "Slave has XDP program loaded, please unload before enslaving"); + err = -EOPNOTSUPP; + goto err; + } + + err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); + if (err < 0) { + /* ndo_bpf() sets extack error message */ + slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err); + goto err; + } + if (prog) + bpf_prog_inc(prog); + } + + if (old_prog) + bpf_prog_put(old_prog); + + if (prog) + static_branch_inc(&bpf_master_redirect_enabled_key); + else + static_branch_dec(&bpf_master_redirect_enabled_key); + + return 0; + +err: + /* unwind the program changes */ + bond->xdp_prog = old_prog; + xdp.prog = old_prog; + xdp.extack = NULL; /* do not overwrite original error */ + + bond_for_each_slave(bond, rollback_slave, iter) { + struct net_device *slave_dev = rollback_slave->dev; + int err_unwind; + + if (slave == rollback_slave) + break; + + err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp); + if (err_unwind < 0) + slave_err(dev, slave_dev, + "Error %d when unwinding XDP program change\n", err_unwind); + else if (xdp.prog) + bpf_prog_inc(xdp.prog); + } + return err; +} + +static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return bond_xdp_set(dev, xdp->prog, xdp->extack); + default: + return -EINVAL; + } +} + static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) { if (speed == 0 || speed == SPEED_UNKNOWN) @@ -4955,7 +5334,9 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_start_xmit = bond_start_xmit, .ndo_select_queue = bond_select_queue, .ndo_get_stats64 = bond_get_stats, - .ndo_do_ioctl = bond_do_ioctl, + .ndo_eth_ioctl = bond_eth_ioctl, + .ndo_siocbond = bond_do_ioctl, + .ndo_siocdevprivate = bond_siocdevprivate, .ndo_change_rx_flags = bond_change_rx_flags, .ndo_set_rx_mode = bond_set_rx_mode, .ndo_change_mtu = bond_change_mtu, @@ -4974,6 +5355,9 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_features_check = passthru_features_check, .ndo_get_xmit_slave = bond_xmit_get_slave, .ndo_sk_get_lower_dev = bond_sk_get_lower_dev, + .ndo_bpf = bond_xdp, + .ndo_xdp_xmit = bond_xdp_xmit, + .ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave, }; static const struct device_type bond_type = { @@ -5443,6 +5827,7 @@ static int bond_check_params(struct bond_params *params) params->downdelay = downdelay; params->peer_notif_delay = 0; params->use_carrier = use_carrier; + params->lacp_active = 1; params->lacp_fast = lacp_fast; params->primary[0] = 0; params->primary_reselect = primary_reselect_value; diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 
0561ece1ba45..5d54e11d18fa 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -100,6 +100,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = { [IFLA_BOND_MIN_LINKS] = { .type = NLA_U32 }, [IFLA_BOND_LP_INTERVAL] = { .type = NLA_U32 }, [IFLA_BOND_PACKETS_PER_SLAVE] = { .type = NLA_U32 }, + [IFLA_BOND_AD_LACP_ACTIVE] = { .type = NLA_U8 }, [IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 }, [IFLA_BOND_AD_SELECT] = { .type = NLA_U8 }, [IFLA_BOND_AD_INFO] = { .type = NLA_NESTED }, @@ -387,6 +388,16 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[], if (err) return err; } + + if (data[IFLA_BOND_AD_LACP_ACTIVE]) { + int lacp_active = nla_get_u8(data[IFLA_BOND_AD_LACP_ACTIVE]); + + bond_opt_initval(&newval, lacp_active); + err = __bond_opt_set(bond, BOND_OPT_LACP_ACTIVE, &newval); + if (err) + return err; + } + if (data[IFLA_BOND_AD_LACP_RATE]) { int lacp_rate = nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]); @@ -490,6 +501,7 @@ static size_t bond_get_size(const struct net_device *bond_dev) nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */ nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */ nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */ + nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_ACTIVE */ nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */ nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */ nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */ @@ -622,6 +634,10 @@ static int bond_fill_info(struct sk_buff *skb, packets_per_slave)) goto nla_put_failure; + if (nla_put_u8(skb, IFLA_BOND_AD_LACP_ACTIVE, + bond->params.lacp_active)) + goto nla_put_failure; + if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE, bond->params.lacp_fast)) goto nla_put_failure; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 0cf25de6f46d..a8fde3bc458f 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -58,6 +58,8 @@ static int bond_option_lp_interval_set(struct bonding *bond, const struct bond_opt_value *newval); static int bond_option_pps_set(struct bonding *bond, const struct bond_opt_value *newval); +static int bond_option_lacp_active_set(struct bonding *bond, + const struct bond_opt_value *newval); static int bond_option_lacp_rate_set(struct bonding *bond, const struct bond_opt_value *newval); static int bond_option_ad_select_set(struct bonding *bond, @@ -135,6 +137,12 @@ static const struct bond_opt_value bond_intmax_tbl[] = { { NULL, -1, 0} }; +static const struct bond_opt_value bond_lacp_active[] = { + { "off", 0, 0}, + { "on", 1, BOND_VALFLAG_DEFAULT}, + { NULL, -1, 0} +}; + static const struct bond_opt_value bond_lacp_rate_tbl[] = { { "slow", AD_LACP_SLOW, 0}, { "fast", AD_LACP_FAST, 0}, @@ -283,6 +291,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = { .values = bond_intmax_tbl, .set = bond_option_updelay_set }, + [BOND_OPT_LACP_ACTIVE] = { + .id = BOND_OPT_LACP_ACTIVE, + .name = "lacp_active", + .desc = "Send LACPDU frames with configured lacp rate or acts as speak when spoken to", + .flags = BOND_OPTFLAG_IFDOWN, + .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)), + .values = bond_lacp_active, + .set = bond_option_lacp_active_set + }, [BOND_OPT_LACP_RATE] = { .id = BOND_OPT_LACP_RATE, .name = "lacp_rate", @@ -1333,6 +1350,16 @@ static int bond_option_pps_set(struct bonding *bond, return 0; } +static int bond_option_lacp_active_set(struct bonding *bond, + const struct bond_opt_value 
*newval) +{ + netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n", + newval->string, newval->value); + bond->params.lacp_active = newval->value; + + return 0; +} + static int bond_option_lacp_rate_set(struct bonding *bond, const struct bond_opt_value *newval) { diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index 0fb1da361bb1..f3e3bfd72556 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -133,6 +133,8 @@ static void bond_info_show_master(struct seq_file *seq) struct ad_info ad_info; seq_puts(seq, "\n802.3ad info\n"); + seq_printf(seq, "LACP active: %s\n", + (bond->params.lacp_active) ? "on" : "off"); seq_printf(seq, "LACP rate: %s\n", (bond->params.lacp_fast) ? "fast" : "slow"); seq_printf(seq, "Min links: %d\n", bond->params.min_links); diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 5f9e9a240226..b9e9842fed94 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -339,10 +339,24 @@ static ssize_t bonding_show_peer_notif_delay(struct device *d, static DEVICE_ATTR(peer_notif_delay, 0644, bonding_show_peer_notif_delay, bonding_sysfs_store_option); -/* Show the LACP interval. */ -static ssize_t bonding_show_lacp(struct device *d, - struct device_attribute *attr, - char *buf) +/* Show the LACP activity and interval. */ +static ssize_t bonding_show_lacp_active(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct bonding *bond = to_bond(d); + const struct bond_opt_value *val; + + val = bond_opt_get_val(BOND_OPT_LACP_ACTIVE, bond->params.lacp_active); + + return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_active); +} +static DEVICE_ATTR(lacp_active, 0644, + bonding_show_lacp_active, bonding_sysfs_store_option); + +static ssize_t bonding_show_lacp_rate(struct device *d, + struct device_attribute *attr, + char *buf) { struct bonding *bond = to_bond(d); const struct bond_opt_value *val; @@ -352,7 +366,7 @@ static ssize_t bonding_show_lacp(struct device *d, return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast); } static DEVICE_ATTR(lacp_rate, 0644, - bonding_show_lacp, bonding_sysfs_store_option); + bonding_show_lacp_rate, bonding_sysfs_store_option); static ssize_t bonding_show_min_links(struct device *d, struct device_attribute *attr, @@ -738,6 +752,7 @@ static struct attribute *per_bond_attrs[] = { &dev_attr_downdelay.attr, &dev_attr_updelay.attr, &dev_attr_peer_notif_delay.attr, + &dev_attr_lacp_active.attr, &dev_attr_lacp_rate.attr, &dev_attr_ad_select.attr, &dev_attr_xmit_hash_policy.attr, diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index e355d3974977..fff259247d52 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -97,7 +97,8 @@ config CAN_AT91 config CAN_FLEXCAN tristate "Support for Freescale FLEXCAN based chips" - depends on OF && HAS_IOMEM + depends on OF || COLDFIRE || COMPILE_TEST + depends on HAS_IOMEM help Say Y here if you want to support for Freescale FlexCAN. 
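To pin down what the new lacp_active option does, here is a standalone sketch (hypothetical struct, not kernel code) of the predicate that ad_periodic_machine() gains in the bond_3ad.c hunk above: with lacp_active off, the periodic machine is held in AD_NO_PERIODIC, so the port emits LACPDUs only in reply to an active partner (the "speak when spoken to" behaviour named in the option description).

#include <assert.h>
#include <stdbool.h>

struct port_flags {
    bool begin;          /* AD_PORT_BEGIN set */
    bool lacp_enabled;   /* AD_PORT_LACP_ENABLED set */
    bool port_enabled;   /* port->is_enabled */
    bool actor_active;   /* actor LACP_STATE_LACP_ACTIVITY */
    bool partner_active; /* partner LACP_STATE_LACP_ACTIVITY */
};

/* Mirrors the enlarged condition that selects AD_NO_PERIODIC. */
static bool ad_no_periodic(const struct port_flags *p, bool lacp_active)
{
    return p->begin || !p->lacp_enabled || !p->port_enabled ||
           (!p->actor_active && !p->partner_active) ||
           !lacp_active;
}

int main(void)
{
    struct port_flags p = {
        .lacp_enabled = true, .port_enabled = true,
        .actor_active = true, .partner_active = true,
    };

    assert(!ad_no_periodic(&p, true));  /* active LACP: periodic tx runs */
    assert(ad_no_periodic(&p, false));  /* lacp_active=off: periodic tx held off */
    return 0;
}

The rest of the plumbing for the option (netlink attribute, procfs and sysfs display, module default) appears in the bond_netlink.c, bond_procfs.c, bond_sysfs.c and bond_main.c hunks above.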
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 04d0bb3ffe89..b06af90a9964 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -43,14 +43,14 @@ enum at91_reg { }; /* Mailbox registers (0 <= i <= 15) */ -#define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20)) -#define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20)) -#define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20)) -#define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20)) -#define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20)) -#define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20)) -#define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20)) -#define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20)) +#define AT91_MMR(i) ((enum at91_reg)(0x200 + ((i) * 0x20))) +#define AT91_MAM(i) ((enum at91_reg)(0x204 + ((i) * 0x20))) +#define AT91_MID(i) ((enum at91_reg)(0x208 + ((i) * 0x20))) +#define AT91_MFID(i) ((enum at91_reg)(0x20C + ((i) * 0x20))) +#define AT91_MSR(i) ((enum at91_reg)(0x210 + ((i) * 0x20))) +#define AT91_MDL(i) ((enum at91_reg)(0x214 + ((i) * 0x20))) +#define AT91_MDH(i) ((enum at91_reg)(0x218 + ((i) * 0x20))) +#define AT91_MCR(i) ((enum at91_reg)(0x21C + ((i) * 0x20))) /* Register bits */ #define AT91_MR_CANEN BIT(0) @@ -87,19 +87,19 @@ enum at91_mb_mode { }; /* Interrupt mask bits */ -#define AT91_IRQ_ERRA (1 << 16) -#define AT91_IRQ_WARN (1 << 17) -#define AT91_IRQ_ERRP (1 << 18) -#define AT91_IRQ_BOFF (1 << 19) -#define AT91_IRQ_SLEEP (1 << 20) -#define AT91_IRQ_WAKEUP (1 << 21) -#define AT91_IRQ_TOVF (1 << 22) -#define AT91_IRQ_TSTP (1 << 23) -#define AT91_IRQ_CERR (1 << 24) -#define AT91_IRQ_SERR (1 << 25) -#define AT91_IRQ_AERR (1 << 26) -#define AT91_IRQ_FERR (1 << 27) -#define AT91_IRQ_BERR (1 << 28) +#define AT91_IRQ_ERRA BIT(16) +#define AT91_IRQ_WARN BIT(17) +#define AT91_IRQ_ERRP BIT(18) +#define AT91_IRQ_BOFF BIT(19) +#define AT91_IRQ_SLEEP BIT(20) +#define AT91_IRQ_WAKEUP BIT(21) +#define AT91_IRQ_TOVF BIT(22) +#define AT91_IRQ_TSTP BIT(23) +#define AT91_IRQ_CERR BIT(24) +#define AT91_IRQ_SERR BIT(25) +#define AT91_IRQ_AERR BIT(26) +#define AT91_IRQ_FERR BIT(27) +#define AT91_IRQ_BERR BIT(28) #define AT91_IRQ_ERR_ALL (0x1fff0000) #define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \ @@ -163,7 +163,7 @@ static const struct can_bittiming_const at91_bittiming_const = { .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, - .brp_min = 2, + .brp_min = 2, .brp_max = 128, .brp_inc = 1, }; @@ -281,19 +281,20 @@ static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg) } static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg, - u32 value) + u32 value) { writel_relaxed(value, priv->reg_base + reg); } static inline void set_mb_mode_prio(const struct at91_priv *priv, - unsigned int mb, enum at91_mb_mode mode, int prio) + unsigned int mb, enum at91_mb_mode mode, + int prio) { at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16)); } static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb, - enum at91_mb_mode mode) + enum at91_mb_mode mode) { set_mb_mode_prio(priv, mb, mode, 0); } @@ -316,8 +317,7 @@ static void at91_setup_mailboxes(struct net_device *dev) unsigned int i; u32 reg_mid; - /* - * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first + /* Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first * mailbox is disabled. The next 11 mailboxes are used as a * reception FIFO. The last mailbox is configured with * overwrite option. 
The overwrite flag indicates a FIFO @@ -368,7 +368,7 @@ static int at91_set_bittiming(struct net_device *dev) } static int at91_get_berr_counter(const struct net_device *dev, - struct can_berr_counter *bec) + struct can_berr_counter *bec) { const struct at91_priv *priv = netdev_priv(dev); u32 reg_ecr = at91_read(priv, AT91_ECR); @@ -423,8 +423,7 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state) priv->can.state = state; } -/* - * theory of operation: +/* theory of operation: * * According to the datasheet priority 0 is the highest priority, 15 * is the lowest. If two mailboxes have the same priority level the @@ -486,8 +485,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */ can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0); - /* - * we have to stop the queue and deliver all messages in case + /* we have to stop the queue and deliver all messages in case * of a prio+mb counter wrap around. This is the case if * tx_next buffer prio and mailbox equals 0. * @@ -515,6 +513,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) static inline void at91_activate_rx_low(const struct at91_priv *priv) { u32 mask = get_mb_rx_low_mask(priv); + at91_write(priv, AT91_TCR, mask); } @@ -526,9 +525,10 @@ static inline void at91_activate_rx_low(const struct at91_priv *priv) * Reenables given mailbox for reception of new CAN messages */ static inline void at91_activate_rx_mb(const struct at91_priv *priv, - unsigned int mb) + unsigned int mb) { u32 mask = 1 << mb; + at91_write(priv, AT91_TCR, mask); } @@ -568,7 +568,7 @@ static void at91_rx_overflow_err(struct net_device *dev) * given can frame. "mb" and "cf" must be valid. */ static void at91_read_mb(struct net_device *dev, unsigned int mb, - struct can_frame *cf) + struct can_frame *cf) { const struct at91_priv *priv = netdev_priv(dev); u32 reg_msr, reg_mid; @@ -582,9 +582,9 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb, reg_msr = at91_read(priv, AT91_MSR(mb)); cf->len = can_cc_dlc2len((reg_msr >> 16) & 0xf); - if (reg_msr & AT91_MSR_MRTR) + if (reg_msr & AT91_MSR_MRTR) { cf->can_id |= CAN_RTR_FLAG; - else { + } else { *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb)); *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb)); } @@ -685,7 +685,7 @@ static int at91_poll_rx(struct net_device *dev, int quota) if (priv->rx_next > get_mb_rx_low_last(priv) && reg_sr & get_mb_rx_low_mask(priv)) netdev_info(dev, - "order of incoming frames cannot be guaranteed\n"); + "order of incoming frames cannot be guaranteed\n"); again: for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next); @@ -718,7 +718,7 @@ static int at91_poll_rx(struct net_device *dev, int quota) } static void at91_poll_err_frame(struct net_device *dev, - struct can_frame *cf, u32 reg_sr) + struct can_frame *cf, u32 reg_sr) { struct at91_priv *priv = netdev_priv(dev); @@ -796,8 +796,7 @@ static int at91_poll(struct napi_struct *napi, int quota) if (reg_sr & get_irq_mb_rx(priv)) work_done += at91_poll_rx(dev, quota - work_done); - /* - * The error bits are clear on read, + /* The error bits are clear on read, * so use saved value from irq handler. 
*/ reg_sr |= priv->reg_sr; @@ -807,6 +806,7 @@ static int at91_poll(struct napi_struct *napi, int quota) if (work_done < quota) { /* enable IRQs for frame errors and all mailboxes >= rx_next */ u32 reg_ier = AT91_IRQ_ERR_FRAME; + reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next); napi_complete_done(napi, work_done); @@ -816,8 +816,7 @@ static int at91_poll(struct napi_struct *napi, int quota) return work_done; } -/* - * theory of operation: +/* theory of operation: * * priv->tx_echo holds the number of the oldest can_frame put for * transmission into the hardware, but not yet ACKed by the CAN tx @@ -846,8 +845,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) /* Disable irq for this TX mailbox */ at91_write(priv, AT91_IDR, 1 << mb); - /* - * only echo if mailbox signals us a transfer + /* only echo if mailbox signals us a transfer * complete (MSR_MRDY). Otherwise it's a tansfer * abort. "can_bus_off()" takes care about the skbs * parked in the echo queue. @@ -862,8 +860,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) } } - /* - * restart queue if we don't have a wrap around but restart if + /* restart queue if we don't have a wrap around but restart if * we get a TX int for the last can frame directly before a * wrap around. */ @@ -873,7 +870,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) } static void at91_irq_err_state(struct net_device *dev, - struct can_frame *cf, enum can_state new_state) + struct can_frame *cf, enum can_state new_state) { struct at91_priv *priv = netdev_priv(dev); u32 reg_idr = 0, reg_ier = 0; @@ -883,8 +880,7 @@ static void at91_irq_err_state(struct net_device *dev, switch (priv->can.state) { case CAN_STATE_ERROR_ACTIVE: - /* - * from: ERROR_ACTIVE + /* from: ERROR_ACTIVE * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF * => : there was a warning int */ @@ -900,8 +896,7 @@ static void at91_irq_err_state(struct net_device *dev, } fallthrough; case CAN_STATE_ERROR_WARNING: - /* - * from: ERROR_ACTIVE, ERROR_WARNING + /* from: ERROR_ACTIVE, ERROR_WARNING * to : ERROR_PASSIVE, BUS_OFF * => : error passive int */ @@ -917,8 +912,7 @@ static void at91_irq_err_state(struct net_device *dev, } break; case CAN_STATE_BUS_OFF: - /* - * from: BUS_OFF + /* from: BUS_OFF * to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE */ if (new_state <= CAN_STATE_ERROR_PASSIVE) { @@ -935,12 +929,10 @@ static void at91_irq_err_state(struct net_device *dev, break; } - /* process state changes depending on the new state */ switch (new_state) { case CAN_STATE_ERROR_ACTIVE: - /* - * actually we want to enable AT91_IRQ_WARN here, but + /* actually we want to enable AT91_IRQ_WARN here, but * it screws up the system under certain * circumstances. 
so just enable AT91_IRQ_ERRP, thus * the "fallthrough" @@ -983,7 +975,7 @@ static void at91_irq_err_state(struct net_device *dev, } static int at91_get_state_by_bec(const struct net_device *dev, - enum can_state *state) + enum can_state *state) { struct can_berr_counter bec; int err; @@ -1004,7 +996,6 @@ static int at91_get_state_by_bec(const struct net_device *dev, return 0; } - static void at91_irq_err(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); @@ -1018,15 +1009,15 @@ static void at91_irq_err(struct net_device *dev) reg_sr = at91_read(priv, AT91_SR); /* we need to look at the unmasked reg_sr */ - if (unlikely(reg_sr & AT91_IRQ_BOFF)) + if (unlikely(reg_sr & AT91_IRQ_BOFF)) { new_state = CAN_STATE_BUS_OFF; - else if (unlikely(reg_sr & AT91_IRQ_ERRP)) + } else if (unlikely(reg_sr & AT91_IRQ_ERRP)) { new_state = CAN_STATE_ERROR_PASSIVE; - else if (unlikely(reg_sr & AT91_IRQ_WARN)) + } else if (unlikely(reg_sr & AT91_IRQ_WARN)) { new_state = CAN_STATE_ERROR_WARNING; - else if (likely(reg_sr & AT91_IRQ_ERRA)) + } else if (likely(reg_sr & AT91_IRQ_ERRA)) { new_state = CAN_STATE_ERROR_ACTIVE; - else { + } else { netdev_err(dev, "BUG! hardware in undefined state\n"); return; } @@ -1053,8 +1044,7 @@ static void at91_irq_err(struct net_device *dev) priv->can.state = new_state; } -/* - * interrupt handler +/* interrupt handler */ static irqreturn_t at91_irq(int irq, void *dev_id) { @@ -1075,8 +1065,7 @@ static irqreturn_t at91_irq(int irq, void *dev_id) /* Receive or error interrupt? -> napi */ if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) { - /* - * The error bits are clear on read, + /* The error bits are clear on read, * save for later use. */ priv->reg_sr = reg_sr; @@ -1133,8 +1122,7 @@ static int at91_open(struct net_device *dev) return err; } -/* - * stop CAN bus activity +/* stop CAN bus activity */ static int at91_close(struct net_device *dev) { @@ -1176,8 +1164,8 @@ static const struct net_device_ops at91_netdev_ops = { .ndo_change_mtu = can_change_mtu, }; -static ssize_t at91_sysfs_show_mb0_id(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t mb0_id_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct at91_priv *priv = netdev_priv(to_net_dev(dev)); @@ -1187,8 +1175,9 @@ static ssize_t at91_sysfs_show_mb0_id(struct device *dev, return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id); } -static ssize_t at91_sysfs_set_mb0_id(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) +static ssize_t mb0_id_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct net_device *ndev = to_net_dev(dev); struct at91_priv *priv = netdev_priv(ndev); @@ -1222,7 +1211,7 @@ static ssize_t at91_sysfs_set_mb0_id(struct device *dev, return ret; } -static DEVICE_ATTR(mb0_id, 0644, at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id); +static DEVICE_ATTR_RW(mb0_id); static struct attribute *at91_sysfs_attrs[] = { &dev_attr_mb0_id.attr, diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 4247ff80a29c..08b6efa7a1a7 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h @@ -176,6 +176,13 @@ struct c_can_raminit { bool needs_pulse; }; +/* c_can tx ring structure */ +struct c_can_tx_ring { + unsigned int head; + unsigned int tail; + unsigned int obj_num; +}; + /* c_can private data structure */ struct c_can_priv { struct can_priv can; /* must be the first member */ @@ -190,17 +197,16 @@ struct c_can_priv { 
unsigned int msg_obj_tx_first; unsigned int msg_obj_tx_last; u32 msg_obj_rx_mask; - atomic_t tx_active; atomic_t sie_pending; unsigned long tx_dir; int last_status; + struct c_can_tx_ring tx; u16 (*read_reg)(const struct c_can_priv *priv, enum reg index); void (*write_reg)(const struct c_can_priv *priv, enum reg index, u16 val); u32 (*read_reg32)(const struct c_can_priv *priv, enum reg index); void (*write_reg32)(const struct c_can_priv *priv, enum reg index, u32 val); void __iomem *base; const u16 *regs; - void *priv; /* for board-specific data */ enum c_can_dev_id type; struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */ void (*raminit)(const struct c_can_priv *priv, bool enable); @@ -220,4 +226,19 @@ int c_can_power_down(struct net_device *dev); void c_can_set_ethtool_ops(struct net_device *dev); +static inline u8 c_can_get_tx_head(const struct c_can_tx_ring *ring) +{ + return ring->head & (ring->obj_num - 1); +} + +static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring) +{ + return ring->tail & (ring->obj_num - 1); +} + +static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring) +{ + return ring->obj_num - (ring->head - ring->tail); +} + #endif /* C_CAN_H */ diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c index 7588f70ca0fe..52671d1ea17d 100644 --- a/drivers/net/can/c_can/c_can_main.c +++ b/drivers/net/can/c_can/c_can_main.c @@ -160,8 +160,8 @@ #define IF_MCONT_TX (IF_MCONT_TXIE | IF_MCONT_EOB) -/* Use IF1 for RX and IF2 for TX */ -#define IF_RX 0 +/* Use IF1 in NAPI path and IF2 in TX path */ +#define IF_NAPI 0 #define IF_TX 1 /* minimum timeout for checking BUSY status */ @@ -427,24 +427,51 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface, c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP); } +static bool c_can_tx_busy(const struct c_can_priv *priv, + const struct c_can_tx_ring *tx_ring) +{ + if (c_can_get_tx_free(tx_ring) > 0) + return false; + + netif_stop_queue(priv->dev); + + /* Memory barrier before checking tx_free (head and tail) */ + smp_mb(); + + if (c_can_get_tx_free(tx_ring) == 0) { + netdev_dbg(priv->dev, + "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n", + tx_ring->head, tx_ring->tail, + tx_ring->head - tx_ring->tail); + return true; + } + + netif_start_queue(priv->dev); + return false; +} + static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct can_frame *frame = (struct can_frame *)skb->data; struct c_can_priv *priv = netdev_priv(dev); - u32 idx, obj; + struct c_can_tx_ring *tx_ring = &priv->tx; + u32 idx, obj, cmd = IF_COMM_TX; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; - /* This is not a FIFO. C/D_CAN sends out the buffers - * prioritized. The lowest buffer number wins. - */ - idx = fls(atomic_read(&priv->tx_active)); - obj = idx + priv->msg_obj_tx_first; - /* If this is the last buffer, stop the xmit queue */ - if (idx == priv->msg_obj_tx_num - 1) + if (c_can_tx_busy(priv, tx_ring)) + return NETDEV_TX_BUSY; + + idx = c_can_get_tx_head(tx_ring); + tx_ring->head++; + if (c_can_get_tx_free(tx_ring) == 0) netif_stop_queue(dev); + + if (idx < c_can_get_tx_tail(tx_ring)) + cmd &= ~IF_COMM_TXRQST; /* Cache the message */ + /* Store the message in the interface so we can call * can_put_echo_skb(). We must do this before we enable * transmit as we might race against do_tx(). 
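The c_can helpers above replace the old tx_active bitmask with a classic ring: head and tail are free-running counters, masking with obj_num - 1 yields a mailbox index, and the unsigned subtraction head - tail gives the in-flight count even across counter wrap-around — which is why obj_num must be a power of two. A compilable sketch of just that arithmetic (userspace C, names chosen to mirror the helpers):

#include <assert.h>
#include <limits.h>

struct tx_ring {
        unsigned int head;      /* free-running producer counter */
        unsigned int tail;      /* free-running consumer counter */
        unsigned int obj_num;   /* must be a power of two */
};

static unsigned int ring_head(const struct tx_ring *r)
{
        return r->head & (r->obj_num - 1);      /* next mailbox to fill */
}

static unsigned int ring_free(const struct tx_ring *r)
{
        /* unsigned wrap-around keeps head - tail == frames in flight */
        return r->obj_num - (r->head - r->tail);
}

int main(void)
{
        struct tx_ring r = { .obj_num = 16 };

        for (int i = 0; i < 16; i++)            /* fill the ring */
                r.head++;
        assert(ring_free(&r) == 0);             /* queue must stop here */

        r.head = 3;                     /* producer wrapped past UINT_MAX */
        r.tail = UINT_MAX - 4;          /* consumer has not wrapped yet */
        assert(r.head - r.tail == 8);   /* still the right distance */
        assert(ring_free(&r) == 8);
        assert(ring_head(&r) == 3);

        return 0;
}

This is the same idiom most Ethernet drivers use for their descriptor rings; the masking trick only works because obj_num divides UINT_MAX + 1.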
@@ -452,11 +479,8 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, c_can_setup_tx_object(dev, IF_TX, frame, idx); priv->dlc[idx] = frame->len; can_put_echo_skb(skb, dev, idx, 0); - - /* Update the active bits */ - atomic_add(BIT(idx), &priv->tx_active); - /* Start transmission */ - c_can_object_put(dev, IF_TX, obj, IF_COMM_TX); + obj = idx + priv->msg_obj_tx_first; + c_can_object_put(dev, IF_TX, obj, cmd); return NETDEV_TX_OK; } @@ -529,13 +553,13 @@ static void c_can_configure_msg_objects(struct net_device *dev) /* first invalidate all message objects */ for (i = priv->msg_obj_rx_first; i <= priv->msg_obj_num; i++) - c_can_inval_msg_object(dev, IF_RX, i); + c_can_inval_msg_object(dev, IF_NAPI, i); /* setup receive message objects */ for (i = priv->msg_obj_rx_first; i < priv->msg_obj_rx_last; i++) - c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV); + c_can_setup_receive_object(dev, IF_NAPI, i, 0, 0, IF_MCONT_RCV); - c_can_setup_receive_object(dev, IF_RX, priv->msg_obj_rx_last, 0, 0, + c_can_setup_receive_object(dev, IF_NAPI, priv->msg_obj_rx_last, 0, 0, IF_MCONT_RCV_EOB); } @@ -567,6 +591,7 @@ static int c_can_software_reset(struct net_device *dev) static int c_can_chip_config(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); + struct c_can_tx_ring *tx_ring = &priv->tx; int err; err = c_can_software_reset(dev); @@ -598,7 +623,8 @@ static int c_can_chip_config(struct net_device *dev) priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); /* Clear all internal status */ - atomic_set(&priv->tx_active, 0); + tx_ring->head = 0; + tx_ring->tail = 0; priv->tx_dir = 0; /* set bittiming params */ @@ -696,40 +722,57 @@ static int c_can_get_berr_counter(const struct net_device *dev, static void c_can_do_tx(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); + struct c_can_tx_ring *tx_ring = &priv->tx; struct net_device_stats *stats = &dev->stats; - u32 idx, obj, pkts = 0, bytes = 0, pend, clr; + u32 idx, obj, pkts = 0, bytes = 0, pend; + u8 tail; if (priv->msg_obj_tx_last > 32) pend = priv->read_reg32(priv, C_CAN_INTPND3_REG); else pend = priv->read_reg(priv, C_CAN_INTPND2_REG); - clr = pend; while ((idx = ffs(pend))) { idx--; pend &= ~BIT(idx); obj = idx + priv->msg_obj_tx_first; - /* We use IF_RX interface instead of IF_TX because we + /* We use IF_NAPI interface instead of IF_TX because we * are called from c_can_poll(), which runs inside - * NAPI. We are not trasmitting. + * NAPI. We are not transmitting. */ - c_can_inval_tx_object(dev, IF_RX, obj); + c_can_inval_tx_object(dev, IF_NAPI, obj); can_get_echo_skb(dev, idx, NULL); bytes += priv->dlc[idx]; pkts++; } - /* Clear the bits in the tx_active mask */ - atomic_sub(clr, &priv->tx_active); + if (!pkts) + return; - if (clr & BIT(priv->msg_obj_tx_num - 1)) - netif_wake_queue(dev); + tx_ring->tail += pkts; + if (c_can_get_tx_free(tx_ring)) { + /* Make sure that anybody stopping the queue after + * this sees the new tx_ring->tail. 
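The comment being closed here is one half of a barrier pairing: c_can_tx_busy() stops the queue and then re-reads tx_free, while the completion side advances tail and then re-checks, so whichever side runs second is guaranteed to observe the other's update and no wake-up is lost. A userspace analogue of the pairing, with C11 fences standing in for smp_mb() and an atomic flag standing in for netif_stop_queue()/netif_wake_queue() (all names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

#define RING_SIZE 16u

static atomic_uint head, tail;
static atomic_bool queue_stopped;

static unsigned int tx_free(void)
{
        return RING_SIZE - (atomic_load(&head) - atomic_load(&tail));
}

static bool tx_busy(void)                       /* xmit path */
{
        if (tx_free() > 0)
                return false;

        atomic_store(&queue_stopped, true);     /* netif_stop_queue() */
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        if (tx_free() == 0)
                return true;                    /* really full: reject */

        atomic_store(&queue_stopped, false);    /* completion raced us */
        return false;
}

static void tx_done(unsigned int pkts)          /* completion path */
{
        atomic_fetch_add(&tail, pkts);
        atomic_thread_fence(memory_order_seq_cst);      /* pairs with above */
        if (tx_free() > 0)
                atomic_store(&queue_stopped, false);    /* netif_wake_queue() */
}

int main(void)
{
        for (unsigned int i = 0; i < RING_SIZE; i++)
                atomic_fetch_add(&head, 1);     /* ring is now full */
        tx_done(4);                             /* four frames complete */
        return tx_busy();                       /* 0: slots are free again */
}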
+ */ + smp_mb(); + netif_wake_queue(priv->dev); + } - if (pkts) { - stats->tx_bytes += bytes; - stats->tx_packets += pkts; - can_led_event(dev, CAN_LED_EVENT_TX); + stats->tx_bytes += bytes; + stats->tx_packets += pkts; + can_led_event(dev, CAN_LED_EVENT_TX); + + tail = c_can_get_tx_tail(tx_ring); + + if (tail == 0) { + u8 head = c_can_get_tx_head(tx_ring); + + /* Start transmission for all cached messages */ + for (idx = tail; idx < head; idx++) { + obj = idx + priv->msg_obj_tx_first; + c_can_object_put(dev, IF_NAPI, obj, IF_COMM_TXRQST); + } } } @@ -766,14 +809,14 @@ static u32 c_can_adjust_pending(u32 pend, u32 rx_mask) static inline void c_can_rx_object_get(struct net_device *dev, struct c_can_priv *priv, u32 obj) { - c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); + c_can_object_get(dev, IF_NAPI, obj, priv->comm_rcv_high); } static inline void c_can_rx_finalize(struct net_device *dev, struct c_can_priv *priv, u32 obj) { if (priv->type != BOSCH_D_CAN) - c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT); + c_can_object_get(dev, IF_NAPI, obj, IF_COMM_CLR_NEWDAT); } static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, @@ -785,10 +828,12 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, pend &= ~BIT(obj - 1); c_can_rx_object_get(dev, priv, obj); - ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX)); + ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_NAPI)); if (ctrl & IF_MCONT_MSGLST) { - int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl); + int n; + + n = c_can_handle_lost_msg_obj(dev, IF_NAPI, obj, ctrl); pkts += n; quota -= n; @@ -803,7 +848,7 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, continue; /* read the data from the message object */ - c_can_read_msg_object(dev, IF_RX, ctrl); + c_can_read_msg_object(dev, IF_NAPI, ctrl); c_can_rx_finalize(dev, priv, obj); @@ -1205,6 +1250,10 @@ struct net_device *alloc_c_can_dev(int msg_obj_num) priv->msg_obj_tx_last = priv->msg_obj_tx_first + priv->msg_obj_tx_num - 1; + priv->tx.head = 0; + priv->tx.tail = 0; + priv->tx.obj_num = msg_obj_tx_num; + netif_napi_add(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num); priv->dev = dev; diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 36950363682f..86e95e9d6533 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -385,7 +385,6 @@ static int c_can_plat_probe(struct platform_device *pdev) priv->base = addr; priv->device = &pdev->dev; priv->can.clock.freq = clk_get_rate(clk); - priv->priv = clk; priv->type = drvdata->id; platform_set_drvdata(pdev, dev); diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index 311d8564d611..e3d840b81357 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -15,6 +15,7 @@ #include <linux/can/dev.h> #include <linux/can/skb.h> #include <linux/can/led.h> +#include <linux/gpio/consumer.h> #include <linux/of.h> #define MOD_DESC "CAN device driver interface" @@ -400,10 +401,69 @@ void close_candev(struct net_device *dev) } EXPORT_SYMBOL_GPL(close_candev); +static int can_set_termination(struct net_device *ndev, u16 term) +{ + struct can_priv *priv = netdev_priv(ndev); + int set; + + if (term == priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_ENABLED]) + set = 1; + else + set = 0; + + gpiod_set_value(priv->termination_gpio, set); + + return 0; +} + +static int can_get_termination(struct net_device *ndev) +{ + struct 
can_priv *priv = netdev_priv(ndev); + struct device *dev = ndev->dev.parent; + struct gpio_desc *gpio; + u32 term; + int ret; + + /* Disabling termination by default is the safe choice: Else if many + * bus participants enable it, no communication is possible at all. + */ + gpio = devm_gpiod_get_optional(dev, "termination", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) + return dev_err_probe(dev, PTR_ERR(gpio), + "Cannot get termination-gpios\n"); + + if (!gpio) + return 0; + + ret = device_property_read_u32(dev, "termination-ohms", &term); + if (ret) { + netdev_err(ndev, "Cannot get termination-ohms: %pe\n", + ERR_PTR(ret)); + return ret; + } + + if (term > U16_MAX) { + netdev_err(ndev, "Invalid termination-ohms value (%u > %u)\n", + term, U16_MAX); + return -EINVAL; + } + + priv->termination_const_cnt = ARRAY_SIZE(priv->termination_gpio_ohms); + priv->termination_const = priv->termination_gpio_ohms; + priv->termination_gpio = gpio; + priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_DISABLED] = + CAN_TERMINATION_DISABLED; + priv->termination_gpio_ohms[CAN_TERMINATION_GPIO_ENABLED] = term; + priv->do_set_termination = can_set_termination; + + return 0; +} + /* Register the CAN network device */ int register_candev(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); + int err; /* Ensure termination_const, termination_const_cnt and * do_set_termination consistency. All must be either set or @@ -419,6 +479,12 @@ int register_candev(struct net_device *dev) if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt) return -EINVAL; + if (!priv->termination_const) { + err = can_get_termination(dev); + if (err) + return err; + } + dev->rtnl_link_ops = &can_link_ops; netif_carrier_off(dev); diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index e38c2566aff4..80425636049d 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -47,7 +47,7 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[], } if (data[IFLA_CAN_DATA_BITTIMING]) { - if (!is_can_fd || !data[IFLA_CAN_BITTIMING]) + if (!is_can_fd) return -EOPNOTSUPP; } @@ -116,7 +116,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], maskedflags = cm->flags & cm->mask; /* check whether provided bits are allowed to be passed */ - if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic)) + if (maskedflags & ~(priv->ctrlmode_supported | ctrlstatic)) return -EOPNOTSUPP; /* do not check for static fd-non-iso if 'fd' is disabled */ @@ -132,10 +132,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], priv->ctrlmode |= maskedflags; /* CAN_CTRLMODE_FD can only be set when driver supports FD */ - if (priv->ctrlmode & CAN_CTRLMODE_FD) + if (priv->ctrlmode & CAN_CTRLMODE_FD) { dev->mtu = CANFD_MTU; - else + } else { dev->mtu = CAN_MTU; + memset(&priv->data_bittiming, 0, + sizeof(priv->data_bittiming)); + } } if (data[IFLA_CAN_RESTART_MS]) { diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c index ab2c1543786c..37b0cc65237b 100644 --- a/drivers/net/can/dev/rx-offload.c +++ b/drivers/net/can/dev/rx-offload.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2014 Protonic Holland, * David Jander - * Copyright (C) 2014-2017 Pengutronix, + * Copyright (C) 2014-2021 Pengutronix, * Marc Kleine-Budde <kernel@pengutronix.de> */ @@ -174,10 +174,8 @@ can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, 
u64 pending) { - struct sk_buff_head skb_queue; unsigned int i; - - __skb_queue_head_init(&skb_queue); + int received = 0; for (i = offload->mb_first; can_rx_offload_le(offload, i, offload->mb_last); @@ -191,26 +189,12 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, if (IS_ERR_OR_NULL(skb)) continue; - __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare); - } - - if (!skb_queue_empty(&skb_queue)) { - unsigned long flags; - u32 queue_len; - - spin_lock_irqsave(&offload->skb_queue.lock, flags); - skb_queue_splice_tail(&skb_queue, &offload->skb_queue); - spin_unlock_irqrestore(&offload->skb_queue.lock, flags); - - queue_len = skb_queue_len(&offload->skb_queue); - if (queue_len > offload->skb_queue_len_max / 8) - netdev_dbg(offload->dev, "%s: queue_len=%d\n", - __func__, queue_len); - - can_rx_offload_schedule(offload); + __skb_queue_add_sort(&offload->skb_irq_queue, skb, + can_rx_offload_compare); + received++; } - return skb_queue_len(&skb_queue); + return received; } EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp); @@ -226,13 +210,10 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) if (!skb) break; - skb_queue_tail(&offload->skb_queue, skb); + __skb_queue_tail(&offload->skb_irq_queue, skb); received++; } - if (received) - can_rx_offload_schedule(offload); - return received; } EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); @@ -241,7 +222,6 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload, struct sk_buff *skb, u32 timestamp) { struct can_rx_offload_cb *cb; - unsigned long flags; if (skb_queue_len(&offload->skb_queue) > offload->skb_queue_len_max) { @@ -252,11 +232,8 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload, cb = can_rx_offload_get_cb(skb); cb->timestamp = timestamp; - spin_lock_irqsave(&offload->skb_queue.lock, flags); - __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare); - spin_unlock_irqrestore(&offload->skb_queue.lock, flags); - - can_rx_offload_schedule(offload); + __skb_queue_add_sort(&offload->skb_irq_queue, skb, + can_rx_offload_compare); return 0; } @@ -295,13 +272,56 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload, return -ENOBUFS; } - skb_queue_tail(&offload->skb_queue, skb); - can_rx_offload_schedule(offload); + __skb_queue_tail(&offload->skb_irq_queue, skb); return 0; } EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail); +void can_rx_offload_irq_finish(struct can_rx_offload *offload) +{ + unsigned long flags; + int queue_len; + + if (skb_queue_empty_lockless(&offload->skb_irq_queue)) + return; + + spin_lock_irqsave(&offload->skb_queue.lock, flags); + skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue); + spin_unlock_irqrestore(&offload->skb_queue.lock, flags); + + queue_len = skb_queue_len(&offload->skb_queue); + if (queue_len > offload->skb_queue_len_max / 8) + netdev_dbg(offload->dev, "%s: queue_len=%d\n", + __func__, queue_len); + + napi_schedule(&offload->napi); +} +EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish); + +void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload) +{ + unsigned long flags; + int queue_len; + + if (skb_queue_empty_lockless(&offload->skb_irq_queue)) + return; + + spin_lock_irqsave(&offload->skb_queue.lock, flags); + skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue); + spin_unlock_irqrestore(&offload->skb_queue.lock, flags); + + queue_len = skb_queue_len(&offload->skb_queue); + if (queue_len > offload->skb_queue_len_max / 8) + netdev_dbg(offload->dev, "%s: 
queue_len=%d\n", + __func__, queue_len); + + local_bh_disable(); + napi_schedule(&offload->napi); + local_bh_enable(); +} +EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish); + static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) @@ -312,6 +332,7 @@ static int can_rx_offload_init_queue(struct net_device *dev, offload->skb_queue_len_max = 2 << fls(weight); offload->skb_queue_len_max *= 4; skb_queue_head_init(&offload->skb_queue); + __skb_queue_head_init(&offload->skb_irq_queue); netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight); @@ -373,5 +394,6 @@ void can_rx_offload_del(struct can_rx_offload *offload) { netif_napi_del(&offload->napi); skb_queue_purge(&offload->skb_queue); + __skb_queue_purge(&offload->skb_irq_queue); } EXPORT_SYMBOL_GPL(can_rx_offload_del); diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 57f3635ad8d7..7734229aa078 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -28,6 +28,7 @@ #include <linux/of_device.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> +#include <linux/can/platform/flexcan.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> @@ -208,18 +209,19 @@ /* FLEXCAN hardware feature flags * * Below is some version info we got: - * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode + * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB * Filter? connected? Passive detection ption in MB Supported? - * MX25 FlexCAN2 03.00.00.00 no no no no no no - * MX28 FlexCAN2 03.00.04.00 yes yes no no no no - * MX35 FlexCAN2 03.00.00.00 no no no no no no - * MX53 FlexCAN2 03.00.00.00 yes no no no no no - * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no - * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes - * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes - * VF610 FlexCAN3 ? no yes no yes yes? no - * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no - * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes + * MCF5441X FlexCAN2 ? no yes no no yes no 16 + * MX25 FlexCAN2 03.00.00.00 no no no no no no 64 + * MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64 + * MX35 FlexCAN2 03.00.00.00 no no no no no no 64 + * MX53 FlexCAN2 03.00.00.00 yes no no no no no 64 + * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no 64 + * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes 64 + * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes 64 + * VF610 FlexCAN3 ? no yes no yes yes? no 64 + * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no 64 + * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes 64 * * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. 
*/ @@ -246,6 +248,10 @@ #define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10) /* Setup stop mode with SCU firmware to support wakeup */ #define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11) +/* Setup 3 separate interrupts, main, boff and err */ +#define FLEXCAN_QUIRK_NR_IRQ_3 BIT(12) +/* Setup 16 mailboxes */ +#define FLEXCAN_QUIRK_NR_MB_16 BIT(13) /* Structure of the message buffer */ struct flexcan_mb { @@ -363,6 +369,9 @@ struct flexcan_priv { struct regulator *reg_xceiver; struct flexcan_stop_mode stm; + int irq_boff; + int irq_err; + /* IPC handle when setup stop mode by System Controller firmware(scfw) */ struct imx_sc_ipc *sc_ipc_handle; @@ -371,6 +380,11 @@ struct flexcan_priv { void (*write)(u32 val, void __iomem *addr); }; +static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = { + .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16, +}; + static const struct flexcan_devtype_data fsl_p1010_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | FLEXCAN_QUIRK_BROKEN_PERR_STATE | @@ -635,15 +649,19 @@ static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv) static int flexcan_clks_enable(const struct flexcan_priv *priv) { - int err; + int err = 0; - err = clk_prepare_enable(priv->clk_ipg); - if (err) - return err; + if (priv->clk_ipg) { + err = clk_prepare_enable(priv->clk_ipg); + if (err) + return err; + } - err = clk_prepare_enable(priv->clk_per); - if (err) - clk_disable_unprepare(priv->clk_ipg); + if (priv->clk_per) { + err = clk_prepare_enable(priv->clk_per); + if (err) + clk_disable_unprepare(priv->clk_ipg); + } return err; } @@ -1198,6 +1216,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) } } + if (handled) + can_rx_offload_irq_finish(&priv->offload); + return handled; } @@ -1401,8 +1422,12 @@ static int flexcan_rx_offload_setup(struct net_device *dev) priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN; else priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN; - priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) + - (sizeof(priv->regs->mb[1]) / priv->mb_size); + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_MB_16) + priv->mb_count = 16; + else + priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) + + (sizeof(priv->regs->mb[1]) / priv->mb_size); if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) priv->tx_mb_reserved = @@ -1774,6 +1799,18 @@ static int flexcan_open(struct net_device *dev) if (err) goto out_can_rx_offload_disable; + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) { + err = request_irq(priv->irq_boff, + flexcan_irq, IRQF_SHARED, dev->name, dev); + if (err) + goto out_free_irq; + + err = request_irq(priv->irq_err, + flexcan_irq, IRQF_SHARED, dev->name, dev); + if (err) + goto out_free_irq_boff; + } + flexcan_chip_interrupts_enable(dev); can_led_event(dev, CAN_LED_EVENT_OPEN); @@ -1782,6 +1819,10 @@ static int flexcan_open(struct net_device *dev) return 0; + out_free_irq_boff: + free_irq(priv->irq_boff, dev); + out_free_irq: + free_irq(dev->irq, dev); out_can_rx_offload_disable: can_rx_offload_disable(&priv->offload); flexcan_chip_stop(dev); @@ -1803,6 +1844,12 @@ static int flexcan_close(struct net_device *dev) netif_stop_queue(dev); flexcan_chip_interrupts_disable(dev); + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) { + free_irq(priv->irq_err, dev); + free_irq(priv->irq_boff, dev); + } + free_irq(dev->irq, dev); can_rx_offload_disable(&priv->offload); flexcan_chip_stop_disable_on_error(dev); @@ -2039,14 
+2086,26 @@ static const struct of_device_id flexcan_of_match[] = { }; MODULE_DEVICE_TABLE(of, flexcan_of_match); +static const struct platform_device_id flexcan_id_table[] = { + { + .name = "flexcan-mcf5441x", + .driver_data = (kernel_ulong_t)&fsl_mcf5441x_devtype_data, + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(platform, flexcan_id_table); + static int flexcan_probe(struct platform_device *pdev) { + const struct of_device_id *of_id; const struct flexcan_devtype_data *devtype_data; struct net_device *dev; struct flexcan_priv *priv; struct regulator *reg_xceiver; struct clk *clk_ipg = NULL, *clk_per = NULL; struct flexcan_regs __iomem *regs; + struct flexcan_platform_data *pdata; int err, irq; u8 clk_src = 1; u32 clock_freq = 0; @@ -2064,6 +2123,12 @@ static int flexcan_probe(struct platform_device *pdev) "clock-frequency", &clock_freq); of_property_read_u8(pdev->dev.of_node, "fsl,clk-source", &clk_src); + } else { + pdata = dev_get_platdata(&pdev->dev); + if (pdata) { + clock_freq = pdata->clock_frequency; + clk_src = pdata->clk_src; + } } if (!clock_freq) { @@ -2089,7 +2154,14 @@ static int flexcan_probe(struct platform_device *pdev) if (IS_ERR(regs)) return PTR_ERR(regs); - devtype_data = of_device_get_match_data(&pdev->dev); + of_id = of_match_device(flexcan_of_match, &pdev->dev); + if (of_id) + devtype_data = of_id->data; + else if (platform_get_device_id(pdev)->driver_data) + devtype_data = (struct flexcan_devtype_data *) + platform_get_device_id(pdev)->driver_data; + else + return -ENODEV; if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) && !(devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)) { @@ -2133,6 +2205,19 @@ static int flexcan_probe(struct platform_device *pdev) priv->devtype_data = devtype_data; priv->reg_xceiver = reg_xceiver; + if (devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) { + priv->irq_boff = platform_get_irq(pdev, 1); + if (priv->irq_boff <= 0) { + err = -ENODEV; + goto failed_platform_get_irq; + } + priv->irq_err = platform_get_irq(pdev, 2); + if (priv->irq_err <= 0) { + err = -ENODEV; + goto failed_platform_get_irq; + } + } + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) { priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO; @@ -2170,6 +2255,7 @@ static int flexcan_probe(struct platform_device *pdev) failed_register: pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); + failed_platform_get_irq: free_candev(dev); return err; } @@ -2322,6 +2408,7 @@ static struct platform_driver flexcan_driver = { }, .probe = flexcan_probe, .remove = flexcan_remove, + .id_table = flexcan_id_table, }; module_platform_driver(flexcan_driver); diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 2a6c918186c0..c68ad56628bd 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1815,9 +1815,9 @@ static int ican3_get_berr_counter(const struct net_device *ndev, * Sysfs Attributes */ -static ssize_t ican3_sysfs_show_term(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t termination_show(struct device *dev, + struct device_attribute *attr, + char *buf) { struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); int ret; @@ -1834,9 +1834,9 @@ static ssize_t ican3_sysfs_show_term(struct device *dev, return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled); } -static ssize_t ican3_sysfs_set_term(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t termination_store(struct device *dev, + 
struct device_attribute *attr, + const char *buf, size_t count) { struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); unsigned long enable; @@ -1852,18 +1852,17 @@ static ssize_t ican3_sysfs_set_term(struct device *dev, return count; } -static ssize_t ican3_sysfs_show_fwinfo(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t fwinfo_show(struct device *dev, + struct device_attribute *attr, + char *buf) { struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo); } -static DEVICE_ATTR(termination, 0644, ican3_sysfs_show_term, - ican3_sysfs_set_term); -static DEVICE_ATTR(fwinfo, 0444, ican3_sysfs_show_fwinfo, NULL); +static DEVICE_ATTR_RW(termination); +static DEVICE_ATTR_RO(fwinfo); static struct attribute *ican3_sysfs_attrs[] = { &dev_attr_termination.attr, diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 43bca315a66c..2470c47b2e31 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -21,6 +21,7 @@ #include <linux/iopoll.h> #include <linux/can/dev.h> #include <linux/pinctrl/consumer.h> +#include <linux/phy/phy.h> #include "m_can.h" @@ -278,7 +279,7 @@ enum m_can_reg { /* Message RAM Elements */ #define M_CAN_FIFO_ID 0x0 #define M_CAN_FIFO_DLC 0x4 -#define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2)) +#define M_CAN_FIFO_DATA 0x8 /* Rx Buffer Element */ /* R0 */ @@ -308,6 +309,15 @@ enum m_can_reg { #define TX_EVENT_MM_MASK GENMASK(31, 24) #define TX_EVENT_TXTS_MASK GENMASK(15, 0) +/* The ID and DLC registers are adjacent in M_CAN FIFO memory, + * and we can save a (potentially slow) bus round trip by combining + * reads and writes to them. + */ +struct id_and_dlc { + u32 id; + u32 dlc; +}; + static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg) { return cdev->ops->read_reg(cdev, reg); @@ -319,36 +329,39 @@ static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg, cdev->ops->write_reg(cdev, reg, val); } -static u32 m_can_fifo_read(struct m_can_classdev *cdev, - u32 fgi, unsigned int offset) +static int +m_can_fifo_read(struct m_can_classdev *cdev, + u32 fgi, unsigned int offset, void *val, size_t val_count) { u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE + offset; - return cdev->ops->read_fifo(cdev, addr_offset); + return cdev->ops->read_fifo(cdev, addr_offset, val, val_count); } -static void m_can_fifo_write(struct m_can_classdev *cdev, - u32 fpi, unsigned int offset, u32 val) +static int +m_can_fifo_write(struct m_can_classdev *cdev, + u32 fpi, unsigned int offset, const void *val, size_t val_count) { u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE + offset; - cdev->ops->write_fifo(cdev, addr_offset, val); + return cdev->ops->write_fifo(cdev, addr_offset, val, val_count); } -static inline void m_can_fifo_write_no_off(struct m_can_classdev *cdev, - u32 fpi, u32 val) +static inline int m_can_fifo_write_no_off(struct m_can_classdev *cdev, + u32 fpi, u32 val) { - cdev->ops->write_fifo(cdev, fpi, val); + return cdev->ops->write_fifo(cdev, fpi, &val, 1); } -static u32 m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset) +static int +m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val) { u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE + offset; - return cdev->ops->read_fifo(cdev, addr_offset); + return cdev->ops->read_fifo(cdev, addr_offset, val, 1); } static inline bool m_can_tx_fifo_full(struct m_can_classdev 
*cdev) @@ -436,7 +449,7 @@ static void m_can_clean(struct net_device *net) * napi. For non-peripherals, RX is done in napi already, so push * directly. timestamp is used to ensure good skb ordering in * rx-offload and is ignored for non-peripherals. -*/ + */ static void m_can_receive_skb(struct m_can_classdev *cdev, struct sk_buff *skb, u32 timestamp) @@ -454,54 +467,57 @@ static void m_can_receive_skb(struct m_can_classdev *cdev, } } -static void m_can_read_fifo(struct net_device *dev, u32 rxfs) +static int m_can_read_fifo(struct net_device *dev, u32 rxfs) { struct net_device_stats *stats = &dev->stats; struct m_can_classdev *cdev = netdev_priv(dev); struct canfd_frame *cf; struct sk_buff *skb; - u32 id, fgi, dlc; + struct id_and_dlc fifo_header; + u32 fgi; u32 timestamp = 0; - int i; + int err; /* calculate the fifo get index for where to read data */ fgi = FIELD_GET(RXFS_FGI_MASK, rxfs); - dlc = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DLC); - if (dlc & RX_BUF_FDF) + err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2); + if (err) + goto out_fail; + + if (fifo_header.dlc & RX_BUF_FDF) skb = alloc_canfd_skb(dev, &cf); else skb = alloc_can_skb(dev, (struct can_frame **)&cf); if (!skb) { stats->rx_dropped++; - return; + return 0; } - if (dlc & RX_BUF_FDF) - cf->len = can_fd_dlc2len((dlc >> 16) & 0x0F); + if (fifo_header.dlc & RX_BUF_FDF) + cf->len = can_fd_dlc2len((fifo_header.dlc >> 16) & 0x0F); else - cf->len = can_cc_dlc2len((dlc >> 16) & 0x0F); + cf->len = can_cc_dlc2len((fifo_header.dlc >> 16) & 0x0F); - id = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID); - if (id & RX_BUF_XTD) - cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; + if (fifo_header.id & RX_BUF_XTD) + cf->can_id = (fifo_header.id & CAN_EFF_MASK) | CAN_EFF_FLAG; else - cf->can_id = (id >> 18) & CAN_SFF_MASK; + cf->can_id = (fifo_header.id >> 18) & CAN_SFF_MASK; - if (id & RX_BUF_ESI) { + if (fifo_header.id & RX_BUF_ESI) { cf->flags |= CANFD_ESI; netdev_dbg(dev, "ESI Error\n"); } - if (!(dlc & RX_BUF_FDF) && (id & RX_BUF_RTR)) { + if (!(fifo_header.dlc & RX_BUF_FDF) && (fifo_header.id & RX_BUF_RTR)) { cf->can_id |= CAN_RTR_FLAG; } else { - if (dlc & RX_BUF_BRS) + if (fifo_header.dlc & RX_BUF_BRS) cf->flags |= CANFD_BRS; - for (i = 0; i < cf->len; i += 4) - *(u32 *)(cf->data + i) = - m_can_fifo_read(cdev, fgi, - M_CAN_FIFO_DATA(i / 4)); + err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA, + cf->data, DIV_ROUND_UP(cf->len, 4)); + if (err) + goto out_fail; } /* acknowledge rx fifo 0 */ @@ -510,9 +526,15 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs) stats->rx_packets++; stats->rx_bytes += cf->len; - timestamp = FIELD_GET(RX_BUF_RXTS_MASK, dlc); + timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc); m_can_receive_skb(cdev, skb, timestamp); + + return 0; + +out_fail: + netdev_err(dev, "FIFO read returned %d\n", err); + return err; } static int m_can_do_rx_poll(struct net_device *dev, int quota) @@ -520,6 +542,7 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) struct m_can_classdev *cdev = netdev_priv(dev); u32 pkts = 0; u32 rxfs; + int err; rxfs = m_can_read(cdev, M_CAN_RXF0S); if (!(rxfs & RXFS_FFL_MASK)) { @@ -528,7 +551,9 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) } while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { - m_can_read_fifo(dev, rxfs); + err = m_can_read_fifo(dev, rxfs); + if (err) + return err; quota--; pkts++; @@ -874,6 +899,7 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus, static int m_can_rx_handler(struct net_device 
*dev, int quota) { struct m_can_classdev *cdev = netdev_priv(dev); + int rx_work_or_err; int work_done = 0; u32 irqstatus, psr; @@ -910,8 +936,13 @@ static int m_can_rx_handler(struct net_device *dev, int quota) if (irqstatus & IR_ERR_BUS_30X) work_done += m_can_handle_bus_errors(dev, irqstatus, psr); - if (irqstatus & IR_RF0N) - work_done += m_can_do_rx_poll(dev, (quota - work_done)); + if (irqstatus & IR_RF0N) { + rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done)); + if (rx_work_or_err < 0) + return rx_work_or_err; + + work_done += rx_work_or_err; + } end: return work_done; } @@ -919,12 +950,17 @@ end: static int m_can_rx_peripheral(struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); + int work_done; - m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT); + work_done = m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT); - m_can_enable_all_interrupts(cdev); + /* Don't re-enable interrupts if the driver had a fatal error + * (e.g., FIFO read failure). + */ + if (work_done >= 0) + m_can_enable_all_interrupts(cdev); - return 0; + return work_done; } static int m_can_poll(struct napi_struct *napi, int quota) @@ -934,7 +970,11 @@ static int m_can_poll(struct napi_struct *napi, int quota) int work_done; work_done = m_can_rx_handler(dev, quota); - if (work_done < quota) { + + /* Don't re-enable interrupts if the driver had a fatal error + * (e.g., FIFO read failure). + */ + if (work_done >= 0 && work_done < quota) { napi_complete_done(napi, work_done); m_can_enable_all_interrupts(cdev); } @@ -945,7 +985,7 @@ static int m_can_poll(struct napi_struct *napi, int quota) /* Echo tx skb and update net stats. Peripherals use rx-offload for * echo. timestamp is used for peripherals to ensure correct ordering * by rx-offload, and is ignored for non-peripherals. 
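The m_can receive path above leans on the id_and_dlc pairing declared earlier in this file: ID and DLC are adjacent words in Message RAM, so one two-word burst replaces two single-word bus transactions, and the payload follows as one burst of DIV_ROUND_UP(len, 4) words. On a slow SPI-attached controller such as the tcan4x5x, where read_fifo maps to regmap_bulk_read(), collapsing those round trips is the whole point. A standalone sketch of the access pattern, with a plain array standing in for Message RAM and classical-CAN DLC decoding assumed:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct id_and_dlc {
        uint32_t id;
        uint32_t dlc;
};

/* stand-in for cdev->ops->read_fifo(): one burst of val_count words */
static int read_fifo(const uint32_t *mram, unsigned int word, void *val,
                     size_t val_count)
{
        memcpy(val, mram + word, val_count * sizeof(uint32_t));
        return 0;
}

int main(void)
{
        /* fake RX buffer element: standard ID 0x123, DLC 8, 8 data bytes */
        const uint32_t mram[4] = {
                0x123u << 18,                   /* R0: ID field */
                8u << 16,                       /* R1: DLC field */
                0x04030201, 0x08070605,         /* payload words */
        };
        struct id_and_dlc hdr;
        uint8_t data[8];

        read_fifo(mram, 0, &hdr, 2);    /* ID + DLC in a single burst */

        unsigned int len = (hdr.dlc >> 16) & 0xf; /* classical CAN: DLC == len */

        read_fifo(mram, 2, data, DIV_ROUND_UP(len, 4)); /* whole words only */

        /* byte order of the payload here is the host's, for illustration */
        printf("id=0x%03x len=%u data[0]=0x%02x\n",
               (hdr.id >> 18) & 0x7ff, len, data[0]);
        return 0;
}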
-*/ + */ static void m_can_tx_update_stats(struct m_can_classdev *cdev, unsigned int msg_mark, u32 timestamp) @@ -965,7 +1005,7 @@ static void m_can_tx_update_stats(struct m_can_classdev *cdev, stats->tx_packets++; } -static void m_can_echo_tx_event(struct net_device *dev) +static int m_can_echo_tx_event(struct net_device *dev) { u32 txe_count = 0; u32 m_can_txefs; @@ -984,12 +1024,18 @@ static void m_can_echo_tx_event(struct net_device *dev) /* Get and process all sent elements */ for (i = 0; i < txe_count; i++) { u32 txe, timestamp = 0; + int err; /* retrieve get index */ fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_read(cdev, M_CAN_TXEFS)); /* get message marker, timestamp */ - txe = m_can_txe_fifo_read(cdev, fgi, 4); + err = m_can_txe_fifo_read(cdev, fgi, 4, &txe); + if (err) { + netdev_err(dev, "TXE FIFO read returned %d\n", err); + return err; + } + msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe); timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe); @@ -1000,6 +1046,8 @@ static void m_can_echo_tx_event(struct net_device *dev) /* update stats */ m_can_tx_update_stats(cdev, msg_mark, timestamp); } + + return 0; } static irqreturn_t m_can_isr(int irq, void *dev_id) @@ -1031,8 +1079,8 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) m_can_disable_all_interrupts(cdev); if (!cdev->is_peripheral) napi_schedule(&cdev->napi); - else - m_can_rx_peripheral(dev); + else if (m_can_rx_peripheral(dev) < 0) + goto out_fail; } if (cdev->version == 30) { @@ -1050,7 +1098,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) } else { if (ir & IR_TEFN) { /* New TX FIFO Element arrived */ - m_can_echo_tx_event(dev); + if (m_can_echo_tx_event(dev) != 0) + goto out_fail; + can_led_event(dev, CAN_LED_EVENT_TX); if (netif_queue_stopped(dev) && !m_can_tx_fifo_full(cdev)) @@ -1058,6 +1108,13 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) } } + if (cdev->is_peripheral) + can_rx_offload_threaded_irq_finish(&cdev->offload); + + return IRQ_HANDLED; + +out_fail: + m_can_disable_all_interrupts(cdev); return IRQ_HANDLED; } @@ -1302,7 +1359,8 @@ static void m_can_chip_config(struct net_device *dev) m_can_set_bittiming(dev); /* enable internal timestamp generation, with a prescaler of 16. The - * prescaler is applied to the nominal bit timing */ + * prescaler is applied to the nominal bit timing + */ m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf)); m_can_config_endisable(cdev, false); @@ -1436,32 +1494,20 @@ static int m_can_dev_setup(struct m_can_classdev *cdev) case 30: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - cdev->can.bittiming_const = cdev->bit_timing ? - cdev->bit_timing : &m_can_bittiming_const_30X; - - cdev->can.data_bittiming_const = cdev->data_timing ? - cdev->data_timing : - &m_can_data_bittiming_const_30X; + cdev->can.bittiming_const = &m_can_bittiming_const_30X; + cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X; break; case 31: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - cdev->can.bittiming_const = cdev->bit_timing ? - cdev->bit_timing : &m_can_bittiming_const_31X; - - cdev->can.data_bittiming_const = cdev->data_timing ? - cdev->data_timing : - &m_can_data_bittiming_const_31X; + cdev->can.bittiming_const = &m_can_bittiming_const_31X; + cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; break; case 32: case 33: /* Support both MCAN version v3.2.x and v3.3.0 */ - cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_31X; - - cdev->can.data_bittiming_const = cdev->data_timing ? - cdev->data_timing : - &m_can_data_bittiming_const_31X; + cdev->can.bittiming_const = &m_can_bittiming_const_31X; + cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; cdev->can.ctrlmode_supported |= (m_can_niso_supported(cdev) ? @@ -1518,6 +1564,8 @@ static int m_can_close(struct net_device *dev) close_candev(dev); can_led_event(dev, CAN_LED_EVENT_STOP); + phy_power_off(cdev->transceiver); + return 0; } @@ -1540,8 +1588,9 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data; struct net_device *dev = cdev->net; struct sk_buff *skb = cdev->tx_skb; - u32 id, cccr, fdflags; - int i; + struct id_and_dlc fifo_header; + u32 cccr, fdflags; + int err; int putidx; cdev->tx_skb = NULL; @@ -1549,27 +1598,29 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) /* Generate ID field for TX buffer Element */ /* Common to all supported M_CAN versions */ if (cf->can_id & CAN_EFF_FLAG) { - id = cf->can_id & CAN_EFF_MASK; - id |= TX_BUF_XTD; + fifo_header.id = cf->can_id & CAN_EFF_MASK; + fifo_header.id |= TX_BUF_XTD; } else { - id = ((cf->can_id & CAN_SFF_MASK) << 18); + fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18); } if (cf->can_id & CAN_RTR_FLAG) - id |= TX_BUF_RTR; + fifo_header.id |= TX_BUF_RTR; if (cdev->version == 30) { netif_stop_queue(dev); - /* message ram configuration */ - m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, id); - m_can_fifo_write(cdev, 0, M_CAN_FIFO_DLC, - can_fd_len2dlc(cf->len) << 16); + fifo_header.dlc = can_fd_len2dlc(cf->len) << 16; + + /* Write the frame ID, DLC, and payload to the FIFO element. */ + err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2); + if (err) + goto out_fail; - for (i = 0; i < cf->len; i += 4) - m_can_fifo_write(cdev, 0, - M_CAN_FIFO_DATA(i / 4), - *(u32 *)(cf->data + i)); + err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA, + cf->data, DIV_ROUND_UP(cf->len, 4)); + if (err) + goto out_fail; can_put_echo_skb(skb, dev, 0, 0); @@ -1613,8 +1664,11 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) /* get put index for frame */ putidx = FIELD_GET(TXFQS_TFQPI_MASK, m_can_read(cdev, M_CAN_TXFQS)); - /* Write ID Field to FIFO Element */ - m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, id); + + /* Construct DLC Field, with CAN-FD configuration. + * Use the put index of the fifo as the message marker, + * used in the TX interrupt for sending the correct echo frame. + */ /* get CAN FD configuration of frame */ fdflags = 0; @@ -1624,20 +1678,17 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) fdflags |= TX_BUF_BRS; } - /* Construct DLC Field. 
Also contains CAN-FD configuration - * use put index of fifo as message marker - * it is used in TX interrupt for - * sending the correct echo frame - */ - m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DLC, - FIELD_PREP(TX_BUF_MM_MASK, putidx) | - FIELD_PREP(TX_BUF_DLC_MASK, - can_fd_len2dlc(cf->len)) | - fdflags | TX_BUF_EFC); + fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) | + FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) | + fdflags | TX_BUF_EFC; + err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2); + if (err) + goto out_fail; - for (i = 0; i < cf->len; i += 4) - m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA(i / 4), - *(u32 *)(cf->data + i)); + err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA, + cf->data, DIV_ROUND_UP(cf->len, 4)); + if (err) + goto out_fail; /* Push loopback echo. * Will be looped back on TX interrupt based on message marker @@ -1654,6 +1705,11 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) } return NETDEV_TX_OK; + +out_fail: + netdev_err(dev, "FIFO write returned %d\n", err); + m_can_disable_all_interrupts(cdev); + return NETDEV_TX_BUSY; } static void m_can_tx_work_queue(struct work_struct *ws) @@ -1703,10 +1759,14 @@ static int m_can_open(struct net_device *dev) struct m_can_classdev *cdev = netdev_priv(dev); int err; - err = m_can_clk_start(cdev); + err = phy_power_on(cdev->transceiver); if (err) return err; + err = m_can_clk_start(cdev); + if (err) + goto out_phy_power_off; + /* open the can device */ err = open_candev(dev); if (err) { @@ -1763,6 +1823,8 @@ out_wq_fail: close_candev(dev); exit_disable_clks: m_can_clk_stop(cdev); +out_phy_power_off: + phy_power_off(cdev->transceiver); return err; } @@ -1819,9 +1881,10 @@ static void m_can_of_parse_mram(struct m_can_classdev *cdev, cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num); } -void m_can_init_ram(struct m_can_classdev *cdev) +int m_can_init_ram(struct m_can_classdev *cdev) { int end, i, start; + int err = 0; /* initialize the entire Message RAM in use to avoid possible * ECC/parity checksum errors when reading an uninitialized buffer @@ -1830,8 +1893,13 @@ void m_can_init_ram(struct m_can_classdev *cdev) end = cdev->mcfg[MRAM_TXB].off + cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; - for (i = start; i < end; i += 4) - m_can_fifo_write_no_off(cdev, i, 0x0); + for (i = start; i < end; i += 4) { + err = m_can_fifo_write_no_off(cdev, i, 0x0); + if (err) + break; + } + + return err; } EXPORT_SYMBOL_GPL(m_can_init_ram); diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index ace071c3e58c..d18b515e6ccc 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -28,6 +28,7 @@ #include <linux/iopoll.h> #include <linux/can/dev.h> #include <linux/pinctrl/consumer.h> +#include <linux/phy/phy.h> /* m_can lec values */ enum m_can_lec_type { @@ -64,9 +65,9 @@ struct m_can_ops { int (*clear_interrupts)(struct m_can_classdev *cdev); u32 (*read_reg)(struct m_can_classdev *cdev, int reg); int (*write_reg)(struct m_can_classdev *cdev, int reg, int val); - u32 (*read_fifo)(struct m_can_classdev *cdev, int addr_offset); + int (*read_fifo)(struct m_can_classdev *cdev, int addr_offset, void *val, size_t val_count); int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset, - int val); + const void *val, size_t val_count); int (*init)(struct m_can_classdev *cdev); }; @@ -82,9 +83,7 @@ struct m_can_classdev { struct workqueue_struct *tx_wq; struct work_struct tx_work; struct sk_buff *tx_skb; - - struct can_bittiming_const 
*bit_timing; - struct can_bittiming_const *data_timing; + struct phy *transceiver; struct m_can_ops *ops; @@ -102,7 +101,7 @@ void m_can_class_free_dev(struct net_device *net); int m_can_class_register(struct m_can_classdev *cdev); void m_can_class_unregister(struct m_can_classdev *cdev); int m_can_class_get_clocks(struct m_can_classdev *cdev); -void m_can_init_ram(struct m_can_classdev *priv); +int m_can_init_ram(struct m_can_classdev *priv); int m_can_class_suspend(struct device *dev); int m_can_class_resume(struct device *dev); diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c index 128808605c3f..89cc3d41e952 100644 --- a/drivers/net/can/m_can/m_can_pci.c +++ b/drivers/net/can/m_can/m_can_pci.c @@ -39,11 +39,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg) return readl(priv->base + reg); } -static u32 iomap_read_fifo(struct m_can_classdev *cdev, int offset) +static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count) { struct m_can_pci_priv *priv = cdev_to_priv(cdev); - return readl(priv->base + offset); + ioread32_rep(priv->base + offset, val, val_count); + + return 0; } static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val) @@ -55,11 +57,12 @@ static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val) return 0; } -static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, int val) +static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, + const void *val, size_t val_count) { struct m_can_pci_priv *priv = cdev_to_priv(cdev); - writel(val, priv->base + offset); + iowrite32_rep(priv->base + offset, val, val_count); return 0; } diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c index 599de0e08cd7..308d4f2fff00 100644 --- a/drivers/net/can/m_can/m_can_platform.c +++ b/drivers/net/can/m_can/m_can_platform.c @@ -6,6 +6,7 @@ // Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/ #include <linux/platform_device.h> +#include <linux/phy/phy.h> #include "m_can.h" @@ -28,11 +29,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg) return readl(priv->base + reg); } -static u32 iomap_read_fifo(struct m_can_classdev *cdev, int offset) +static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count) { struct m_can_plat_priv *priv = cdev_to_priv(cdev); - return readl(priv->mram_base + offset); + ioread32_rep(priv->mram_base + offset, val, val_count); + + return 0; } static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val) @@ -44,11 +47,12 @@ static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val) return 0; } -static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, int val) +static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, + const void *val, size_t val_count) { struct m_can_plat_priv *priv = cdev_to_priv(cdev); - writel(val, priv->mram_base + offset); + iowrite32_rep(priv->mram_base + offset, val, val_count); return 0; } @@ -67,6 +71,7 @@ static int m_can_plat_probe(struct platform_device *pdev) struct resource *res; void __iomem *addr; void __iomem *mram_addr; + struct phy *transceiver; int irq, ret = 0; mcan_class = m_can_class_allocate_dev(&pdev->dev, @@ -80,8 +85,7 @@ static int m_can_plat_probe(struct platform_device *pdev) if (ret) goto probe_fail; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can"); - addr =
devm_platform_ioremap_resource_byname(pdev, "m_can"); irq = platform_get_irq_byname(pdev, "int0"); if (IS_ERR(addr) || irq < 0) { ret = -EINVAL; @@ -101,6 +105,16 @@ static int m_can_plat_probe(struct platform_device *pdev) goto probe_fail; } + transceiver = devm_phy_optional_get(&pdev->dev, NULL); + if (IS_ERR(transceiver)) { + ret = PTR_ERR(transceiver); + dev_err_probe(&pdev->dev, ret, "failed to get phy\n"); + goto probe_fail; + } + + if (transceiver) + mcan_class->can.bitrate_max = transceiver->attrs.max_link_rate; + priv->base = addr; priv->mram_base = mram_addr; @@ -108,6 +122,7 @@ static int m_can_plat_probe(struct platform_device *pdev) mcan_class->pm_clock_support = 1; mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk); mcan_class->dev = &pdev->dev; + mcan_class->transceiver = transceiver; mcan_class->ops = &m_can_plat_ops; @@ -115,7 +130,9 @@ static int m_can_plat_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mcan_class); - m_can_init_ram(mcan_class); + ret = m_can_init_ram(mcan_class); + if (ret) + goto probe_fail; pm_runtime_enable(mcan_class->dev); ret = m_can_class_register(mcan_class); diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c index 4147cecfbbd6..04687b15b250 100644 --- a/drivers/net/can/m_can/tcan4x5x-core.c +++ b/drivers/net/can/m_can/tcan4x5x-core.c @@ -105,7 +105,6 @@ static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev) { return container_of(cdev, struct tcan4x5x_priv, cdev); - } static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv) @@ -154,14 +153,12 @@ static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg) return val; } -static u32 tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset) +static int tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset, + void *val, size_t val_count) { struct tcan4x5x_priv *priv = cdev_to_priv(cdev); - u32 val; - - regmap_read(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, &val); - return val; + return regmap_bulk_read(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, val, val_count); } static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val) @@ -172,11 +169,11 @@ static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val) } static int tcan4x5x_write_fifo(struct m_can_classdev *cdev, - int addr_offset, int val) + int addr_offset, const void *val, size_t val_count) { struct tcan4x5x_priv *priv = cdev_to_priv(cdev); - return regmap_write(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, val); + return regmap_bulk_write(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, val, val_count); } static int tcan4x5x_power_enable(struct regulator *reg, int enable) @@ -238,7 +235,9 @@ static int tcan4x5x_init(struct m_can_classdev *cdev) return ret; /* Zero out the MCAN buffers */ - m_can_init_ram(cdev); + ret = m_can_init_ram(cdev); + if (ret) + return ret; ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL); diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index e254e04ae257..35892c1efef0 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -279,7 +279,6 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev, static const struct of_device_id mpc5xxx_can_table[]; static int mpc5xxx_can_probe(struct platform_device *ofdev) { - const struct of_device_id *match; const struct mpc5xxx_can_data *data; struct device_node *np = 
ofdev->dev.of_node; struct net_device *dev; @@ -289,10 +288,9 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) int irq, mscan_clksrc = 0; int err = -ENOMEM; - match = of_match_device(mpc5xxx_can_table, &ofdev->dev); - if (!match) + data = of_device_get_match_data(&ofdev->dev); + if (!data) return -EINVAL; - data = match->data; base = of_iomap(np, 0); if (!base) { @@ -319,7 +317,6 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL); - BUG_ON(!data); priv->type = data->type; priv->can.clock.freq = data->get_clock(ofdev, clock_name, &mscan_clksrc); diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig index 29cabc20109e..56320a7f828b 100644 --- a/drivers/net/can/rcar/Kconfig +++ b/drivers/net/can/rcar/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config CAN_RCAR tristate "Renesas R-Car and RZ/G CAN controller" - depends on ARCH_RENESAS || ARM + depends on ARCH_RENESAS || ARM || COMPILE_TEST help Say Y here if you want to use CAN controller found on Renesas R-Car or RZ/G SoCs. @@ -11,7 +11,7 @@ config CAN_RCAR config CAN_RCAR_CANFD tristate "Renesas R-Car CAN FD controller" - depends on ARCH_RENESAS || ARM + depends on ARCH_RENESAS || ARM || COMPILE_TEST help Say Y here if you want to use CAN FD controller found on Renesas R-Car SoCs. The driver puts the controller in CAN FD only diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 311e6ca3bdc4..c47988d3674e 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -37,9 +37,15 @@ #include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/iopoll.h> +#include <linux/reset.h> #define RCANFD_DRV_NAME "rcar_canfd" +enum rcanfd_chip_id { + RENESAS_RCAR_GEN3 = 0, + RENESAS_RZG2L, +}; + /* Global register bits */ /* RSCFDnCFDGRMCFG */ @@ -513,6 +519,9 @@ struct rcar_canfd_global { enum rcar_canfd_fcanclk fcan; /* CANFD or Ext clock */ unsigned long channels_mask; /* Enabled channels mask */ bool fdmode; /* CAN FD or Classical CAN only mode */ + struct reset_control *rstc1; + struct reset_control *rstc2; + enum rcanfd_chip_id chip_id; }; /* CAN FD mode nominal rate constants */ @@ -1070,38 +1079,70 @@ static void rcar_canfd_tx_done(struct net_device *ndev) can_led_event(ndev, CAN_LED_EVENT_TX); } +static void rcar_canfd_handle_global_err(struct rcar_canfd_global *gpriv, u32 ch) +{ + struct rcar_canfd_channel *priv = gpriv->ch[ch]; + struct net_device *ndev = priv->ndev; + u32 gerfl; + + /* Handle global error interrupts */ + gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL); + if (unlikely(RCANFD_GERFL_ERR(gpriv, gerfl))) + rcar_canfd_global_error(ndev); +} + +static irqreturn_t rcar_canfd_global_err_interrupt(int irq, void *dev_id) +{ + struct rcar_canfd_global *gpriv = dev_id; + u32 ch; + + for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) + rcar_canfd_handle_global_err(gpriv, ch); + + return IRQ_HANDLED; +} + +static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u32 ch) +{ + struct rcar_canfd_channel *priv = gpriv->ch[ch]; + u32 ridx = ch + RCANFD_RFFIFO_IDX; + u32 sts; + + /* Handle Rx interrupts */ + sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx)); + if (likely(sts & RCANFD_RFSTS_RFIF)) { + if (napi_schedule_prep(&priv->napi)) { + /* Disable Rx FIFO interrupts */ + rcar_canfd_clear_bit(priv->base, + RCANFD_RFCC(ridx), + RCANFD_RFCC_RFIE); + __napi_schedule(&priv->napi); + } + } +} + +static irqreturn_t 
rcar_canfd_global_receive_fifo_interrupt(int irq, void *dev_id) +{ + struct rcar_canfd_global *gpriv = dev_id; + u32 ch; + + for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) + rcar_canfd_handle_global_receive(gpriv, ch); + + return IRQ_HANDLED; +} + static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id) { struct rcar_canfd_global *gpriv = dev_id; - struct net_device *ndev; - struct rcar_canfd_channel *priv; - u32 sts, gerfl; - u32 ch, ridx; + u32 ch; /* Global error interrupts still indicate a condition specific * to a channel. RxFIFO interrupt is a global interrupt. */ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { - priv = gpriv->ch[ch]; - ndev = priv->ndev; - ridx = ch + RCANFD_RFFIFO_IDX; - - /* Global error interrupts */ - gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL); - if (unlikely(RCANFD_GERFL_ERR(gpriv, gerfl))) - rcar_canfd_global_error(ndev); - - /* Handle Rx interrupts */ - sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx)); - if (likely(sts & RCANFD_RFSTS_RFIF)) { - if (napi_schedule_prep(&priv->napi)) { - /* Disable Rx FIFO interrupts */ - rcar_canfd_clear_bit(priv->base, - RCANFD_RFCC(ridx), - RCANFD_RFCC_RFIE); - __napi_schedule(&priv->napi); - } - } + rcar_canfd_handle_global_err(gpriv, ch); + rcar_canfd_handle_global_receive(gpriv, ch); } return IRQ_HANDLED; } @@ -1139,38 +1180,73 @@ static void rcar_canfd_state_change(struct net_device *ndev, } } -static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id) +static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch) +{ + struct rcar_canfd_channel *priv = gpriv->ch[ch]; + struct net_device *ndev = priv->ndev; + u32 sts; + + /* Handle Tx interrupts */ + sts = rcar_canfd_read(priv->base, + RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX)); + if (likely(sts & RCANFD_CFSTS_CFTXIF)) + rcar_canfd_tx_done(ndev); +} + +static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id) { struct rcar_canfd_global *gpriv = dev_id; - struct net_device *ndev; - struct rcar_canfd_channel *priv; - u32 sts, ch, cerfl; + u32 ch; + + for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) + rcar_canfd_handle_channel_tx(gpriv, ch); + + return IRQ_HANDLED; +} + +static void rcar_canfd_handle_channel_err(struct rcar_canfd_global *gpriv, u32 ch) +{ + struct rcar_canfd_channel *priv = gpriv->ch[ch]; + struct net_device *ndev = priv->ndev; u16 txerr, rxerr; + u32 sts, cerfl; + + /* Handle channel error interrupts */ + cerfl = rcar_canfd_read(priv->base, RCANFD_CERFL(ch)); + sts = rcar_canfd_read(priv->base, RCANFD_CSTS(ch)); + txerr = RCANFD_CSTS_TECCNT(sts); + rxerr = RCANFD_CSTS_RECCNT(sts); + if (unlikely(RCANFD_CERFL_ERR(cerfl))) + rcar_canfd_error(ndev, cerfl, txerr, rxerr); + + /* Handle state change to lower states */ + if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE && + priv->can.state != CAN_STATE_BUS_OFF)) + rcar_canfd_state_change(ndev, txerr, rxerr); +} + +static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id) +{ + struct rcar_canfd_global *gpriv = dev_id; + u32 ch; + + for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) + rcar_canfd_handle_channel_err(gpriv, ch); + + return IRQ_HANDLED; +} + +static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id) +{ + struct rcar_canfd_global *gpriv = dev_id; + u32 ch; /* Common FIFO is a per channel resource */ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { - priv = gpriv->ch[ch]; - ndev = priv->ndev; - - /* Channel error 
interrupts */ - cerfl = rcar_canfd_read(priv->base, RCANFD_CERFL(ch)); - sts = rcar_canfd_read(priv->base, RCANFD_CSTS(ch)); - txerr = RCANFD_CSTS_TECCNT(sts); - rxerr = RCANFD_CSTS_RECCNT(sts); - if (unlikely(RCANFD_CERFL_ERR(cerfl))) - rcar_canfd_error(ndev, cerfl, txerr, rxerr); - - /* Handle state change to lower states */ - if (unlikely((priv->can.state != CAN_STATE_ERROR_ACTIVE) && - (priv->can.state != CAN_STATE_BUS_OFF))) - rcar_canfd_state_change(ndev, txerr, rxerr); - - /* Handle Tx interrupts */ - sts = rcar_canfd_read(priv->base, - RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX)); - if (likely(sts & RCANFD_CFSTS_CFTXIF)) - rcar_canfd_tx_done(ndev); + rcar_canfd_handle_channel_err(gpriv, ch); + rcar_canfd_handle_channel_tx(gpriv, ch); } + return IRQ_HANDLED; } @@ -1577,6 +1653,53 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, priv->can.clock.freq = fcan_freq; dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq); + if (gpriv->chip_id == RENESAS_RZG2L) { + char *irq_name; + int err_irq; + int tx_irq; + + err_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_err" : "ch1_err"); + if (err_irq < 0) { + err = err_irq; + goto fail; + } + + tx_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_trx" : "ch1_trx"); + if (tx_irq < 0) { + err = tx_irq; + goto fail; + } + + irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "canfd.ch%d_err", ch); + if (!irq_name) { + err = -ENOMEM; + goto fail; + } + err = devm_request_irq(&pdev->dev, err_irq, + rcar_canfd_channel_err_interrupt, 0, + irq_name, gpriv); + if (err) { + dev_err(&pdev->dev, "devm_request_irq CH Err(%d) failed, error %d\n", + err_irq, err); + goto fail; + } + irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "canfd.ch%d_trx", ch); + if (!irq_name) { + err = -ENOMEM; + goto fail; + } + err = devm_request_irq(&pdev->dev, tx_irq, + rcar_canfd_channel_tx_interrupt, 0, + irq_name, gpriv); + if (err) { + dev_err(&pdev->dev, "devm_request_irq Tx (%d) failed, error %d\n", + tx_irq, err); + goto fail; + } + } + if (gpriv->fdmode) { priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const; priv->can.data_bittiming_const = @@ -1636,7 +1759,11 @@ static int rcar_canfd_probe(struct platform_device *pdev) struct device_node *of_child; unsigned long channels_mask = 0; int err, ch_irq, g_irq; + int g_err_irq, g_recc_irq; bool fdmode = true; /* CAN FD only mode - default */ + enum rcanfd_chip_id chip_id; + + chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev); if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd")) fdmode = false; /* Classical CAN only mode */ @@ -1649,16 +1776,30 @@ static int rcar_canfd_probe(struct platform_device *pdev) if (of_child && of_device_is_available(of_child)) channels_mask |= BIT(1); /* Channel 1 */ - ch_irq = platform_get_irq(pdev, 0); - if (ch_irq < 0) { - err = ch_irq; - goto fail_dev; - } + if (chip_id == RENESAS_RCAR_GEN3) { + ch_irq = platform_get_irq_byname_optional(pdev, "ch_int"); + if (ch_irq < 0) { + /* For backward compatibility get irq by index */ + ch_irq = platform_get_irq(pdev, 0); + if (ch_irq < 0) + return ch_irq; + } - g_irq = platform_get_irq(pdev, 1); - if (g_irq < 0) { - err = g_irq; - goto fail_dev; + g_irq = platform_get_irq_byname_optional(pdev, "g_int"); + if (g_irq < 0) { + /* For backward compatibility get irq by index */ + g_irq = platform_get_irq(pdev, 1); + if (g_irq < 0) + return g_irq; + } + } else { + g_err_irq = platform_get_irq_byname(pdev, "g_err"); + if (g_err_irq < 0) + return g_err_irq; + + g_recc_irq = 
platform_get_irq_byname(pdev, "g_recc"); + if (g_recc_irq < 0) + return g_recc_irq; } /* Global controller context */ @@ -1670,6 +1811,19 @@ static int rcar_canfd_probe(struct platform_device *pdev) gpriv->pdev = pdev; gpriv->channels_mask = channels_mask; gpriv->fdmode = fdmode; + gpriv->chip_id = chip_id; + + if (gpriv->chip_id == RENESAS_RZG2L) { + gpriv->rstc1 = devm_reset_control_get_exclusive(&pdev->dev, "rstp_n"); + if (IS_ERR(gpriv->rstc1)) + return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc1), + "failed to get rstp_n\n"); + + gpriv->rstc2 = devm_reset_control_get_exclusive(&pdev->dev, "rstc_n"); + if (IS_ERR(gpriv->rstc2)) + return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc2), + "failed to get rstc_n\n"); + } /* Peripheral clock */ gpriv->clkp = devm_clk_get(&pdev->dev, "fck"); @@ -1699,7 +1853,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) } fcan_freq = clk_get_rate(gpriv->can_clk); - if (gpriv->fcan == RCANFD_CANFDCLK) + if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id == RENESAS_RCAR_GEN3) /* CANFD clock is further divided by (1/2) within the IP */ fcan_freq /= 2; @@ -1711,20 +1865,51 @@ static int rcar_canfd_probe(struct platform_device *pdev) gpriv->base = addr; /* Request IRQ that's common for both channels */ - err = devm_request_irq(&pdev->dev, ch_irq, - rcar_canfd_channel_interrupt, 0, - "canfd.chn", gpriv); - if (err) { - dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n", - ch_irq, err); - goto fail_dev; + if (gpriv->chip_id == RENESAS_RCAR_GEN3) { + err = devm_request_irq(&pdev->dev, ch_irq, + rcar_canfd_channel_interrupt, 0, + "canfd.ch_int", gpriv); + if (err) { + dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n", + ch_irq, err); + goto fail_dev; + } + + err = devm_request_irq(&pdev->dev, g_irq, + rcar_canfd_global_interrupt, 0, + "canfd.g_int", gpriv); + if (err) { + dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n", + g_irq, err); + goto fail_dev; + } + } else { + err = devm_request_irq(&pdev->dev, g_recc_irq, + rcar_canfd_global_receive_fifo_interrupt, 0, + "canfd.g_recc", gpriv); + + if (err) { + dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n", + g_recc_irq, err); + goto fail_dev; + } + + err = devm_request_irq(&pdev->dev, g_err_irq, + rcar_canfd_global_err_interrupt, 0, + "canfd.g_err", gpriv); + if (err) { + dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n", + g_err_irq, err); + goto fail_dev; + } } - err = devm_request_irq(&pdev->dev, g_irq, - rcar_canfd_global_interrupt, 0, - "canfd.gbl", gpriv); + + err = reset_control_reset(gpriv->rstc1); + if (err) + goto fail_dev; + err = reset_control_reset(gpriv->rstc2); if (err) { - dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n", - g_irq, err); + reset_control_assert(gpriv->rstc1); goto fail_dev; } @@ -1733,7 +1918,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "failed to enable peripheral clock, error %d\n", err); - goto fail_dev; + goto fail_reset; } err = rcar_canfd_reset_controller(gpriv); @@ -1790,6 +1975,9 @@ fail_mode: rcar_canfd_disable_global_interrupts(gpriv); fail_clk: clk_disable_unprepare(gpriv->clkp); +fail_reset: + reset_control_assert(gpriv->rstc1); + reset_control_assert(gpriv->rstc2); fail_dev: return err; } @@ -1810,6 +1998,9 @@ static int rcar_canfd_remove(struct platform_device *pdev) /* Enter global sleep mode */ rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR); clk_disable_unprepare(gpriv->clkp); + reset_control_assert(gpriv->rstc1); + 
reset_control_assert(gpriv->rstc2); + return 0; } @@ -1827,7 +2018,8 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend, rcar_canfd_resume); static const struct of_device_id rcar_canfd_of_table[] = { - { .compatible = "renesas,rcar-gen3-canfd" }, + { .compatible = "renesas,rcar-gen3-canfd", .data = (void *)RENESAS_RCAR_GEN3 }, + { .compatible = "renesas,rzg2l-canfd", .data = (void *)RENESAS_RZG2L }, { } }; diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 84eac8cb8686..6db90dc4bc9d 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -28,6 +28,10 @@ MODULE_LICENSE("GPL v2"); #define DRV_NAME "peak_pci" +/* FPGA cards FW version registers */ +#define PEAK_VER_REG1 0x40 +#define PEAK_VER_REG2 0x44 + struct peak_pciec_card; struct peak_pci_chan { void __iomem *cfg_base; /* Common for all channels */ @@ -41,9 +45,7 @@ struct peak_pci_chan { #define PEAK_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK) #define PEAK_PCI_OCR OCR_TX0_PUSHPULL -/* - * Important PITA registers - */ +/* Important PITA registers */ #define PITA_ICR 0x00 /* Interrupt control register */ #define PITA_GPIOICR 0x18 /* GPIO interface control register */ #define PITA_MISC 0x1C /* Miscellaneous register */ @@ -70,27 +72,47 @@ static const u16 peak_pci_icr_masks[PEAK_PCI_CHAN_MAX] = { }; static const struct pci_device_id peak_pci_tbl[] = { - {PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,}, + { + PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-PCI", + }, { + PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-PCI Express", + }, { + PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-miniPCI", + }, { + PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-miniPCIe", + }, { + PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-PC/104-Plus Quad", + }, { + PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-PCI/104-Express", + }, { + PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-cPCI", + }, { + PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-Chip PCIe", + }, #ifdef CONFIG_CAN_PEAK_PCIEC - {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, - {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + { + PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-ExpressCard", + }, { + PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, + .driver_data = (kernel_ulong_t)"PCAN-ExpressCard 34", + }, #endif - {0,} + { /* sentinel */ } }; MODULE_DEVICE_TABLE(pci, peak_pci_tbl); #ifdef CONFIG_CAN_PEAK_PCIEC -/* - * PCAN-ExpressCard needs 
I2C bit-banging configuration option. - */ +/* PCAN-ExpressCard needs I2C bit-banging configuration option. */ /* GPIOICR byte access offsets */ #define PITA_GPOUT 0x18 /* GPx output value */ @@ -156,12 +178,14 @@ static void peak_pci_write_reg(const struct sja1000_priv *priv, static inline void pita_set_scl_highz(struct peak_pciec_card *card) { u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SCL; + writeb(gp_outen, card->cfg_base + PITA_GPOEN); } static inline void pita_set_sda_highz(struct peak_pciec_card *card) { u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SDA; + writeb(gp_outen, card->cfg_base + PITA_GPOEN); } @@ -230,9 +254,7 @@ static int pita_getscl(void *data) return (readb(card->cfg_base + PITA_GPIN) & PITA_GPIN_SCL) ? 1 : 0; } -/* - * write commands to the LED chip though the I2C-bus of the PCAN-PCIeC - */ +/* write commands to the LED chip though the I2C-bus of the PCAN-PCIeC */ static int peak_pciec_write_pca9553(struct peak_pciec_card *card, u8 offset, u8 data) { @@ -248,7 +270,7 @@ static int peak_pciec_write_pca9553(struct peak_pciec_card *card, int ret; /* cache led mask */ - if ((offset == 5) && (data == card->led_cache)) + if (offset == 5 && data == card->led_cache) return 0; ret = i2c_transfer(&card->led_chip, &msg, 1); @@ -261,9 +283,7 @@ static int peak_pciec_write_pca9553(struct peak_pciec_card *card, return 0; } -/* - * delayed work callback used to control the LEDs - */ +/* delayed work callback used to control the LEDs */ static void peak_pciec_led_work(struct work_struct *work) { struct peak_pciec_card *card = @@ -309,9 +329,7 @@ static void peak_pciec_led_work(struct work_struct *work) schedule_delayed_work(&card->led_work, HZ); } -/* - * set LEDs blinking state - */ +/* set LEDs blinking state */ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s) { u8 new_led = card->led_cache; @@ -328,25 +346,19 @@ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s) peak_pciec_write_pca9553(card, 5, new_led); } -/* - * start one second delayed work to control LEDs - */ +/* start one second delayed work to control LEDs */ static void peak_pciec_start_led_work(struct peak_pciec_card *card) { schedule_delayed_work(&card->led_work, HZ); } -/* - * stop LEDs delayed work - */ +/* stop LEDs delayed work */ static void peak_pciec_stop_led_work(struct peak_pciec_card *card) { cancel_delayed_work_sync(&card->led_work); } -/* - * initialize the PCA9553 4-bit I2C-bus LED chip - */ +/* initialize the PCA9553 4-bit I2C-bus LED chip */ static int peak_pciec_init_leds(struct peak_pciec_card *card) { int err; @@ -375,17 +387,14 @@ static int peak_pciec_init_leds(struct peak_pciec_card *card) return peak_pciec_write_pca9553(card, 5, PCA9553_LS0_INIT); } -/* - * restore LEDs state to off peak_pciec_leds_exit - */ +/* restore LEDs state to off peak_pciec_leds_exit */ static void peak_pciec_leds_exit(struct peak_pciec_card *card) { /* switch LEDs to off */ peak_pciec_write_pca9553(card, 5, PCA9553_LED_OFF_ALL); } -/* - * normal write sja1000 register method overloaded to catch when controller +/* normal write sja1000 register method overloaded to catch when controller * is started or stopped, to control leds */ static void peak_pciec_write_reg(const struct sja1000_priv *priv, @@ -443,7 +452,7 @@ static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev) /* channel is the first one: do the init part */ } else { /* create the bit banging I2C adapter structure */ - card = kzalloc(sizeof(struct 
peak_pciec_card), GFP_KERNEL); + card = kzalloc(sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; @@ -506,9 +515,7 @@ static void peak_pciec_remove(struct peak_pciec_card *card) #else /* CONFIG_CAN_PEAK_PCIEC */ -/* - * Placebo functions when PCAN-ExpressCard support is not selected - */ +/* Placebo functions when PCAN-ExpressCard support is not selected */ static inline int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev) { return -ENODEV; @@ -549,6 +556,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) void __iomem *cfg_base, *reg_base; u16 sub_sys_id, icr; int i, err, channels; + char fw_str[14] = ""; err = pci_enable_device(pdev); if (err) @@ -602,6 +610,21 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Leave parport mux mode */ writeb(0x04, cfg_base + PITA_MISC + 3); + /* FPGA equipped card if not 0 */ + if (readl(cfg_base + PEAK_VER_REG1)) { + /* FPGA card: display version of the running firmware */ + u32 fw_ver = readl(cfg_base + PEAK_VER_REG2); + + snprintf(fw_str, sizeof(fw_str), " FW v%u.%u.%u", + (fw_ver >> 12) & 0xf, + (fw_ver >> 8) & 0xf, + (fw_ver >> 4) & 0xf); + } + + /* Display commercial name (and, eventually, FW version) of the card */ + dev_info(&pdev->dev, "%ux CAN %s%s\n", + channels, (const char *)ent->driver_data, fw_str); + icr = readw(cfg_base + PITA_ICR + 2); for (i = 0; i < channels; i++) { @@ -642,8 +665,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) chan->prev_dev = pci_get_drvdata(pdev); pci_set_drvdata(pdev, dev); - /* - * PCAN-ExpressCard needs some additional i2c init. + /* PCAN-ExpressCard needs some additional i2c init. * This must be done *before* register_sja1000dev() but * *after* devices linkage */ @@ -709,7 +731,8 @@ failure_disable_pci: /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while * the probe() function must return a negative errno in case of failure - * (err is unchanged if negative) */ + * (err is unchanged if negative) + */ return pcibios_err_to_errno(err); } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 9ae48072b6c6..673861ab665a 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -15,10 +15,10 @@ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/device.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of.h> -#include <linux/of_device.h> #include <linux/pm_runtime.h> +#include <linux/property.h> #include <asm/unaligned.h> @@ -1456,7 +1456,7 @@ mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv, } static void -mcp251xfd_hw_rx_obj_to_skb(struct mcp251xfd_priv *priv, +mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj, struct sk_buff *skb) { @@ -2195,8 +2195,10 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id) FIELD_GET(MCP251XFD_REG_INT_IE_MASK, priv->regs_status.intf); - if (!(intf_pending)) + if (!(intf_pending)) { + can_rx_offload_threaded_irq_finish(&priv->offload); return handled; + } /* Some interrupts must be ACKed in the * MCP251XFD_REG_INT register. 
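
The hunks above route every exit path of the mcp251xfd threaded IRQ handler through can_rx_offload_threaded_irq_finish(), which is what actually kicks NAPI for any skbs the handler queued; returning without it leaves frames sitting in the offload FIFO. A minimal sketch of that handler shape, assuming only the rx-offload API — every demo_* name here is hypothetical, not the driver's real code:

#include <linux/can/rx-offload.h>
#include <linux/interrupt.h>

struct demo_priv {				/* hypothetical driver state */
	struct can_rx_offload offload;
};

static bool demo_irq_pending(struct demo_priv *priv)
{
	return false;				/* stub: poll hardware status here */
}

static irqreturn_t demo_threaded_irq(int irq, void *dev_id)
{
	struct demo_priv *priv = dev_id;
	irqreturn_t handled = IRQ_NONE;

	while (demo_irq_pending(priv)) {
		/* read frames and queue them into &priv->offload */
		handled = IRQ_HANDLED;
	}

	/* Must run on every return path, including the early exits. */
	can_rx_offload_threaded_irq_finish(&priv->offload);
	return handled;
}
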
@@ -2296,6 +2298,8 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id) } while (1); out_fail: + can_rx_offload_threaded_irq_finish(&priv->offload); + netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n", err, priv->regs_status.intf); mcp251xfd_dump(priv); @@ -2524,8 +2528,8 @@ static int mcp251xfd_open(struct net_device *ndev) can_rx_offload_enable(&priv->offload); err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq, - IRQF_ONESHOT, dev_name(&spi->dev), - priv); + IRQF_SHARED | IRQF_ONESHOT, + dev_name(&spi->dev), priv); if (err) goto out_can_rx_offload_disable; @@ -2857,7 +2861,7 @@ static int mcp251xfd_probe(struct spi_device *spi) struct gpio_desc *rx_int; struct regulator *reg_vdd, *reg_xceiver; struct clk *clk; - u32 freq; + u32 freq = 0; int err; if (!spi->irq) @@ -2884,11 +2888,19 @@ static int mcp251xfd_probe(struct spi_device *spi) return dev_err_probe(&spi->dev, PTR_ERR(reg_xceiver), "Failed to get Transceiver regulator!\n"); - clk = devm_clk_get(&spi->dev, NULL); + clk = devm_clk_get_optional(&spi->dev, NULL); if (IS_ERR(clk)) return dev_err_probe(&spi->dev, PTR_ERR(clk), "Failed to get Oscillator (clock)!\n"); - freq = clk_get_rate(clk); + if (clk) { + freq = clk_get_rate(clk); + } else { + err = device_property_read_u32(&spi->dev, "clock-frequency", + &freq); + if (err) + return dev_err_probe(&spi->dev, err, + "Failed to get clock-frequency!\n"); + } /* Sanity check */ if (freq < MCP251XFD_SYSCLOCK_HZ_MIN || diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c index ed3169274d24..712e09186987 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c @@ -13,7 +13,7 @@ static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc) { - struct mcp251xfd_priv *priv; + const struct mcp251xfd_priv *priv; u32 timestamp = 0; int err; @@ -39,7 +39,7 @@ static void mcp251xfd_timestamp_work(struct work_struct *work) MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); } -void mcp251xfd_skb_set_timestamp(struct mcp251xfd_priv *priv, +void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv, struct sk_buff *skb, u32 timestamp) { struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index 1002f3902ad2..0f322dabaf65 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -853,7 +853,7 @@ int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv); u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size, const void *data, size_t data_size); u16 mcp251xfd_crc16_compute(const void *data, size_t data_size); -void mcp251xfd_skb_set_timestamp(struct mcp251xfd_priv *priv, +void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv, struct sk_buff *skb, u32 timestamp); void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv); void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv); diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 73245d8836a9..353062ead98f 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -786,6 +786,8 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) int_status = hecc_read(priv, HECC_CANGIF0); } + can_rx_offload_irq_finish(&priv->offload); + return IRQ_HANDLED; } diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 95ae740fc311..c6068a251fbe 100644 --- 
a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -476,7 +476,7 @@ static void esd_usb2_write_bulk_callback(struct urb *urb) netif_trans_update(netdev); } -static ssize_t show_firmware(struct device *d, +static ssize_t firmware_show(struct device *d, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(d); @@ -487,9 +487,9 @@ static ssize_t show_firmware(struct device *d, (dev->version >> 8) & 0xf, dev->version & 0xff); } -static DEVICE_ATTR(firmware, 0444, show_firmware, NULL); +static DEVICE_ATTR_RO(firmware); -static ssize_t show_hardware(struct device *d, +static ssize_t hardware_show(struct device *d, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(d); @@ -500,9 +500,9 @@ static ssize_t show_hardware(struct device *d, (dev->version >> 24) & 0xf, (dev->version >> 16) & 0xff); } -static DEVICE_ATTR(hardware, 0444, show_hardware, NULL); +static DEVICE_ATTR_RO(hardware); -static ssize_t show_nets(struct device *d, +static ssize_t nets_show(struct device *d, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(d); @@ -510,7 +510,7 @@ static ssize_t show_nets(struct device *d, return sprintf(buf, "%d", dev->net_count); } -static DEVICE_ATTR(nets, 0444, show_nets, NULL); +static DEVICE_ATTR_RO(nets); static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg) { diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c index 1985f772fc3c..14e360c9f2c9 100644 --- a/drivers/net/can/usb/etas_es58x/es581_4.c +++ b/drivers/net/can/usb/etas_es58x/es581_4.c @@ -355,7 +355,7 @@ static int es581_4_tx_can_msg(struct es58x_priv *priv, return -EMSGSIZE; if (priv->tx_can_msg_cnt == 0) { - msg_len = 1; /* struct es581_4_bulk_tx_can_msg:num_can_msg */ + msg_len = sizeof(es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg); es581_4_fill_urb_header(urb_cmd, ES581_4_CAN_COMMAND_TYPE, ES581_4_CMD_ID_TX_MSG, priv->channel_idx, msg_len); @@ -371,8 +371,7 @@ static int es581_4_tx_can_msg(struct es58x_priv *priv, return ret; /* Fill message contents. */ - tx_can_msg = (struct es581_4_tx_can_msg *) - &es581_4_urb_cmd->bulk_tx_can_msg.tx_can_msg_buf[msg_len - 1]; + tx_can_msg = (typeof(tx_can_msg))&es581_4_urb_cmd->raw_msg[msg_len]; put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id); put_unaligned_le32(priv->tx_head, &tx_can_msg->packet_idx); put_unaligned_le16((u16)es58x_get_flags(skb), &tx_can_msg->flags); diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c index 8e9102482c52..96a13c770e4a 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -19,7 +19,7 @@ #include "es58x_core.h" #define DRV_VERSION "1.00" -MODULE_AUTHOR("Mailhol Vincent <mailhol.vincent@wanadoo.fr>"); +MODULE_AUTHOR("Vincent Mailhol <mailhol.vincent@wanadoo.fr>"); MODULE_AUTHOR("Arunachalam Santhanam <arunachalam.santhanam@in.bosch.com>"); MODULE_DESCRIPTION("Socket CAN driver for ETAS ES58X USB adapters"); MODULE_VERSION(DRV_VERSION); @@ -70,7 +70,7 @@ MODULE_DEVICE_TABLE(usb, es58x_id_table); * bytes (the start of frame) are skipped and the CRC calculation * starts on the third byte. */ -#define ES58X_CRC_CALC_OFFSET 2 +#define ES58X_CRC_CALC_OFFSET sizeof_field(union es58x_urb_cmd, sof) /** * es58x_calculate_crc() - Compute the crc16 of a given URB. 
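
Replacing the bare `2` with sizeof_field(union es58x_urb_cmd, sof) ties ES58X_CRC_CALC_OFFSET to the field it actually skips: the CRC runs over everything between the start-of-frame word and the trailing CRC itself. A sketch of that calculation using the kernel's crc16() helper — the struct layout below is illustrative, not the driver's real union:

#include <linux/crc16.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_urb_cmd {			/* illustrative layout only */
	__le16 sof;			/* start of frame: skipped */
	u8 payload[64];			/* command bytes: covered by the CRC */
	__le16 crc;			/* trailing CRC: also skipped */
} __packed;

#define DEMO_CRC_CALC_OFFSET sizeof_field(struct demo_urb_cmd, sof)

static u16 demo_calculate_crc(const struct demo_urb_cmd *cmd, u16 urb_len)
{
	/* CRC covers [end of sof, start of crc) */
	size_t len = urb_len - DEMO_CRC_CALC_OFFSET - sizeof(cmd->crc);

	return crc16(0, (const u8 *)cmd + DEMO_CRC_CALC_OFFSET, len);
}
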
@@ -2108,6 +2108,25 @@ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx) } /** + * es58x_free_netdevs() - Release all network resources of the device. + * @es58x_dev: ES58X device. + */ +static void es58x_free_netdevs(struct es58x_device *es58x_dev) +{ + int i; + + for (i = 0; i < es58x_dev->num_can_ch; i++) { + struct net_device *netdev = es58x_dev->netdev[i]; + + if (!netdev) + continue; + unregister_candev(netdev); + es58x_dev->netdev[i] = NULL; + free_candev(netdev); + } +} + +/** * es58x_get_product_info() - Get the product information and print them. * @es58x_dev: ES58X device. * @@ -2152,14 +2171,13 @@ static int es58x_get_product_info(struct es58x_device *es58x_dev) /** * es58x_init_es58x_dev() - Initialize the ES58X device. * @intf: USB interface. - * @p_es58x_dev: pointer to the address of the ES58X device. * @driver_info: Quirks of the device. * - * Return: zero on success, errno when any error occurs. + * Return: pointer to an ES58X device on success, error pointer when + * any error occurs. */ -static int es58x_init_es58x_dev(struct usb_interface *intf, - struct es58x_device **p_es58x_dev, - kernel_ulong_t driver_info) +static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf, + kernel_ulong_t driver_info) { struct device *dev = &intf->dev; struct es58x_device *es58x_dev; @@ -2176,7 +2194,7 @@ static int es58x_init_es58x_dev(struct usb_interface *intf, ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out, NULL, NULL); if (ret) - return ret; + return ERR_PTR(ret); if (driver_info & ES58X_FD_FAMILY) { param = &es58x_fd_param; @@ -2186,9 +2204,10 @@ static int es58x_init_es58x_dev(struct usb_interface *intf, ops = &es581_4_ops; } - es58x_dev = kzalloc(es58x_sizeof_es58x_device(param), GFP_KERNEL); + es58x_dev = devm_kzalloc(dev, es58x_sizeof_es58x_device(param), + GFP_KERNEL); if (!es58x_dev) - return -ENOMEM; + return ERR_PTR(-ENOMEM); es58x_dev->param = param; es58x_dev->ops = ops; @@ -2213,9 +2232,7 @@ static int es58x_init_es58x_dev(struct usb_interface *intf, ep_out->bEndpointAddress); es58x_dev->rx_max_packet_size = le16_to_cpu(ep_in->wMaxPacketSize); - *p_es58x_dev = es58x_dev; - - return 0; + return es58x_dev; } /** @@ -2232,30 +2249,21 @@ static int es58x_probe(struct usb_interface *intf, struct es58x_device *es58x_dev; int ch_idx, ret; - ret = es58x_init_es58x_dev(intf, &es58x_dev, id->driver_info); - if (ret) - return ret; + es58x_dev = es58x_init_es58x_dev(intf, id->driver_info); + if (IS_ERR(es58x_dev)) + return PTR_ERR(es58x_dev); ret = es58x_get_product_info(es58x_dev); if (ret) - goto cleanup_es58x_dev; + return ret; for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) { ret = es58x_init_netdev(es58x_dev, ch_idx); - if (ret) - goto cleanup_candev; - } - - return ret; - - cleanup_candev: - for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) - if (es58x_dev->netdev[ch_idx]) { - unregister_candev(es58x_dev->netdev[ch_idx]); - free_candev(es58x_dev->netdev[ch_idx]); + if (ret) { + es58x_free_netdevs(es58x_dev); + return ret; } - cleanup_es58x_dev: - kfree(es58x_dev); + } return ret; } @@ -2270,24 +2278,12 @@ static int es58x_probe(struct usb_interface *intf, static void es58x_disconnect(struct usb_interface *intf) { struct es58x_device *es58x_dev = usb_get_intfdata(intf); - struct net_device *netdev; - int i; dev_info(&intf->dev, "Disconnecting %s %s\n", es58x_dev->udev->manufacturer, es58x_dev->udev->product); - for (i = 0; i < es58x_dev->num_can_ch; i++) { - netdev = es58x_dev->netdev[i]; - 
if (!netdev) - continue; - unregister_candev(netdev); - es58x_dev->netdev[i] = NULL; - free_candev(netdev); - } - + es58x_free_netdevs(es58x_dev); es58x_free_urbs(es58x_dev); - - kfree(es58x_dev); usb_set_intfdata(intf, NULL); } diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h index fcf219e727bf..826a15871573 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.h +++ b/drivers/net/can/usb/etas_es58x/es58x_core.h @@ -287,7 +287,7 @@ struct es58x_priv { * @rx_urb_cmd_max_len: Maximum length of a RX URB command. * @fifo_mask: Bit mask to quickly convert the tx_tail and tx_head * field of the struct es58x_priv into echo_skb - * indexes. Properties: @fifo_mask = echos_skb_max - 1 where + * indexes. Properties: @fifo_mask = echo_skb_max - 1 where * echo_skb_max must be a power of two. Also, echo_skb_max must * not exceed the maximum size of the device internal TX FIFO * length. This parameter is used to control the network queue diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c index 1a2779d383a4..af042aa55f59 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_fd.c +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c @@ -357,8 +357,7 @@ static int es58x_fd_tx_can_msg(struct es58x_priv *priv, return ret; /* Fill message contents. */ - tx_can_msg = (struct es58x_fd_tx_can_msg *) - &es58x_fd_urb_cmd->tx_can_msg_buf[msg_len]; + tx_can_msg = (typeof(tx_can_msg))&es58x_fd_urb_cmd->raw_msg[msg_len]; tx_can_msg->packet_idx = (u8)priv->tx_head; put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id); tx_can_msg->flags = (u8)es58x_get_flags(skb); @@ -463,9 +462,9 @@ static int es58x_fd_get_timestamp(struct es58x_device *es58x_dev) } /* Nominal bittiming constants for ES582.1 and ES584.1 as specified in - * the microcontroller datasheet: "SAM E701/S70/V70/V71 Family" - * section 49.6.8 "MCAN Nominal Bit Timing and Prescaler Register" - * from Microchip. + * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family" section + * 49.6.8 "MCAN Nominal Bit Timing and Prescaler Register" from + * Microchip. * * The values from the specification are the hardware register * values. To convert them to the functional values, all ranges were @@ -484,8 +483,8 @@ static const struct can_bittiming_const es58x_fd_nom_bittiming_const = { }; /* Data bittiming constants for ES582.1 and ES584.1 as specified in - * the microcontroller datasheet: "SAM E701/S70/V70/V71 Family" - * section 49.6.4 "MCAN Data Bit Timing and Prescaler Register" from + * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family" section + * 49.6.4 "MCAN Data Bit Timing and Prescaler Register" from * Microchip. */ static const struct can_bittiming_const es58x_fd_data_bittiming_const = { @@ -501,9 +500,9 @@ static const struct can_bittiming_const es58x_fd_data_bittiming_const = { }; /* Transmission Delay Compensation constants for ES582.1 and ES584.1 - * as specified in the microcontroller datasheet: "SAM - * E701/S70/V70/V71 Family" section 49.6.15 "MCAN Transmitter Delay - * Compensation Register" from Microchip. + * as specified in the microcontroller datasheet: "SAM E70/S70/V70/V71 + * Family" section 49.6.15 "MCAN Transmitter Delay Compensation + * Register" from Microchip. */ static const struct can_tdc_const es58x_tdc_const = { .tdcv_max = 0, /* Manual mode not supported. 
*/ diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.h b/drivers/net/can/usb/etas_es58x/es58x_fd.h index ee18a87e40c0..a191891b8777 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_fd.h +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.h @@ -96,23 +96,14 @@ struct es58x_fd_bittiming { * @ctrlmode: type enum es58x_fd_ctrlmode. * @canfd_enabled: boolean (0: Classical CAN, 1: CAN and/or CANFD). * @data_bittiming: Bittiming for flexible data-rate transmission. - * @tdc_enabled: Transmitter Delay Compensation switch (0: disabled, - * 1: enabled). On very high bitrates, the delay between when the - * bit is sent and received on the CANTX and CANRX pins of the - * transceiver start to be significant enough for errors to occur - * and thus need to be compensated. - * @tdco: Transmitter Delay Compensation Offset. Offset value, in time - * quanta, defining the delay between the start of the bit - * reception on the CANRX pin of the transceiver and the SSP - * (Secondary Sample Point). Valid values: 0 to 127. - * @tdcf: Transmitter Delay Compensation Filter window. Defines the - * minimum value for the SSP position, in time quanta. The - * feature is enabled when TDCF is configured to a value greater - * than TDCO. Valid values: 0 to 127. + * @tdc_enabled: Transmitter Delay Compensation switch (0: TDC is + * disabled, 1: TDC is enabled). + * @tdco: Transmitter Delay Compensation Offset. + * @tdcf: Transmitter Delay Compensation Filter window. * - * Please refer to the microcontroller datasheet: "SAM - * E701/S70/V70/V71 Family" section 49 "Controller Area Network - * (MCAN)" for additional information. + * Please refer to the microcontroller datasheet: "SAM E70/S70/V70/V71 + * Family" section 49 "Controller Area Network (MCAN)" for additional + * information. 
*/ struct es58x_fd_tx_conf_msg { struct es58x_fd_bittiming nominal_bittiming; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 899a3d21b77f..837b3fecd71e 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -63,6 +63,8 @@ #define PCAN_USB_MSG_HEADER_LEN 2 +#define PCAN_USB_MSG_TX_CAN 2 /* Tx msg is a CAN frame */ + /* PCAN-USB adapter internal clock (MHz) */ #define PCAN_USB_CRYSTAL_HZ 16000000 @@ -73,6 +75,10 @@ #define PCAN_USB_STATUSLEN_RTR (1 << 4) #define PCAN_USB_STATUSLEN_DLC (0xf) +/* PCAN-USB 4.1 CAN Id tx extended flags */ +#define PCAN_USB_TX_SRR 0x01 /* SJA1000 SRR command */ +#define PCAN_USB_TX_AT 0x02 /* SJA1000 AT command */ + /* PCAN-USB error flags */ #define PCAN_USB_ERROR_TXFULL 0x01 #define PCAN_USB_ERROR_RXQOVR 0x02 @@ -385,7 +391,8 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id) if (err) netdev_err(dev->netdev, "getting device id failure: %d\n", err); - *device_id = args[0]; + else + *device_id = args[0]; return err; } @@ -446,145 +453,65 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, { struct sk_buff *skb; struct can_frame *cf; - enum can_state new_state; + enum can_state new_state = CAN_STATE_ERROR_ACTIVE; /* ignore this error until 1st ts received */ if (n == PCAN_USB_ERROR_QOVR) if (!mc->pdev->time_ref.tick_count) return 0; - new_state = mc->pdev->dev.can.state; - - switch (mc->pdev->dev.can.state) { - case CAN_STATE_ERROR_ACTIVE: - if (n & PCAN_USB_ERROR_BUS_LIGHT) { - new_state = CAN_STATE_ERROR_WARNING; - break; - } - fallthrough; - - case CAN_STATE_ERROR_WARNING: - if (n & PCAN_USB_ERROR_BUS_HEAVY) { - new_state = CAN_STATE_ERROR_PASSIVE; - break; - } - if (n & PCAN_USB_ERROR_BUS_OFF) { - new_state = CAN_STATE_BUS_OFF; - break; - } - if (n & ~PCAN_USB_ERROR_BUS) { - /* - * trick to bypass next comparison and process other - * errors - */ - new_state = CAN_STATE_MAX; - break; - } - if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) { - /* no error (back to active state) */ - new_state = CAN_STATE_ERROR_ACTIVE; - break; - } - break; - - case CAN_STATE_ERROR_PASSIVE: - if (n & PCAN_USB_ERROR_BUS_OFF) { - new_state = CAN_STATE_BUS_OFF; - break; - } - if (n & PCAN_USB_ERROR_BUS_LIGHT) { - new_state = CAN_STATE_ERROR_WARNING; - break; - } - if (n & ~PCAN_USB_ERROR_BUS) { - /* - * trick to bypass next comparison and process other - * errors - */ - new_state = CAN_STATE_MAX; - break; - } - - if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) { - /* no error (back to warning state) */ - new_state = CAN_STATE_ERROR_WARNING; - break; - } - break; - - default: - /* do nothing waiting for restart */ - return 0; - } - - /* donot post any error if current state didn't change */ - if (mc->pdev->dev.can.state == new_state) - return 0; - /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(mc->netdev, &cf); - if (!skb) - return -ENOMEM; - - switch (new_state) { - case CAN_STATE_BUS_OFF: - cf->can_id |= CAN_ERR_BUSOFF; - mc->pdev->dev.can.can_stats.bus_off++; - can_bus_off(mc->netdev); - break; - - case CAN_STATE_ERROR_PASSIVE: - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (mc->pdev->bec.txerr > mc->pdev->bec.rxerr) ? 
- CAN_ERR_CRTL_TX_PASSIVE : - CAN_ERR_CRTL_RX_PASSIVE; - cf->data[6] = mc->pdev->bec.txerr; - cf->data[7] = mc->pdev->bec.rxerr; - - mc->pdev->dev.can.can_stats.error_passive++; - break; - - case CAN_STATE_ERROR_WARNING: - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (mc->pdev->bec.txerr > mc->pdev->bec.rxerr) ? - CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - cf->data[6] = mc->pdev->bec.txerr; - cf->data[7] = mc->pdev->bec.rxerr; - - mc->pdev->dev.can.can_stats.error_warning++; - break; - case CAN_STATE_ERROR_ACTIVE: - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = CAN_ERR_CRTL_ACTIVE; - - /* sync local copies of rxerr/txerr counters */ - mc->pdev->bec.txerr = 0; - mc->pdev->bec.rxerr = 0; - break; - - default: - /* CAN_STATE_MAX (trick to handle other errors) */ - if (n & PCAN_USB_ERROR_TXQFULL) - netdev_dbg(mc->netdev, "device Tx queue full)\n"); - - if (n & PCAN_USB_ERROR_RXQOVR) { - netdev_dbg(mc->netdev, "data overrun interrupt\n"); + if (n & PCAN_USB_ERROR_RXQOVR) { + /* data overrun interrupt */ + netdev_dbg(mc->netdev, "data overrun interrupt\n"); + mc->netdev->stats.rx_over_errors++; + mc->netdev->stats.rx_errors++; + if (cf) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; - mc->netdev->stats.rx_over_errors++; - mc->netdev->stats.rx_errors++; } + } - cf->data[6] = mc->pdev->bec.txerr; - cf->data[7] = mc->pdev->bec.rxerr; + if (n & PCAN_USB_ERROR_TXQFULL) + netdev_dbg(mc->netdev, "device Tx queue full)\n"); - new_state = mc->pdev->dev.can.state; - break; + if (n & PCAN_USB_ERROR_BUS_OFF) { + new_state = CAN_STATE_BUS_OFF; + } else if (n & PCAN_USB_ERROR_BUS_HEAVY) { + new_state = ((mc->pdev->bec.txerr >= 128) || + (mc->pdev->bec.rxerr >= 128)) ? + CAN_STATE_ERROR_PASSIVE : + CAN_STATE_ERROR_WARNING; + } else { + new_state = CAN_STATE_ERROR_ACTIVE; } - mc->pdev->dev.can.state = new_state; + /* handle change of state */ + if (new_state != mc->pdev->dev.can.state) { + enum can_state tx_state = + (mc->pdev->bec.txerr >= mc->pdev->bec.rxerr) ? + new_state : 0; + enum can_state rx_state = + (mc->pdev->bec.txerr <= mc->pdev->bec.rxerr) ? + new_state : 0; + + can_change_state(mc->netdev, cf, tx_state, rx_state); + + if (new_state == CAN_STATE_BUS_OFF) { + can_bus_off(mc->netdev); + } else if (cf && (cf->can_id & CAN_ERR_CRTL)) { + /* Supply TX/RX error counters in case of + * controller error. 
+ */ + cf->data[6] = mc->pdev->bec.txerr; + cf->data[7] = mc->pdev->bec.rxerr; + } + } + + if (!skb) + return -ENOMEM; if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); @@ -706,6 +633,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) struct sk_buff *skb; struct can_frame *cf; struct skb_shared_hwtstamps *hwts; + u32 can_id_flags; skb = alloc_can_skb(mc->netdev, &cf); if (!skb) @@ -715,13 +643,15 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) if ((mc->ptr + 4) > mc->end) goto decode_failed; - cf->can_id = get_unaligned_le32(mc->ptr) >> 3 | CAN_EFF_FLAG; + can_id_flags = get_unaligned_le32(mc->ptr); + cf->can_id = can_id_flags >> 3 | CAN_EFF_FLAG; mc->ptr += 4; } else { if ((mc->ptr + 2) > mc->end) goto decode_failed; - cf->can_id = get_unaligned_le16(mc->ptr) >> 5; + can_id_flags = get_unaligned_le16(mc->ptr); + cf->can_id = can_id_flags >> 5; mc->ptr += 2; } @@ -744,6 +674,10 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) memcpy(cf->data, mc->ptr, cf->len); mc->ptr += rec_len; + + /* Ignore next byte (client private id) if SRR bit is set */ + if (can_id_flags & PCAN_USB_TX_SRR) + mc->ptr++; } /* convert timestamp into kernel time */ @@ -821,10 +755,11 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, struct net_device *netdev = dev->netdev; struct net_device_stats *stats = &netdev->stats; struct can_frame *cf = (struct can_frame *)skb->data; + u32 can_id_flags = cf->can_id & CAN_ERR_MASK; u8 *pc; - obuf[0] = 2; - obuf[1] = 1; + obuf[0] = PCAN_USB_MSG_TX_CAN; + obuf[1] = 1; /* only one CAN frame is stored in the packet */ pc = obuf + PCAN_USB_MSG_HEADER_LEN; @@ -839,12 +774,28 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, *pc |= PCAN_USB_STATUSLEN_EXT_ID; pc++; - put_unaligned_le32((cf->can_id & CAN_ERR_MASK) << 3, pc); + can_id_flags <<= 3; + + if (dev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + can_id_flags |= PCAN_USB_TX_SRR; + + if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + can_id_flags |= PCAN_USB_TX_AT; + + put_unaligned_le32(can_id_flags, pc); pc += 4; } else { pc++; - put_unaligned_le16((cf->can_id & CAN_ERR_MASK) << 5, pc); + can_id_flags <<= 5; + + if (dev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + can_id_flags |= PCAN_USB_TX_SRR; + + if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + can_id_flags |= PCAN_USB_TX_AT; + + put_unaligned_le16(can_id_flags, pc); pc += 2; } @@ -854,6 +805,10 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, pc += cf->len; } + /* SRR bit needs a writer id (useless here) */ + if (can_id_flags & PCAN_USB_TX_SRR) + *pc++ = 0x80; + obuf[(*size)-1] = (u8)(stats->tx_packets & 0xff); return 0; @@ -928,6 +883,19 @@ static int pcan_usb_init(struct peak_usb_device *dev) return err; } + /* Since rev 4.1, PCAN-USB is able to make single-shot as well as + * looped back frames. + */ + if (dev->device_rev >= 41) { + struct can_priv *priv = netdev_priv(dev->netdev); + + priv->ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT | + CAN_CTRLMODE_LOOPBACK; + } else { + dev_info(dev->netdev->dev.parent, + "Firmware update available. 
Please contact support@peak-system.com\n"); + } + dev_info(dev->netdev->dev.parent, "PEAK-System %s adapter hwrev %u serial %08X (%u channel)\n", pcan_usb.name, dev->device_rev, serial_number, diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index b23e3488695b..bd1417a66cbf 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -2016,15 +2016,6 @@ int b53_br_flags(struct dsa_switch *ds, int port, } EXPORT_SYMBOL(b53_br_flags); -int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter, - struct netlink_ext_ack *extack) -{ - b53_port_set_mcast_flood(ds->priv, port, mrouter); - - return 0; -} -EXPORT_SYMBOL(b53_set_mrouter); - static bool b53_possible_cpu_port(struct dsa_switch *ds, int port) { /* Broadcom switches will accept enabling Broadcom tags on the @@ -2268,7 +2259,6 @@ static const struct dsa_switch_ops b53_switch_ops = { .port_bridge_leave = b53_br_leave, .port_pre_bridge_flags = b53_br_flags_pre, .port_bridge_flags = b53_br_flags, - .port_set_mrouter = b53_set_mrouter, .port_stp_state_set = b53_br_set_stp_state, .port_fast_age = b53_br_fast_age, .port_vlan_filtering = b53_vlan_filtering, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 82700a5714c1..9bf8319342b0 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -328,8 +328,6 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port, int b53_br_flags(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack); -int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter, - struct netlink_ext_ack *extack); int b53_setup_devlink_resources(struct dsa_switch *ds); void b53_port_event(struct dsa_switch *ds, int port); void b53_phylink_validate(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 3b018fcf4412..6ce9ec1283e0 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1199,7 +1199,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .port_pre_bridge_flags = b53_br_flags_pre, .port_bridge_flags = b53_br_flags, .port_stp_state_set = b53_br_set_stp_state, - .port_set_mrouter = b53_set_mrouter, .port_fast_age = b53_br_fast_age, .port_vlan_filtering = b53_vlan_filtering, .port_vlan_add = b53_vlan_add, diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 7062db6a083c..542cfc4ccb08 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -1345,6 +1345,7 @@ static int hellcreek_setup(struct dsa_switch *ds) * filtering setups are not supported. 
*/ ds->vlan_filtering_is_global = true; + ds->needs_standalone_vlan_filtering = true; /* Intercept _all_ PTP multicast traffic */ ret = hellcreek_setup_fdb(hellcreek); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 632f0fcc5aa7..d0cba2d1cd68 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -367,8 +367,8 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid, int i; reg[1] |= vid & CVID_MASK; - if (vid > 1) - reg[1] |= ATA2_IVL; + reg[1] |= ATA2_IVL; + reg[1] |= ATA2_FID(FID_BRIDGED); reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER; reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP; /* STATIC_ENT indicate that entry is static wouldn't @@ -1022,6 +1022,10 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port) mt7530_write(priv, MT7530_PCR_P(port), PCR_MATRIX(dsa_user_ports(priv->ds))); + /* Set to fallback mode for independent VLAN learning */ + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, + MT7530_PORT_FALLBACK_MODE); + return 0; } @@ -1144,7 +1148,8 @@ mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state) break; } - mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK, stp_state); + mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK(FID_BRIDGED), + FID_PST(FID_BRIDGED, stp_state)); } static int @@ -1186,18 +1191,6 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port, } static int -mt7530_port_set_mrouter(struct dsa_switch *ds, int port, bool mrouter, - struct netlink_ext_ack *extack) -{ - struct mt7530_priv *priv = ds->priv; - - mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)), - mrouter ? UNM_FFP(BIT(port)) : 0); - - return 0; -} - -static int mt7530_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *bridge) { @@ -1230,6 +1223,10 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port, PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap)); priv->ports[port].pm |= PCR_MATRIX(port_bitmap); + /* Set to fallback mode for independent VLAN learning */ + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, + MT7530_PORT_FALLBACK_MODE); + mutex_unlock(&priv->reg_mutex); return 0; @@ -1242,15 +1239,22 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) bool all_user_ports_removed = true; int i; - /* When a port is removed from the bridge, the port would be set up - * back to the default as is at initial boot which is a VLAN-unaware - * port. + /* This is called after .port_bridge_leave when leaving a VLAN-aware + * bridge. Don't set standalone ports to fallback mode. */ - mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, - MT7530_PORT_MATRIX_MODE); - mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK, + if (dsa_to_port(ds, port)->bridge_dev) + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, + MT7530_PORT_FALLBACK_MODE); + + mt7530_rmw(priv, MT7530_PVC_P(port), + VLAN_ATTR_MASK | PVC_EG_TAG_MASK | ACC_FRM_MASK, VLAN_ATTR(MT7530_VLAN_TRANSPARENT) | - PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); + PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT) | + MT7530_VLAN_ACC_ALL); + + /* Set PVID to 0 */ + mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, + G0_PORT_VID_DEF); for (i = 0; i < MT7530_NUM_PORTS; i++) { if (dsa_is_user_port(ds, i) && @@ -1277,15 +1281,19 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port) struct mt7530_priv *priv = ds->priv; /* Trapped into security mode allows packet forwarding through VLAN - * table lookup. CPU port is set to fallback mode to let untagged - * frames pass through. + * table lookup. 
*/ - if (dsa_is_cpu_port(ds, port)) - mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, - MT7530_PORT_FALLBACK_MODE); - else + if (dsa_is_user_port(ds, port)) { mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, MT7530_PORT_SECURITY_MODE); + mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, + G0_PORT_VID(priv->ports[port].pvid)); + + /* Only accept tagged frames if PVID is not set */ + if (!priv->ports[port].pvid) + mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, + MT7530_VLAN_ACC_TAGGED); + } /* Set the port as a user port which is to be able to recognize VID * from incoming packets before fetching entry within the VLAN table. @@ -1308,11 +1316,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, /* Remove this port from the port matrix of the other ports * in the same bridge. If the port is disabled, port matrix * is kept and not being setup until the port becomes enabled. - * And the other port's port matrix cannot be broken when the - * other port is still a VLAN-aware port. */ - if (dsa_is_user_port(ds, i) && i != port && - !dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) { + if (dsa_is_user_port(ds, i) && i != port) { if (dsa_to_port(ds, i)->bridge_dev != bridge) continue; if (priv->ports[i].enable) @@ -1330,6 +1335,13 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, PCR_MATRIX(BIT(MT7530_CPU_PORT))); priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT)); + /* When a port is removed from the bridge, the port would be set up + * back to the default as is at initial boot which is a VLAN-unaware + * port. + */ + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, + MT7530_PORT_MATRIX_MODE); + mutex_unlock(&priv->reg_mutex); } @@ -1512,7 +1524,8 @@ mt7530_hw_vlan_add(struct mt7530_priv *priv, /* Validate the entry with independent learning, create egress tag per * VLAN and joining the port as one of the port members. */ - val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | VLAN_VALID; + val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | FID(FID_BRIDGED) | + VLAN_VALID; mt7530_write(priv, MT7530_VAWD1, val); /* Decide whether adding tag or not for those outgoing packets from the @@ -1587,6 +1600,21 @@ mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid, } static int +mt7530_setup_vlan0(struct mt7530_priv *priv) +{ + u32 val; + + /* Validate the entry with independent learning, keep the original + * ingress tag attribute. 
+ */ + val = IVL_MAC | EG_CON | PORT_MEM(MT7530_ALL_MEMBERS) | FID(FID_BRIDGED) | + VLAN_VALID; + mt7530_write(priv, MT7530_VAWD1, val); + + return mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, 0); +} + +static int mt7530_port_vlan_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack) @@ -1602,9 +1630,28 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port, mt7530_hw_vlan_update(priv, vlan->vid, &new_entry, mt7530_hw_vlan_add); if (pvid) { - mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, - G0_PORT_VID(vlan->vid)); priv->ports[port].pvid = vlan->vid; + + /* Accept all frames if PVID is set */ + mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, + MT7530_VLAN_ACC_ALL); + + /* Only configure PVID if VLAN filtering is enabled */ + if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) + mt7530_rmw(priv, MT7530_PPBV1_P(port), + G0_PORT_VID_MASK, + G0_PORT_VID(vlan->vid)); + } else if (vlan->vid && priv->ports[port].pvid == vlan->vid) { + /* This VLAN is overwritten without PVID, so unset it */ + priv->ports[port].pvid = G0_PORT_VID_DEF; + + /* Only accept tagged frames if the port is VLAN-aware */ + if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) + mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, + MT7530_VLAN_ACC_TAGGED); + + mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, + G0_PORT_VID_DEF); } mutex_unlock(&priv->reg_mutex); @@ -1618,11 +1665,9 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port, { struct mt7530_hw_vlan_entry target_entry; struct mt7530_priv *priv = ds->priv; - u16 pvid; mutex_lock(&priv->reg_mutex); - pvid = priv->ports[port].pvid; mt7530_hw_vlan_entry_init(&target_entry, port, 0); mt7530_hw_vlan_update(priv, vlan->vid, &target_entry, mt7530_hw_vlan_del); @@ -1630,11 +1675,18 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port, /* PVID is being restored to the default whenever the PVID port * is being removed from the VLAN. */ - if (pvid == vlan->vid) - pvid = G0_PORT_VID_DEF; + if (priv->ports[port].pvid == vlan->vid) { + priv->ports[port].pvid = G0_PORT_VID_DEF; + + /* Only accept tagged frames if the port is VLAN-aware */ + if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) + mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK, + MT7530_VLAN_ACC_TAGGED); + + mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, + G0_PORT_VID_DEF); + } - mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, pvid); - priv->ports[port].pvid = pvid; mutex_unlock(&priv->reg_mutex); @@ -1718,15 +1770,7 @@ static enum dsa_tag_protocol mtk_get_tag_protocol(struct dsa_switch *ds, int port, enum dsa_tag_protocol mp) { - struct mt7530_priv *priv = ds->priv; - - if (port != MT7530_CPU_PORT) { - dev_warn(priv->dev, - "port not matched with tagging CPU port\n"); - return DSA_TAG_PROTO_NONE; - } else { - return DSA_TAG_PROTO_MTK; - } + return DSA_TAG_PROTO_MTK; } #ifdef CONFIG_GPIOLIB @@ -2055,6 +2099,7 @@ mt7530_setup(struct dsa_switch *ds) * as two netdev instances. 
*/ dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent; + ds->assisted_learning_on_cpu_port = true; ds->mtu_enforcement_ingress = true; if (priv->id == ID_MT7530) { @@ -2125,6 +2170,9 @@ mt7530_setup(struct dsa_switch *ds) mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, PCR_MATRIX_CLR); + /* Disable learning by default on all ports */ + mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); + if (dsa_is_cpu_port(ds, i)) { ret = mt753x_cpu_port_enable(ds, i); if (ret) @@ -2132,14 +2180,20 @@ mt7530_setup(struct dsa_switch *ds) } else { mt7530_port_disable(ds, i); - /* Disable learning by default on all user ports */ - mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); + /* Set default PVID to 0 on all user ports */ + mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK, + G0_PORT_VID_DEF); } /* Enable consistent egress tag */ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK, PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); } + /* Setup VLAN ID 0 for VLAN-unaware bridges */ + ret = mt7530_setup_vlan0(priv); + if (ret) + return ret; + /* Setup port 5 */ priv->p5_intf_sel = P5_DISABLED; interface = PHY_INTERFACE_MODE_NA; @@ -2290,6 +2344,9 @@ mt7531_setup(struct dsa_switch *ds) mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, PCR_MATRIX_CLR); + /* Disable learning by default on all ports */ + mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); + mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR); if (dsa_is_cpu_port(ds, i)) { @@ -2299,8 +2356,9 @@ mt7531_setup(struct dsa_switch *ds) } else { mt7530_port_disable(ds, i); - /* Disable learning by default on all user ports */ - mt7530_set(priv, MT7530_PSC_P(i), SA_DIS); + /* Set default PVID to 0 on all user ports */ + mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK, + G0_PORT_VID_DEF); } /* Enable consistent egress tag */ @@ -2308,6 +2366,12 @@ mt7531_setup(struct dsa_switch *ds) PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT)); } + /* Setup VLAN ID 0 for VLAN-unaware bridges */ + ret = mt7530_setup_vlan0(priv); + if (ret) + return ret; + + ds->assisted_learning_on_cpu_port = true; ds->mtu_enforcement_ingress = true; /* Flush the FDB table */ @@ -3061,7 +3125,6 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .port_stp_state_set = mt7530_stp_state_set, .port_pre_bridge_flags = mt7530_port_pre_bridge_flags, .port_bridge_flags = mt7530_port_bridge_flags, - .port_set_mrouter = mt7530_port_set_mrouter, .port_bridge_join = mt7530_port_bridge_join, .port_bridge_leave = mt7530_port_bridge_leave, .port_fdb_add = mt7530_port_fdb_add, diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index b19b389ff10a..91508e2feef9 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -80,6 +80,7 @@ enum mt753x_bpdu_port_fw { #define STATIC_ENT 3 #define MT7530_ATA2 0x78 #define ATA2_IVL BIT(15) +#define ATA2_FID(x) (((x) & 0x7) << 12) /* Register for address table write data */ #define MT7530_ATWD 0x7c @@ -144,15 +145,24 @@ enum mt7530_vlan_cmd { #define PORT_STAG BIT(31) /* Independent VLAN Learning */ #define IVL_MAC BIT(30) +/* Egress Tag Consistent */ +#define EG_CON BIT(29) /* Per VLAN Egress Tag Control */ #define VTAG_EN BIT(28) /* VLAN Member Control */ #define PORT_MEM(x) (((x) & 0xff) << 16) +/* Filter ID */ +#define FID(x) (((x) & 0x7) << 1) /* VLAN Entry Valid */ #define VLAN_VALID BIT(0) #define PORT_MEM_SHFT 16 #define PORT_MEM_MASK 0xff +enum mt7530_fid { + FID_STANDALONE = 0, + FID_BRIDGED = 1, +}; + #define MT7530_VAWD2 0x98 /* Egress Tag Control */ #define ETAG_CTRL_P(p, x) (((x) & 0x3) << ((p) << 1)) @@ -179,8 +189,8 @@ enum 
mt7530_vlan_egress_attr { /* Register for port STP state control */ #define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100)) -#define FID_PST(x) ((x) & 0x3) -#define FID_PST_MASK FID_PST(0x3) +#define FID_PST(fid, state) (((state) & 0x3) << ((fid) * 2)) +#define FID_PST_MASK(fid) FID_PST(fid, 0x3) enum mt7530_stp_state { MT7530_STP_DISABLED = 0, @@ -230,6 +240,7 @@ enum mt7530_port_mode { #define PVC_EG_TAG_MASK PVC_EG_TAG(7) #define VLAN_ATTR(x) (((x) & 0x3) << 6) #define VLAN_ATTR_MASK VLAN_ATTR(3) +#define ACC_FRM_MASK GENMASK(1, 0) enum mt7530_vlan_port_eg_tag { MT7530_VLAN_EG_DISABLED = 0, @@ -241,13 +252,19 @@ enum mt7530_vlan_port_attr { MT7530_VLAN_TRANSPARENT = 3, }; +enum mt7530_vlan_port_acc_frm { + MT7530_VLAN_ACC_ALL = 0, + MT7530_VLAN_ACC_TAGGED = 1, + MT7530_VLAN_ACC_UNTAGGED = 2, +}; + #define STAG_VPID (((x) & 0xffff) << 16) /* Register for port port-and-protocol based vlan 1 control */ #define MT7530_PPBV1_P(x) (0x2014 + ((x) * 0x100)) #define G0_PORT_VID(x) (((x) & 0xfff) << 0) #define G0_PORT_VID_MASK G0_PORT_VID(0xfff) -#define G0_PORT_VID_DEF G0_PORT_VID(1) +#define G0_PORT_VID_DEF G0_PORT_VID(0) /* Register for port MAC control register */ #define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100)) diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig index 634a48e6616b..7a2445a34eb7 100644 --- a/drivers/net/dsa/mv88e6xxx/Kconfig +++ b/drivers/net/dsa/mv88e6xxx/Kconfig @@ -2,6 +2,7 @@ config NET_DSA_MV88E6XXX tristate "Marvell 88E6xxx Ethernet switch fabric support" depends on NET_DSA + depends on PTP_1588_CLOCK_OPTIONAL select IRQ_DOMAIN select NET_DSA_TAG_EDSA select NET_DSA_TAG_DSA diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 272b0535d946..c45ca2473743 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1221,14 +1221,36 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) bool found = false; u16 pvlan; - list_for_each_entry(dp, &dst->ports, list) { - if (dp->ds->index == dev && dp->index == port) { + /* dev is a physical switch */ + if (dev <= dst->last_switch) { + list_for_each_entry(dp, &dst->ports, list) { + if (dp->ds->index == dev && dp->index == port) { + /* dp might be a DSA link or a user port, so it + * might or might not have a bridge_dev + * pointer. Use the "found" variable for both + * cases. + */ + br = dp->bridge_dev; + found = true; + break; + } + } + /* dev is a virtual bridge */ + } else { + list_for_each_entry(dp, &dst->ports, list) { + if (dp->bridge_num < 0) + continue; + + if (dp->bridge_num + 1 + dst->last_switch != dev) + continue; + + br = dp->bridge_dev; found = true; break; } } - /* Prevent frames from unknown switch or port */ + /* Prevent frames from unknown switch or virtual bridge */ if (!found) return 0; @@ -1236,7 +1258,6 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) if (dp->type == DSA_PORT_TYPE_CPU || dp->type == DSA_PORT_TYPE_DSA) return mv88e6xxx_port_mask(chip); - br = dp->bridge_dev; pvlan = 0; /* Frames from user ports can egress any local DSA links and CPU ports, @@ -2422,6 +2443,44 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, mv88e6xxx_reg_unlock(chip); } +/* Treat the software bridge as a virtual single-port switch behind the + * CPU and map in the PVT. First dst->last_switch elements are taken by + * physical switches, so start from beyond that range. 
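[The FID_PST() rework above turns the per-port STP state field into an array indexed by filter ID: each FID owns two bits of the MT7530_SSP_P(port) register. A standalone sketch of the packing, assuming the forwarding state encodes as 3 per the mt7530_stp_state enum:

#include <stdint.h>

#define FID_PST(fid, state)	(((state) & 0x3) << ((fid) * 2))
#define FID_PST_MASK(fid)	FID_PST(fid, 0x3)

enum { FID_STANDALONE = 0, FID_BRIDGED = 1 };

/* Put the bridged FID into forwarding state, leaving other FIDs alone */
static uint32_t ssp_set_bridged_forwarding(uint32_t ssp)
{
	ssp &= ~FID_PST_MASK(FID_BRIDGED);	/* clear bits 3:2 */
	ssp |= FID_PST(FID_BRIDGED, 3);		/* FORWARDING, assumed = 3 */
	return ssp;
}
]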
+ */ +static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds, + int bridge_num) +{ + u8 dev = bridge_num + ds->dst->last_switch + 1; + struct mv88e6xxx_chip *chip = ds->priv; + int err; + + mv88e6xxx_reg_lock(chip); + err = mv88e6xxx_pvt_map(chip, dev, 0); + mv88e6xxx_reg_unlock(chip); + + return err; +} + +static int mv88e6xxx_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, + struct net_device *br, + int bridge_num) +{ + return mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num); +} + +static void mv88e6xxx_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, + struct net_device *br, + int bridge_num) +{ + int err; + + err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num); + if (err) { + dev_err(ds->dev, "failed to remap cross-chip Port VLAN: %pe\n", + ERR_PTR(err)); + } +} + static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip) { if (chip->info->ops->reset) @@ -3025,6 +3084,15 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) chip->ds = ds; ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip); + /* Since virtual bridges are mapped in the PVT, the number we support + * depends on the physical switch topology. We need to let DSA figure + * that out and therefore we cannot set this at dsa_register_switch() + * time. + */ + if (mv88e6xxx_has_pvt(chip)) + ds->num_fwd_offloading_bridges = MV88E6XXX_MAX_PVT_SWITCHES - + ds->dst->last_switch - 1; + mv88e6xxx_reg_lock(chip); if (chip->info->ops->setup_errata) { @@ -5729,7 +5797,6 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port, struct netlink_ext_ack *extack) { struct mv88e6xxx_chip *chip = ds->priv; - bool do_fast_age = false; int err = -EOPNOTSUPP; mv88e6xxx_reg_lock(chip); @@ -5741,9 +5808,6 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port, err = mv88e6xxx_port_set_assoc_vector(chip, port, pav); if (err) goto out; - - if (!learning) - do_fast_age = true; } if (flags.mask & BR_FLOOD) { @@ -5775,26 +5839,6 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port, out: mv88e6xxx_reg_unlock(chip); - if (do_fast_age) - mv88e6xxx_port_fast_age(ds, port); - - return err; -} - -static int mv88e6xxx_port_set_mrouter(struct dsa_switch *ds, int port, - bool mrouter, - struct netlink_ext_ack *extack) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int err; - - if (!chip->info->ops->port_set_mcast_flood) - return -EOPNOTSUPP; - - mv88e6xxx_reg_lock(chip); - err = chip->info->ops->port_set_mcast_flood(chip, port, mrouter); - mv88e6xxx_reg_unlock(chip); - return err; } @@ -6099,7 +6143,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_bridge_leave = mv88e6xxx_port_bridge_leave, .port_pre_bridge_flags = mv88e6xxx_port_pre_bridge_flags, .port_bridge_flags = mv88e6xxx_port_bridge_flags, - .port_set_mrouter = mv88e6xxx_port_set_mrouter, .port_stp_state_set = mv88e6xxx_port_stp_state_set, .port_fast_age = mv88e6xxx_port_fast_age, .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, @@ -6128,6 +6171,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .crosschip_lag_change = mv88e6xxx_crosschip_lag_change, .crosschip_lag_join = mv88e6xxx_crosschip_lag_join, .crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave, + .port_bridge_tx_fwd_offload = mv88e6xxx_bridge_tx_fwd_offload, + .port_bridge_tx_fwd_unoffload = mv88e6xxx_bridge_tx_fwd_unoffload, }; static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig index 932b6b6fe817..9948544ba1c4 
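[The numbering scheme above is worth spelling out: cross-chip PVT device IDs 0..dst->last_switch belong to physical switches, and software bridges are appended after them, which also bounds how many bridges can be offloaded. A toy calculation; MV88E6XXX_MAX_PVT_SWITCHES is assumed to be 32 here, matching the PVT geometry:

#include <stdio.h>

#define MAX_PVT_SWITCHES 32	/* assumed PVT capacity in device IDs */

static int virtual_bridge_dev(int bridge_num, int last_switch)
{
	return bridge_num + last_switch + 1;
}

int main(void)
{
	int last_switch = 2;	/* physical switches use devs 0, 1, 2 */

	printf("bridge 0 -> PVT dev %d\n", virtual_bridge_dev(0, last_switch));
	printf("offloadable bridges: %d\n",
	       MAX_PVT_SWITCHES - last_switch - 1);	/* 29 */
	return 0;
}
]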
100644 --- a/drivers/net/dsa/ocelot/Kconfig +++ b/drivers/net/dsa/ocelot/Kconfig @@ -5,6 +5,7 @@ config NET_DSA_MSCC_FELIX depends on NET_VENDOR_MICROSEMI depends on NET_VENDOR_FREESCALE depends on HAS_IOMEM + depends on PTP_1588_CLOCK_OPTIONAL select MSCC_OCELOT_SWITCH_LIB select NET_DSA_TAG_OCELOT_8021Q select NET_DSA_TAG_OCELOT @@ -19,6 +20,7 @@ config NET_DSA_MSCC_SEVILLE depends on NET_DSA depends on NET_VENDOR_MICROSEMI depends on HAS_IOMEM + depends on PTP_1588_CLOCK_OPTIONAL select MSCC_OCELOT_SWITCH_LIB select NET_DSA_TAG_OCELOT_8021Q select NET_DSA_TAG_OCELOT diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index a2a15919b960..3656e67af789 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -231,11 +231,6 @@ static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) return 0; } -static const struct dsa_8021q_ops felix_tag_8021q_ops = { - .vlan_add = felix_tag_8021q_vlan_add, - .vlan_del = felix_tag_8021q_vlan_del, -}; - /* Alternatively to using the NPI functionality, that same hardware MAC * connected internally to the enetc or fman DSA master can be configured to * use the software-defined tag_8021q frame format. As far as the hardware is @@ -425,29 +420,18 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu) ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC); ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC); - felix->dsa_8021q_ctx = kzalloc(sizeof(*felix->dsa_8021q_ctx), - GFP_KERNEL); - if (!felix->dsa_8021q_ctx) - return -ENOMEM; - - felix->dsa_8021q_ctx->ops = &felix_tag_8021q_ops; - felix->dsa_8021q_ctx->proto = htons(ETH_P_8021AD); - felix->dsa_8021q_ctx->ds = ds; - - err = dsa_8021q_setup(felix->dsa_8021q_ctx, true); + err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD)); if (err) - goto out_free_dsa_8021_ctx; + return err; err = felix_setup_mmio_filtering(felix); if (err) - goto out_teardown_dsa_8021q; + goto out_tag_8021q_unregister; return 0; -out_teardown_dsa_8021q: - dsa_8021q_setup(felix->dsa_8021q_ctx, false); -out_free_dsa_8021_ctx: - kfree(felix->dsa_8021q_ctx); +out_tag_8021q_unregister: + dsa_tag_8021q_unregister(ds); return err; } @@ -462,11 +446,7 @@ static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu) dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d", err); - err = dsa_8021q_setup(felix->dsa_8021q_ctx, false); - if (err) - dev_err(ds->dev, "dsa_8021q_setup returned %d", err); - - kfree(felix->dsa_8021q_ctx); + dsa_tag_8021q_unregister(ds); for (port = 0; port < ds->num_ports; port++) { if (dsa_is_unused_port(ds, port)) @@ -762,7 +742,8 @@ static int felix_lag_change(struct dsa_switch *ds, int port) } static int felix_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; u16 flags = vlan->flags; @@ -780,7 +761,8 @@ static int felix_vlan_prepare(struct dsa_switch *ds, int port, return ocelot_vlan_prepare(ocelot, port, vlan->vid, flags & BRIDGE_VLAN_INFO_PVID, - flags & BRIDGE_VLAN_INFO_UNTAGGED); + flags & BRIDGE_VLAN_INFO_UNTAGGED, + extack); } static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, @@ -788,7 +770,7 @@ static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, { struct ocelot *ocelot = ds->priv; - return ocelot_port_vlan_filtering(ocelot, port, enabled); + return ocelot_port_vlan_filtering(ocelot, port, 
enabled, extack); } static int felix_vlan_add(struct dsa_switch *ds, int port, @@ -799,7 +781,7 @@ static int felix_vlan_add(struct dsa_switch *ds, int port, u16 flags = vlan->flags; int err; - err = felix_vlan_prepare(ds, port, vlan); + err = felix_vlan_prepare(ds, port, vlan, extack); if (err) return err; @@ -816,23 +798,6 @@ static int felix_vlan_del(struct dsa_switch *ds, int port, return ocelot_vlan_del(ocelot, port, vlan->vid); } -static int felix_port_enable(struct dsa_switch *ds, int port, - struct phy_device *phy) -{ - struct ocelot *ocelot = ds->priv; - - ocelot_port_enable(ocelot, port, phy); - - return 0; -} - -static void felix_port_disable(struct dsa_switch *ds, int port) -{ - struct ocelot *ocelot = ds->priv; - - return ocelot_port_disable(ocelot, port); -} - static void felix_phylink_validate(struct dsa_switch *ds, int port, unsigned long *supported, struct phylink_link_state *state) @@ -861,25 +826,9 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, phy_interface_t interface) { struct ocelot *ocelot = ds->priv; - struct ocelot_port *ocelot_port = ocelot->ports[port]; - int err; - - ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA, - DEV_MAC_ENA_CFG); - ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); - - err = ocelot_port_flush(ocelot, port); - if (err) - dev_err(ocelot->dev, "failed to flush port %d: %d\n", - port, err); - - /* Put the port in reset. */ - ocelot_port_writel(ocelot_port, - DEV_CLOCK_CFG_MAC_TX_RST | - DEV_CLOCK_CFG_MAC_RX_RST | - DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000), - DEV_CLOCK_CFG); + ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface, + FELIX_MAC_QUIRKS); } static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, @@ -890,75 +839,11 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, bool tx_pause, bool rx_pause) { struct ocelot *ocelot = ds->priv; - struct ocelot_port *ocelot_port = ocelot->ports[port]; struct felix *felix = ocelot_to_felix(ocelot); - u32 mac_fc_cfg; - - /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and - * PORT_RST bits in DEV_CLOCK_CFG. Note that the way this system is - * integrated is that the MAC speed is fixed and it's the PCS who is - * performing the rate adaptation, so we have to write "1000Mbps" into - * the LINK_SPEED field of DEV_CLOCK_CFG (which is also its default - * value). - */ - ocelot_port_writel(ocelot_port, - DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000), - DEV_CLOCK_CFG); - - switch (speed) { - case SPEED_10: - mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(3); - break; - case SPEED_100: - mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(2); - break; - case SPEED_1000: - case SPEED_2500: - mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(1); - break; - default: - dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n", - port, speed); - return; - } - - /* handle Rx pause in all cases, with 2500base-X this is used for rate - * adaptation. - */ - mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA; - - if (tx_pause) - mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA | - SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | - SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | - SYS_MAC_FC_CFG_ZERO_PAUSE_ENA; - - /* Flow control. Link speed is only used here to evaluate the time - * specification in incoming pause frames. 
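[The deleted mac_link_up body is not lost: it moves into the ocelot switch library behind the ocelot_phylink_mac_link_up() call that replaces it. Its speed-to-pause-timer encoding, reconstructed from the removed switch statement, was:

/* SYS_MAC_FC_CFG_FC_LINK_SPEED values from the deleted code; the link
 * speed only scales the time unit of incoming pause frames.
 */
static int fc_link_speed(int speed)
{
	switch (speed) {
	case 10:	return 3;
	case 100:	return 2;
	case 1000:
	case 2500:	return 1;	/* PCS performs rate adaptation */
	default:	return -1;	/* unsupported speed */
	}
}
]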
- */ - ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port); - - ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); - - ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause); - - /* Undo the effects of felix_phylink_mac_link_down: - * enable MAC module - */ - ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA | - DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); - - /* Enable receiving frames on the port, and activate auto-learning of - * MAC addresses. - */ - ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO | - ANA_PORT_PORT_CFG_RECV_ENA | - ANA_PORT_PORT_CFG_PORTID_VAL(port), - ANA_PORT_PORT_CFG, port); - /* Core: Enable port for frame transfer */ - ocelot_fields_write(ocelot, port, - QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); + ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode, + interface, speed, duplex, tx_pause, rx_pause, + FELIX_MAC_QUIRKS); if (felix->info->port_sched_speed_set) felix->info->port_sched_speed_set(ocelot, port, speed); @@ -1635,8 +1520,6 @@ const struct dsa_switch_ops felix_switch_ops = { .phylink_mac_config = felix_phylink_mac_config, .phylink_mac_link_down = felix_phylink_mac_link_down, .phylink_mac_link_up = felix_phylink_mac_link_up, - .port_enable = felix_port_enable, - .port_disable = felix_port_disable, .port_fdb_dump = felix_fdb_dump, .port_fdb_add = felix_fdb_add, .port_fdb_del = felix_fdb_del, @@ -1679,6 +1562,8 @@ const struct dsa_switch_ops felix_switch_ops = { .port_mrp_del = felix_mrp_del, .port_mrp_add_ring_role = felix_mrp_add_ring_role, .port_mrp_del_ring_role = felix_mrp_del_ring_role, + .tag_8021q_vlan_add = felix_tag_8021q_vlan_add, + .tag_8021q_vlan_del = felix_tag_8021q_vlan_del, }; struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port) diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index 4d96cad815d5..5854bab43327 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -5,6 +5,7 @@ #define _MSCC_FELIX_H #define ocelot_to_felix(o) container_of((o), struct felix, ocelot) +#define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION /* Platform-specific information */ struct felix_info { @@ -60,7 +61,6 @@ struct felix { struct lynx_pcs **pcs; resource_size_t switch_base; resource_size_t imdio_base; - struct dsa_8021q_context *dsa_8021q_ctx; enum dsa_tag_protocol tag_proto; }; diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig index b29d41e5e1e7..1291bba3f3b6 100644 --- a/drivers/net/dsa/sja1105/Kconfig +++ b/drivers/net/dsa/sja1105/Kconfig @@ -2,6 +2,7 @@ config NET_DSA_SJA1105 tristate "NXP SJA1105 Ethernet switch family support" depends on NET_DSA && SPI + depends on PTP_1588_CLOCK_OPTIONAL select NET_DSA_TAG_SJA1105 select PCS_XPCS select PACKING diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index 221c7abdef0e..5e5d24e7c02b 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -115,12 +115,6 @@ struct sja1105_info { const struct sja1105_dynamic_table_ops *dyn_ops; const struct sja1105_table_ops *static_ops; const struct sja1105_regs *regs; - /* Both E/T and P/Q/R/S have quirks when it comes to popping the S-Tag - * from double-tagged frames. E/T will pop it only when it's equal to - * TPID from the General Parameters Table, while P/Q/R/S will only - * pop it when it's equal to TPID2. 
- */ - u16 qinq_tpid; bool can_limit_mcast_flood; int (*reset_cmd)(struct dsa_switch *ds); int (*setup_rgmii_delay)(const void *ctx, int port); @@ -226,28 +220,13 @@ struct sja1105_flow_block { int num_virtual_links; }; -struct sja1105_bridge_vlan { - struct list_head list; - int port; - u16 vid; - bool pvid; - bool untagged; -}; - -enum sja1105_vlan_state { - SJA1105_VLAN_UNAWARE, - SJA1105_VLAN_BEST_EFFORT, - SJA1105_VLAN_FILTERING_FULL, -}; - struct sja1105_private { struct sja1105_static_config static_config; bool rgmii_rx_delay[SJA1105_MAX_NUM_PORTS]; bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS]; phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS]; bool fixed_link[SJA1105_MAX_NUM_PORTS]; - bool best_effort_vlan_filtering; - unsigned long learn_ena; + bool vlan_aware; unsigned long ucast_egress_floods; unsigned long bcast_egress_floods; const struct sja1105_info *info; @@ -255,16 +234,14 @@ struct sja1105_private { struct gpio_desc *reset_gpio; struct spi_device *spidev; struct dsa_switch *ds; - struct list_head dsa_8021q_vlans; - struct list_head bridge_vlans; + u16 bridge_pvid[SJA1105_MAX_NUM_PORTS]; + u16 tag_8021q_pvid[SJA1105_MAX_NUM_PORTS]; struct sja1105_flow_block flow_block; struct sja1105_port ports[SJA1105_MAX_NUM_PORTS]; /* Serializes transmission of management frames so that * the switch doesn't confuse them with one another. */ struct mutex mgmt_lock; - struct dsa_8021q_context *dsa_8021q_ctx; - enum sja1105_vlan_state vlan_state; struct devlink_region **regions; struct sja1105_cbs_entry *cbs; struct mii_bus *mdio_base_t1; @@ -311,10 +288,6 @@ int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val); /* From sja1105_devlink.c */ int sja1105_devlink_setup(struct dsa_switch *ds); void sja1105_devlink_teardown(struct dsa_switch *ds); -int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id, - struct devlink_param_gset_ctx *ctx); -int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id, - struct devlink_param_gset_ctx *ctx); int sja1105_devlink_info_get(struct dsa_switch *ds, struct devlink_info_req *req, struct netlink_ext_ack *extack); diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c index b6a4a16b8c7e..05c7f4ca3b1a 100644 --- a/drivers/net/dsa/sja1105/sja1105_devlink.c +++ b/drivers/net/dsa/sja1105/sja1105_devlink.c @@ -115,105 +115,6 @@ static void sja1105_teardown_devlink_regions(struct dsa_switch *ds) kfree(priv->regions); } -static int sja1105_best_effort_vlan_filtering_get(struct sja1105_private *priv, - bool *be_vlan) -{ - *be_vlan = priv->best_effort_vlan_filtering; - - return 0; -} - -static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv, - bool be_vlan) -{ - struct dsa_switch *ds = priv->ds; - bool vlan_filtering; - int port; - int rc; - - priv->best_effort_vlan_filtering = be_vlan; - - rtnl_lock(); - for (port = 0; port < ds->num_ports; port++) { - struct dsa_port *dp; - - if (!dsa_is_user_port(ds, port)) - continue; - - dp = dsa_to_port(ds, port); - vlan_filtering = dsa_port_is_vlan_filtering(dp); - - rc = sja1105_vlan_filtering(ds, port, vlan_filtering, NULL); - if (rc) - break; - } - rtnl_unlock(); - - return rc; -} - -enum sja1105_devlink_param_id { - SJA1105_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, - SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING, -}; - -int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id, - struct devlink_param_gset_ctx *ctx) -{ - struct sja1105_private *priv = ds->priv; - int err; - - switch (id) { - case 
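[The struct changes above replace two linked lists of VLAN bookkeeping with just two candidate PVIDs per port; which one is live depends only on whether the port's bridge is VLAN-aware (see sja1105_commit_pvid() further down). A minimal restatement as a sketch, with kernel types elided:

#include <stdbool.h>
#include <stdint.h>

struct pvid_state {
	uint16_t bridge_pvid;		/* used under a VLAN-aware bridge */
	uint16_t tag_8021q_pvid;	/* used otherwise (tag_8021q RX VLAN) */
};

static uint16_t active_pvid(const struct pvid_state *st, bool vlan_aware)
{
	return vlan_aware ? st->bridge_pvid : st->tag_8021q_pvid;
}
]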
SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING: - err = sja1105_best_effort_vlan_filtering_get(priv, - &ctx->val.vbool); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id, - struct devlink_param_gset_ctx *ctx) -{ - struct sja1105_private *priv = ds->priv; - int err; - - switch (id) { - case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING: - err = sja1105_best_effort_vlan_filtering_set(priv, - ctx->val.vbool); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static const struct devlink_param sja1105_devlink_params[] = { - DSA_DEVLINK_PARAM_DRIVER(SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING, - "best_effort_vlan_filtering", - DEVLINK_PARAM_TYPE_BOOL, - BIT(DEVLINK_PARAM_CMODE_RUNTIME)), -}; - -static int sja1105_setup_devlink_params(struct dsa_switch *ds) -{ - return dsa_devlink_params_register(ds, sja1105_devlink_params, - ARRAY_SIZE(sja1105_devlink_params)); -} - -static void sja1105_teardown_devlink_params(struct dsa_switch *ds) -{ - dsa_devlink_params_unregister(ds, sja1105_devlink_params, - ARRAY_SIZE(sja1105_devlink_params)); -} - int sja1105_devlink_info_get(struct dsa_switch *ds, struct devlink_info_req *req, struct netlink_ext_ack *extack) @@ -233,23 +134,10 @@ int sja1105_devlink_info_get(struct dsa_switch *ds, int sja1105_devlink_setup(struct dsa_switch *ds) { - int rc; - - rc = sja1105_setup_devlink_params(ds); - if (rc) - return rc; - - rc = sja1105_setup_devlink_regions(ds); - if (rc < 0) { - sja1105_teardown_devlink_params(ds); - return rc; - } - - return 0; + return sja1105_setup_devlink_regions(ds); } void sja1105_devlink_teardown(struct dsa_switch *ds) { - sja1105_teardown_devlink_params(ds); sja1105_teardown_devlink_regions(ds); } diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c index 147709131c13..f2049f52833c 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c @@ -1355,14 +1355,14 @@ u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid) { struct sja1105_l2_lookup_params_entry *l2_lookup_params = priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS].entries; - u64 poly_koopman = l2_lookup_params->poly; + u64 input, poly_koopman = l2_lookup_params->poly; /* Convert polynomial from Koopman to 'normal' notation */ u8 poly = (u8)(1 + (poly_koopman << 1)); - u64 vlanid = l2_lookup_params->shared_learn ? 
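[On the Koopman-to-normal conversion in sja1105et_fdb_hash(): Koopman notation makes the x^0 term implicit (the low bit is dropped), so shifting left by one and adding 1 restores it, while the u8 cast truncates the x^8 term that normal notation leaves implicit. A worked example with an arbitrary Koopman value of 0x97:

#include <stdint.h>
#include <stdio.h>

static uint8_t koopman_to_normal(uint8_t koopman)
{
	/* (0x97 << 1) + 1 = 0x12f; the cast drops the implicit x^8 bit */
	return (uint8_t)((koopman << 1) + 1);
}

int main(void)
{
	printf("0x%02x\n", koopman_to_normal(0x97));	/* prints 0x2f */
	return 0;
}
]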
0 : vid; - u64 input = (vlanid << 48) | ether_addr_to_u64(addr); u8 crc = 0; /* seed */ int i; + input = ((u64)vid << 48) | ether_addr_to_u64(addr); + /* Mask the eight bytes starting from MSB one at a time */ for (i = 56; i >= 0; i -= 8) { u8 byte = (input & (0xffull << i)) >> i; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 49eb0ac41b7d..2f8cc6686c38 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -26,9 +26,6 @@ #include "sja1105_tas.h" #define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull -#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1) - -static const struct dsa_switch_ops sja1105_switch_ops; static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len, unsigned int startup_delay) @@ -57,6 +54,93 @@ static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd, return !!(l2_fwd[from].reach_port & BIT(to)); } +static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid) +{ + struct sja1105_vlan_lookup_entry *vlan; + int count, i; + + vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; + count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count; + + for (i = 0; i < count; i++) + if (vlan[i].vlanid == vid) + return i; + + /* Return an invalid entry index if not found */ + return -1; +} + +static int sja1105_drop_untagged(struct dsa_switch *ds, int port, bool drop) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_mac_config_entry *mac; + + mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; + + if (mac[port].drpuntag == drop) + return 0; + + mac[port].drpuntag = drop; + + return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, + &mac[port], true); +} + +static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid) +{ + struct sja1105_mac_config_entry *mac; + + mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; + + if (mac[port].vlanid == pvid) + return 0; + + mac[port].vlanid = pvid; + + return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, + &mac[port], true); +} + +static int sja1105_commit_pvid(struct dsa_switch *ds, int port) +{ + struct dsa_port *dp = dsa_to_port(ds, port); + struct sja1105_private *priv = ds->priv; + struct sja1105_vlan_lookup_entry *vlan; + bool drop_untagged = false; + int match, rc; + u16 pvid; + + if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev)) + pvid = priv->bridge_pvid[port]; + else + pvid = priv->tag_8021q_pvid[port]; + + rc = sja1105_pvid_apply(priv, port, pvid); + if (rc) + return rc; + + /* Only force dropping of untagged packets when the port is under a + * VLAN-aware bridge. When the tag_8021q pvid is used, we are + * deliberately removing the RX VLAN from the port's VMEMB_PORT list, + * to prevent DSA tag spoofing from the link partner. Untagged packets + * are the only ones that should be received with tag_8021q, so + * definitely don't drop them. 
+ */ + if (pvid == priv->bridge_pvid[port]) { + vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; + + match = sja1105_is_vlan_configured(priv, pvid); + + if (match < 0 || !(vlan[match].vmemb_port & BIT(port))) + drop_untagged = true; + } + + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) + drop_untagged = true; + + return sja1105_drop_untagged(ds, port, drop_untagged); +} + static int sja1105_init_mac_settings(struct sja1105_private *priv) { struct sja1105_mac_config_entry default_mac = { @@ -101,7 +185,7 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv) struct sja1105_mac_config_entry *mac; struct dsa_switch *ds = priv->ds; struct sja1105_table *table; - int i; + struct dsa_port *dp; table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG]; @@ -120,14 +204,27 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv) mac = table->entries; - for (i = 0; i < ds->num_ports; i++) { - mac[i] = default_mac; + list_for_each_entry(dp, &ds->dst->ports, list) { + if (dp->ds != ds) + continue; + + mac[dp->index] = default_mac; /* Let sja1105_bridge_stp_state_set() keep address learning - * enabled for the CPU port. + * enabled for the DSA ports. CPU ports use software-assisted + * learning to ensure that only FDB entries belonging to the + * bridge are learned, and that they are learned towards all + * CPU ports in a cross-chip topology if multiple CPU ports + * exist. + */ + if (dsa_port_is_dsa(dp)) + dp->learning = true; + + /* Disallow untagged packets from being received on the + * CPU and DSA ports. */ - if (dsa_is_cpu_port(ds, i)) - priv->learn_ena |= BIT(i); + if (dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)) + mac[dp->index].drpuntag = true; } return 0; @@ -378,8 +475,6 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv) table->entry_count = 1; for (port = 0; port < ds->num_ports; port++) { - struct sja1105_bridge_vlan *v; - if (dsa_is_unused_port(ds, port)) continue; @@ -387,22 +482,10 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv) pvid.vlan_bc |= BIT(port); pvid.tag_port &= ~BIT(port); - v = kzalloc(sizeof(*v), GFP_KERNEL); - if (!v) - return -ENOMEM; - - v->port = port; - v->vid = SJA1105_DEFAULT_VLAN; - v->untagged = true; - if (dsa_is_cpu_port(ds, port)) - v->pvid = true; - list_add(&v->list, &priv->dsa_8021q_vlans); - - v = kmemdup(v, sizeof(*v), GFP_KERNEL); - if (!v) - return -ENOMEM; - - list_add(&v->list, &priv->bridge_vlans); + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { + priv->tag_8021q_pvid[port] = SJA1105_DEFAULT_VLAN; + priv->bridge_pvid[port] = SJA1105_DEFAULT_VLAN; + } } ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; @@ -413,8 +496,11 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv) { struct sja1105_l2_forwarding_entry *l2fwd; struct dsa_switch *ds = priv->ds; + struct dsa_switch_tree *dst; struct sja1105_table *table; - int i, j; + struct dsa_link *dl; + int port, tc; + int from, to; table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING]; @@ -432,47 +518,109 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv) l2fwd = table->entries; - /* First 5 entries define the forwarding rules */ - for (i = 0; i < ds->num_ports; i++) { - unsigned int upstream = dsa_upstream_port(priv->ds, i); + /* First 5 entries in the L2 Forwarding Table define the forwarding + * rules and the VLAN PCP to ingress queue mapping. + * Set up the ingress queue mapping first. 
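[The decision sja1105_commit_pvid() makes above can be condensed into a pure predicate; a sketch, with the VLAN-table membership lookup reduced to a boolean:

#include <stdbool.h>

static bool should_drop_untagged(bool cpu_or_dsa_port, bool using_bridge_pvid,
				 bool pvid_installed_with_port)
{
	/* CPU and DSA ports never take untagged traffic */
	if (cpu_or_dsa_port)
		return true;

	/* Under a VLAN-aware bridge, untagged traffic is dropped unless
	 * the bridge PVID exists in the VLAN table with this port as a
	 * member; tag_8021q pvids always accept untagged frames.
	 */
	return using_bridge_pvid && !pvid_installed_with_port;
}
]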
+ */ + for (port = 0; port < ds->num_ports; port++) { + if (dsa_is_unused_port(ds, port)) + continue; + + for (tc = 0; tc < SJA1105_NUM_TC; tc++) + l2fwd[port].vlan_pmap[tc] = tc; + } - if (dsa_is_unused_port(ds, i)) + /* Then manage the forwarding domain for user ports. These can forward + * only to the always-on domain (CPU port and DSA links) + */ + for (from = 0; from < ds->num_ports; from++) { + if (!dsa_is_user_port(ds, from)) continue; - for (j = 0; j < SJA1105_NUM_TC; j++) - l2fwd[i].vlan_pmap[j] = j; + for (to = 0; to < ds->num_ports; to++) { + if (!dsa_is_cpu_port(ds, to) && + !dsa_is_dsa_port(ds, to)) + continue; - /* All ports start up with egress flooding enabled, - * including the CPU port. - */ - priv->ucast_egress_floods |= BIT(i); - priv->bcast_egress_floods |= BIT(i); + l2fwd[from].bc_domain |= BIT(to); + l2fwd[from].fl_domain |= BIT(to); - if (i == upstream) + sja1105_port_allow_traffic(l2fwd, from, to, true); + } + } + + /* Then manage the forwarding domain for DSA links and CPU ports (the + * always-on domain). These can send packets to any enabled port except + * themselves. + */ + for (from = 0; from < ds->num_ports; from++) { + if (!dsa_is_cpu_port(ds, from) && !dsa_is_dsa_port(ds, from)) continue; - sja1105_port_allow_traffic(l2fwd, i, upstream, true); - sja1105_port_allow_traffic(l2fwd, upstream, i, true); + for (to = 0; to < ds->num_ports; to++) { + if (dsa_is_unused_port(ds, to)) + continue; - l2fwd[i].bc_domain = BIT(upstream); - l2fwd[i].fl_domain = BIT(upstream); + if (from == to) + continue; - l2fwd[upstream].bc_domain |= BIT(i); - l2fwd[upstream].fl_domain |= BIT(i); + l2fwd[from].bc_domain |= BIT(to); + l2fwd[from].fl_domain |= BIT(to); + + sja1105_port_allow_traffic(l2fwd, from, to, true); + } + } + + /* In odd topologies ("H" connections where there is a DSA link to + * another switch which also has its own CPU port), TX packets can loop + * back into the system (they are flooded from CPU port 1 to the DSA + * link, and from there to CPU port 2). Prevent this from happening by + * cutting RX from DSA links towards our CPU port, if the remote switch + * has its own CPU port and therefore doesn't need ours for network + * stack termination. + */ + dst = ds->dst; + + list_for_each_entry(dl, &dst->rtable, list) { + if (dl->dp->ds != ds || dl->link_dp->cpu_dp == dl->dp->cpu_dp) + continue; + + from = dl->dp->index; + to = dsa_upstream_port(ds, from); + + dev_warn(ds->dev, + "H topology detected, cutting RX from DSA link %d to CPU port %d to prevent TX packet loops\n", + from, to); + + sja1105_port_allow_traffic(l2fwd, from, to, false); + + l2fwd[from].bc_domain &= ~BIT(to); + l2fwd[from].fl_domain &= ~BIT(to); + } + + /* Finally, manage the egress flooding domain. All ports start up with + * flooding enabled, including the CPU port and DSA links. + */ + for (port = 0; port < ds->num_ports; port++) { + if (dsa_is_unused_port(ds, port)) + continue; + + priv->ucast_egress_floods |= BIT(port); + priv->bcast_egress_floods |= BIT(port); } /* Next 8 entries define VLAN PCP mapping from ingress to egress. * Create a one-to-one mapping. 
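[A toy run of the two forwarding-domain loops above, for a hypothetical 5-port switch where ports 0-2 are user ports, port 3 is a DSA link and port 4 the CPU port, so {3,4} form the always-on domain:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	const int num_ports = 5;
	const uint32_t always_on = BIT(3) | BIT(4);	/* CPU + DSA links */
	int from;

	for (from = 0; from < num_ports; from++) {
		uint32_t reach;

		if (always_on & BIT(from))
			reach = (BIT(num_ports) - 1) & ~BIT(from); /* all but self */
		else
			reach = always_on;	/* user ports reach only CPU/DSA */

		printf("port %d: reach_port = 0x%02x\n", from, reach);
	}
	return 0;
}
]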
*/ - for (i = 0; i < SJA1105_NUM_TC; i++) { - for (j = 0; j < ds->num_ports; j++) { - if (dsa_is_unused_port(ds, j)) + for (tc = 0; tc < SJA1105_NUM_TC; tc++) { + for (port = 0; port < ds->num_ports; port++) { + if (dsa_is_unused_port(ds, port)) continue; - l2fwd[ds->num_ports + i].vlan_pmap[j] = i; + l2fwd[ds->num_ports + tc].vlan_pmap[port] = tc; } - l2fwd[ds->num_ports + i].type_egrpcp2outputq = true; + l2fwd[ds->num_ports + tc].type_egrpcp2outputq = true; } return 0; @@ -551,18 +699,11 @@ void sja1105_frame_memory_partitioning(struct sja1105_private *priv) { struct sja1105_l2_forwarding_params_entry *l2_fwd_params; struct sja1105_vl_forwarding_params_entry *vl_fwd_params; - int max_mem = priv->info->max_frame_mem; struct sja1105_table *table; - /* VLAN retagging is implemented using a loopback port that consumes - * frame buffers. That leaves less for us. - */ - if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT) - max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD; - table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; l2_fwd_params = table->entries; - l2_fwd_params->part_spc[0] = max_mem; + l2_fwd_params->part_spc[0] = SJA1105_MAX_FRAME_MEMORY; /* If we have any critical-traffic virtual links, we need to reserve * some frame buffer memory for them. At the moment, hardcode the value @@ -634,6 +775,72 @@ static void sja1110_select_tdmaconfigidx(struct sja1105_private *priv) general_params->tdmaconfigidx = tdmaconfigidx; } +static int sja1105_init_topology(struct sja1105_private *priv, + struct sja1105_general_params_entry *general_params) +{ + struct dsa_switch *ds = priv->ds; + int port; + + /* The host port is the destination for traffic matching mac_fltres1 + * and mac_fltres0 on all ports except itself. Default to an invalid + * value. + */ + general_params->host_port = ds->num_ports; + + /* Link-local traffic received on casc_port will be forwarded + * to host_port without embedding the source port and device ID + * info in the destination MAC address, and no RX timestamps will be + * taken either (presumably because it is a cascaded port and a + * downstream SJA switch already did that). + * To disable the feature, we need to do different things depending on + * switch generation. On SJA1105 we need to set an invalid port, while + * on SJA1110 which support multiple cascaded ports, this field is a + * bitmask so it must be left zero. 
+ */ + if (!priv->info->multiple_cascade_ports) + general_params->casc_port = ds->num_ports; + + for (port = 0; port < ds->num_ports; port++) { + bool is_upstream = dsa_is_upstream_port(ds, port); + bool is_dsa_link = dsa_is_dsa_port(ds, port); + + /* Upstream ports can be dedicated CPU ports or + * upstream-facing DSA links + */ + if (is_upstream) { + if (general_params->host_port == ds->num_ports) { + general_params->host_port = port; + } else { + dev_err(ds->dev, + "Port %llu is already a host port, configuring %d as one too is not supported\n", + general_params->host_port, port); + return -EINVAL; + } + } + + /* Cascade ports are downstream-facing DSA links */ + if (is_dsa_link && !is_upstream) { + if (priv->info->multiple_cascade_ports) { + general_params->casc_port |= BIT(port); + } else if (general_params->casc_port == ds->num_ports) { + general_params->casc_port = port; + } else { + dev_err(ds->dev, + "Port %llu is already a cascade port, configuring %d as one too is not supported\n", + general_params->casc_port, port); + return -EINVAL; + } + } + } + + if (general_params->host_port == ds->num_ports) { + dev_err(ds->dev, "No host port configured\n"); + return -EINVAL; + } + + return 0; +} + static int sja1105_init_general_params(struct sja1105_private *priv) { struct sja1105_general_params_entry default_general_params = { @@ -652,12 +859,6 @@ static int sja1105_init_general_params(struct sja1105_private *priv) .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK, .incl_srcpt0 = false, .send_meta0 = false, - /* The destination for traffic matching mac_fltres1 and - * mac_fltres0 on all ports except host_port. Such traffic - * receieved on host_port itself would be dropped, except - * by installing a temporary 'management route' - */ - .host_port = priv->ds->num_ports, /* Default to an invalid value */ .mirr_port = priv->ds->num_ports, /* No TTEthernet */ @@ -677,16 +878,12 @@ static int sja1105_init_general_params(struct sja1105_private *priv) .header_type = ETH_P_SJA1110, }; struct sja1105_general_params_entry *general_params; - struct dsa_switch *ds = priv->ds; struct sja1105_table *table; - int port; + int rc; - for (port = 0; port < ds->num_ports; port++) { - if (dsa_is_cpu_port(ds, port)) { - default_general_params.host_port = port; - break; - } - } + rc = sja1105_init_topology(priv, &default_general_params); + if (rc) + return rc; table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; @@ -709,19 +906,6 @@ static int sja1105_init_general_params(struct sja1105_private *priv) sja1110_select_tdmaconfigidx(priv); - /* Link-local traffic received on casc_port will be forwarded - * to host_port without embedding the source port and device ID - * info in the destination MAC address, and no RX timestamps will be - * taken either (presumably because it is a cascaded port and a - * downstream SJA switch already did that). - * To disable the feature, we need to do different things depending on - * switch generation. On SJA1105 we need to set an invalid port, while - * on SJA1110 which support multiple cascaded ports, this field is a - * bitmask so it must be left zero. 
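[The loop in sja1105_init_topology() classifies each port exactly once; a condensed restatement of the rule as a sketch:

static const char *sja1105_port_role(bool is_upstream, bool is_dsa_link)
{
	if (is_upstream)
		return "host port";	/* CPU port or upstream DSA link */
	if (is_dsa_link)
		return "cascade port";	/* downstream-facing DSA link */
	return "user port";
}

It additionally enforces that exactly one host port exists, and that only switches with multiple_cascade_ports (SJA1110) may have more than one cascade port.]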
- */ - if (!priv->info->multiple_cascade_ports) - general_params->casc_port = ds->num_ports; - return 0; } @@ -849,7 +1033,7 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv) for (port = 0; port < ds->num_ports; port++) { int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; - if (dsa_is_cpu_port(priv->ds, port)) + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) mtu += VLAN_HLEN; policing[port].smax = 65535; /* Burst size in bytes */ @@ -1568,18 +1752,6 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port, { struct sja1105_private *priv = ds->priv; - /* dsa_8021q is in effect when the bridge's vlan_filtering isn't, - * so the switch still does some VLAN processing internally. - * But Shared VLAN Learning (SVL) is also active, and it will take - * care of autonomous forwarding between the unique pvid's of each - * port. Here we just make sure that users can't add duplicate FDB - * entries when in this mode - the actual VID doesn't matter except - * for what gets printed in 'bridge fdb show'. In the case of zero, - * no VID gets printed at all. - */ - if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL) - vid = 0; - return priv->info->fdb_add_cmd(ds, port, addr, vid); } @@ -1588,9 +1760,6 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port, { struct sja1105_private *priv = ds->priv; - if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL) - vid = 0; - return priv->info->fdb_del_cmd(ds, port, addr, vid); } @@ -1633,7 +1802,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, u64_to_ether_addr(l2_lookup.macaddr, macaddr); /* We need to hide the dsa_8021q VLANs from the user. */ - if (priv->vlan_state == SJA1105_VLAN_UNAWARE) + if (!priv->vlan_aware) l2_lookup.vlanid = 0; rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); if (rc) @@ -1642,6 +1811,46 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, return 0; } +static void sja1105_fast_age(struct dsa_switch *ds, int port) +{ + struct sja1105_private *priv = ds->priv; + int i; + + for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { + struct sja1105_l2_lookup_entry l2_lookup = {0}; + u8 macaddr[ETH_ALEN]; + int rc; + + rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, + i, &l2_lookup); + /* No fdb entry at i, not an issue */ + if (rc == -ENOENT) + continue; + if (rc) { + dev_err(ds->dev, "Failed to read FDB: %pe\n", + ERR_PTR(rc)); + return; + } + + if (!(l2_lookup.destports & BIT(port))) + continue; + + /* Don't delete static FDB entries */ + if (l2_lookup.lockeds) + continue; + + u64_to_ether_addr(l2_lookup.macaddr, macaddr); + + rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid); + if (rc) { + dev_err(ds->dev, + "Failed to delete FDB entry %pM vid %lld: %pe\n", + macaddr, l2_lookup.vlanid, ERR_PTR(rc)); + return; + } + } +} + static int sja1105_mdb_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb) { @@ -1740,12 +1949,17 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port, if (rc) return rc; + rc = sja1105_commit_pvid(ds, port); + if (rc) + return rc; + return sja1105_manage_flood_domains(priv); } static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, u8 state) { + struct dsa_port *dp = dsa_to_port(ds, port); struct sja1105_private *priv = ds->priv; struct sja1105_mac_config_entry *mac; @@ -1771,12 +1985,12 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, case BR_STATE_LEARNING: mac[port].ingress = true; mac[port].egress = false; - mac[port].dyn_learn = 
!!(priv->learn_ena & BIT(port)); + mac[port].dyn_learn = dp->learning; break; case BR_STATE_FORWARDING: mac[port].ingress = true; mac[port].egress = true; - mac[port].dyn_learn = !!(priv->learn_ena & BIT(port)); + mac[port].dyn_learn = dp->learning; break; default: dev_err(ds->dev, "invalid STP state: %d\n", state); @@ -2039,97 +2253,6 @@ out: return rc; } -static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid) -{ - struct sja1105_mac_config_entry *mac; - - mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; - - mac[port].vlanid = pvid; - - return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, - &mac[port], true); -} - -static int sja1105_crosschip_bridge_join(struct dsa_switch *ds, - int tree_index, int sw_index, - int other_port, struct net_device *br) -{ - struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index); - struct sja1105_private *other_priv = other_ds->priv; - struct sja1105_private *priv = ds->priv; - int port, rc; - - if (other_ds->ops != &sja1105_switch_ops) - return 0; - - for (port = 0; port < ds->num_ports; port++) { - if (!dsa_is_user_port(ds, port)) - continue; - if (dsa_to_port(ds, port)->bridge_dev != br) - continue; - - rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx, - port, - other_priv->dsa_8021q_ctx, - other_port); - if (rc) - return rc; - - rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx, - other_port, - priv->dsa_8021q_ctx, - port); - if (rc) - return rc; - } - - return 0; -} - -static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds, - int tree_index, int sw_index, - int other_port, - struct net_device *br) -{ - struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index); - struct sja1105_private *other_priv = other_ds->priv; - struct sja1105_private *priv = ds->priv; - int port; - - if (other_ds->ops != &sja1105_switch_ops) - return; - - for (port = 0; port < ds->num_ports; port++) { - if (!dsa_is_user_port(ds, port)) - continue; - if (dsa_to_port(ds, port)->bridge_dev != br) - continue; - - dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port, - other_priv->dsa_8021q_ctx, - other_port); - - dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx, - other_port, - priv->dsa_8021q_ctx, port); - } -} - -static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled) -{ - struct sja1105_private *priv = ds->priv; - int rc; - - rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled); - if (rc) - return rc; - - dev_info(ds->dev, "%s switch tagging\n", - enabled ? 
"Enabled" : "Disabled"); - return 0; -} - static enum dsa_tag_protocol sja1105_get_tag_protocol(struct dsa_switch *ds, int port, enum dsa_tag_protocol mp) @@ -2139,669 +2262,6 @@ sja1105_get_tag_protocol(struct dsa_switch *ds, int port, return priv->info->tag_proto; } -static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid) -{ - int subvlan; - - if (pvid) - return 0; - - for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) - if (subvlan_map[subvlan] == VLAN_N_VID) - return subvlan; - - return -1; -} - -static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid) -{ - int subvlan; - - for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) - if (subvlan_map[subvlan] == vid) - return subvlan; - - return -1; -} - -static int sja1105_find_committed_subvlan(struct sja1105_private *priv, - int port, u16 vid) -{ - struct sja1105_port *sp = &priv->ports[port]; - - return sja1105_find_subvlan(sp->subvlan_map, vid); -} - -static void sja1105_init_subvlan_map(u16 *subvlan_map) -{ - int subvlan; - - for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) - subvlan_map[subvlan] = VLAN_N_VID; -} - -static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port, - u16 *subvlan_map) -{ - struct sja1105_port *sp = &priv->ports[port]; - int subvlan; - - for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) - sp->subvlan_map[subvlan] = subvlan_map[subvlan]; -} - -static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid) -{ - struct sja1105_vlan_lookup_entry *vlan; - int count, i; - - vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; - count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count; - - for (i = 0; i < count; i++) - if (vlan[i].vlanid == vid) - return i; - - /* Return an invalid entry index if not found */ - return -1; -} - -static int -sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging, - int count, int from_port, u16 from_vid, - u16 to_vid) -{ - int i; - - for (i = 0; i < count; i++) - if (retagging[i].ing_port == BIT(from_port) && - retagging[i].vlan_ing == from_vid && - retagging[i].vlan_egr == to_vid) - return i; - - /* Return an invalid entry index if not found */ - return -1; -} - -static int sja1105_commit_vlans(struct sja1105_private *priv, - struct sja1105_vlan_lookup_entry *new_vlan, - struct sja1105_retagging_entry *new_retagging, - int num_retagging) -{ - struct sja1105_retagging_entry *retagging; - struct sja1105_vlan_lookup_entry *vlan; - struct sja1105_table *table; - int num_vlans = 0; - int rc, i, k = 0; - - /* VLAN table */ - table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; - vlan = table->entries; - - for (i = 0; i < VLAN_N_VID; i++) { - int match = sja1105_is_vlan_configured(priv, i); - - if (new_vlan[i].vlanid != VLAN_N_VID) - num_vlans++; - - if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) { - /* Was there before, no longer is. 
Delete */ - dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i); - rc = sja1105_dynamic_config_write(priv, - BLK_IDX_VLAN_LOOKUP, - i, &vlan[match], false); - if (rc < 0) - return rc; - } else if (new_vlan[i].vlanid != VLAN_N_VID) { - /* Nothing changed, don't do anything */ - if (match >= 0 && - vlan[match].vlanid == new_vlan[i].vlanid && - vlan[match].tag_port == new_vlan[i].tag_port && - vlan[match].vlan_bc == new_vlan[i].vlan_bc && - vlan[match].vmemb_port == new_vlan[i].vmemb_port) - continue; - /* Update entry */ - dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i); - rc = sja1105_dynamic_config_write(priv, - BLK_IDX_VLAN_LOOKUP, - i, &new_vlan[i], - true); - if (rc < 0) - return rc; - } - } - - if (table->entry_count) - kfree(table->entries); - - table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size, - GFP_KERNEL); - if (!table->entries) - return -ENOMEM; - - table->entry_count = num_vlans; - vlan = table->entries; - - for (i = 0; i < VLAN_N_VID; i++) { - if (new_vlan[i].vlanid == VLAN_N_VID) - continue; - vlan[k++] = new_vlan[i]; - } - - /* VLAN Retagging Table */ - table = &priv->static_config.tables[BLK_IDX_RETAGGING]; - retagging = table->entries; - - for (i = 0; i < table->entry_count; i++) { - rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING, - i, &retagging[i], false); - if (rc) - return rc; - } - - if (table->entry_count) - kfree(table->entries); - - table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size, - GFP_KERNEL); - if (!table->entries) - return -ENOMEM; - - table->entry_count = num_retagging; - retagging = table->entries; - - for (i = 0; i < num_retagging; i++) { - retagging[i] = new_retagging[i]; - - /* Update entry */ - rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING, - i, &retagging[i], true); - if (rc < 0) - return rc; - } - - return 0; -} - -struct sja1105_crosschip_vlan { - struct list_head list; - u16 vid; - bool untagged; - int port; - int other_port; - struct dsa_8021q_context *other_ctx; -}; - -struct sja1105_crosschip_switch { - struct list_head list; - struct dsa_8021q_context *other_ctx; -}; - -static int sja1105_commit_pvid(struct sja1105_private *priv) -{ - struct sja1105_bridge_vlan *v; - struct list_head *vlan_list; - int rc = 0; - - if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) - vlan_list = &priv->bridge_vlans; - else - vlan_list = &priv->dsa_8021q_vlans; - - list_for_each_entry(v, vlan_list, list) { - if (v->pvid) { - rc = sja1105_pvid_apply(priv, v->port, v->vid); - if (rc) - break; - } - } - - return rc; -} - -static int -sja1105_build_bridge_vlans(struct sja1105_private *priv, - struct sja1105_vlan_lookup_entry *new_vlan) -{ - struct sja1105_bridge_vlan *v; - - if (priv->vlan_state == SJA1105_VLAN_UNAWARE) - return 0; - - list_for_each_entry(v, &priv->bridge_vlans, list) { - int match = v->vid; - - new_vlan[match].vlanid = v->vid; - new_vlan[match].vmemb_port |= BIT(v->port); - new_vlan[match].vlan_bc |= BIT(v->port); - if (!v->untagged) - new_vlan[match].tag_port |= BIT(v->port); - new_vlan[match].type_entry = SJA1110_VLAN_D_TAG; - } - - return 0; -} - -static int -sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv, - struct sja1105_vlan_lookup_entry *new_vlan) -{ - struct sja1105_bridge_vlan *v; - - if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) - return 0; - - list_for_each_entry(v, &priv->dsa_8021q_vlans, list) { - int match = v->vid; - - new_vlan[match].vlanid = v->vid; - new_vlan[match].vmemb_port |= BIT(v->port); - new_vlan[match].vlan_bc |= BIT(v->port); - if (!v->untagged) 
- new_vlan[match].tag_port |= BIT(v->port); - new_vlan[match].type_entry = SJA1110_VLAN_D_TAG; - } - - return 0; -} - -static int sja1105_build_subvlans(struct sja1105_private *priv, - u16 subvlan_map[][DSA_8021Q_N_SUBVLAN], - struct sja1105_vlan_lookup_entry *new_vlan, - struct sja1105_retagging_entry *new_retagging, - int *num_retagging) -{ - struct sja1105_bridge_vlan *v; - int k = *num_retagging; - - if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT) - return 0; - - list_for_each_entry(v, &priv->bridge_vlans, list) { - int upstream = dsa_upstream_port(priv->ds, v->port); - int match, subvlan; - u16 rx_vid; - - /* Only sub-VLANs on user ports need to be applied. - * Bridge VLANs also include VLANs added automatically - * by DSA on the CPU port. - */ - if (!dsa_is_user_port(priv->ds, v->port)) - continue; - - subvlan = sja1105_find_subvlan(subvlan_map[v->port], - v->vid); - if (subvlan < 0) { - subvlan = sja1105_find_free_subvlan(subvlan_map[v->port], - v->pvid); - if (subvlan < 0) { - dev_err(priv->ds->dev, "No more free subvlans\n"); - return -ENOSPC; - } - } - - rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan); - - /* @v->vid on @v->port needs to be retagged to @rx_vid - * on @upstream. Assume @v->vid on @v->port and on - * @upstream was already configured by the previous - * iteration over bridge_vlans. - */ - match = rx_vid; - new_vlan[match].vlanid = rx_vid; - new_vlan[match].vmemb_port |= BIT(v->port); - new_vlan[match].vmemb_port |= BIT(upstream); - new_vlan[match].vlan_bc |= BIT(v->port); - new_vlan[match].vlan_bc |= BIT(upstream); - /* The "untagged" flag is set the same as for the - * original VLAN - */ - if (!v->untagged) - new_vlan[match].tag_port |= BIT(v->port); - /* But it's always tagged towards the CPU */ - new_vlan[match].tag_port |= BIT(upstream); - new_vlan[match].type_entry = SJA1110_VLAN_D_TAG; - - /* The Retagging Table generates packet *clones* with - * the new VLAN. This is a very odd hardware quirk - * which we need to suppress by dropping the original - * packet. - * Deny egress of the original VLAN towards the CPU - * port. This will force the switch to drop it, and - * we'll see only the retagged packets. - */ - match = v->vid; - new_vlan[match].vlan_bc &= ~BIT(upstream); - - /* And the retagging itself */ - new_retagging[k].vlan_ing = v->vid; - new_retagging[k].vlan_egr = rx_vid; - new_retagging[k].ing_port = BIT(v->port); - new_retagging[k].egr_port = BIT(upstream); - if (k++ == SJA1105_MAX_RETAGGING_COUNT) { - dev_err(priv->ds->dev, "No more retagging rules\n"); - return -ENOSPC; - } - - subvlan_map[v->port][subvlan] = v->vid; - } - - *num_retagging = k; - - return 0; -} - -/* Sadly, in crosschip scenarios where the CPU port is also the link to another - * switch, we should retag backwards (the dsa_8021q vid to the original vid) on - * the CPU port of neighbour switches. 
- */ -static int -sja1105_build_crosschip_subvlans(struct sja1105_private *priv, - struct sja1105_vlan_lookup_entry *new_vlan, - struct sja1105_retagging_entry *new_retagging, - int *num_retagging) -{ - struct sja1105_crosschip_vlan *tmp, *pos; - struct dsa_8021q_crosschip_link *c; - struct sja1105_bridge_vlan *v, *w; - struct list_head crosschip_vlans; - int k = *num_retagging; - int rc = 0; - - if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT) - return 0; - - INIT_LIST_HEAD(&crosschip_vlans); - - list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) { - struct sja1105_private *other_priv = c->other_ctx->ds->priv; - - if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) - continue; - - /* Crosschip links are also added to the CPU ports. - * Ignore those. - */ - if (!dsa_is_user_port(priv->ds, c->port)) - continue; - if (!dsa_is_user_port(c->other_ctx->ds, c->other_port)) - continue; - - /* Search for VLANs on the remote port */ - list_for_each_entry(v, &other_priv->bridge_vlans, list) { - bool already_added = false; - bool we_have_it = false; - - if (v->port != c->other_port) - continue; - - /* If @v is a pvid on @other_ds, it does not need - * re-retagging, because its SVL field is 0 and we - * already allow that, via the dsa_8021q crosschip - * links. - */ - if (v->pvid) - continue; - - /* Search for the VLAN on our local port */ - list_for_each_entry(w, &priv->bridge_vlans, list) { - if (w->port == c->port && w->vid == v->vid) { - we_have_it = true; - break; - } - } - - if (!we_have_it) - continue; - - list_for_each_entry(tmp, &crosschip_vlans, list) { - if (tmp->vid == v->vid && - tmp->untagged == v->untagged && - tmp->port == c->port && - tmp->other_port == v->port && - tmp->other_ctx == c->other_ctx) { - already_added = true; - break; - } - } - - if (already_added) - continue; - - tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); - if (!tmp) { - dev_err(priv->ds->dev, "Failed to allocate memory\n"); - rc = -ENOMEM; - goto out; - } - tmp->vid = v->vid; - tmp->port = c->port; - tmp->other_port = v->port; - tmp->other_ctx = c->other_ctx; - tmp->untagged = v->untagged; - list_add(&tmp->list, &crosschip_vlans); - } - } - - list_for_each_entry(tmp, &crosschip_vlans, list) { - struct sja1105_private *other_priv = tmp->other_ctx->ds->priv; - int upstream = dsa_upstream_port(priv->ds, tmp->port); - int match, subvlan; - u16 rx_vid; - - subvlan = sja1105_find_committed_subvlan(other_priv, - tmp->other_port, - tmp->vid); - /* If this happens, it's a bug. The neighbour switch does not - * have a subvlan for tmp->vid on tmp->other_port, but it - * should, since we already checked for its vlan_state. - */ - if (WARN_ON(subvlan < 0)) { - rc = -EINVAL; - goto out; - } - - rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds, - tmp->other_port, - subvlan); - - /* The @rx_vid retagged from @tmp->vid on - * {@tmp->other_ds, @tmp->other_port} needs to be - * re-retagged to @tmp->vid on the way back to us. - * - * Assume the original @tmp->vid is already configured - * on this local switch, otherwise we wouldn't be - * retagging its subvlan on the other switch in the - * first place. We just need to add a reverse retagging - * rule for @rx_vid and install @rx_vid on our ports. - */ - match = rx_vid; - new_vlan[match].vlanid = rx_vid; - new_vlan[match].vmemb_port |= BIT(tmp->port); - new_vlan[match].vmemb_port |= BIT(upstream); - /* The "untagged" flag is set the same as for the - * original VLAN. 
And towards the CPU, it doesn't - * really matter, because @rx_vid will only receive - * traffic on that port. For consistency with other dsa_8021q - * VLANs, we'll keep the CPU port tagged. - */ - if (!tmp->untagged) - new_vlan[match].tag_port |= BIT(tmp->port); - new_vlan[match].tag_port |= BIT(upstream); - new_vlan[match].type_entry = SJA1110_VLAN_D_TAG; - /* Deny egress of @rx_vid towards our front-panel port. - * This will force the switch to drop it, and we'll see - * only the re-retagged packets (having the original, - * pre-initial-retagging, VLAN @tmp->vid). - */ - new_vlan[match].vlan_bc &= ~BIT(tmp->port); - - /* On reverse retagging, the same ingress VLAN goes to multiple - * ports. So we have an opportunity to create composite rules - * to not waste the limited space in the retagging table. - */ - k = sja1105_find_retagging_entry(new_retagging, *num_retagging, - upstream, rx_vid, tmp->vid); - if (k < 0) { - if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) { - dev_err(priv->ds->dev, "No more retagging rules\n"); - rc = -ENOSPC; - goto out; - } - k = (*num_retagging)++; - } - /* And the retagging itself */ - new_retagging[k].vlan_ing = rx_vid; - new_retagging[k].vlan_egr = tmp->vid; - new_retagging[k].ing_port = BIT(upstream); - new_retagging[k].egr_port |= BIT(tmp->port); - } - -out: - list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) { - list_del(&tmp->list); - kfree(tmp); - } - - return rc; -} - -static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify); - -static int sja1105_notify_crosschip_switches(struct sja1105_private *priv) -{ - struct sja1105_crosschip_switch *s, *pos; - struct list_head crosschip_switches; - struct dsa_8021q_crosschip_link *c; - int rc = 0; - - INIT_LIST_HEAD(&crosschip_switches); - - list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) { - bool already_added = false; - - list_for_each_entry(s, &crosschip_switches, list) { - if (s->other_ctx == c->other_ctx) { - already_added = true; - break; - } - } - - if (already_added) - continue; - - s = kzalloc(sizeof(*s), GFP_KERNEL); - if (!s) { - dev_err(priv->ds->dev, "Failed to allocate memory\n"); - rc = -ENOMEM; - goto out; - } - s->other_ctx = c->other_ctx; - list_add(&s->list, &crosschip_switches); - } - - list_for_each_entry(s, &crosschip_switches, list) { - struct sja1105_private *other_priv = s->other_ctx->ds->priv; - - rc = sja1105_build_vlan_table(other_priv, false); - if (rc) - goto out; - } - -out: - list_for_each_entry_safe(s, pos, &crosschip_switches, list) { - list_del(&s->list); - kfree(s); - } - - return rc; -} - -static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify) -{ - u16 subvlan_map[SJA1105_MAX_NUM_PORTS][DSA_8021Q_N_SUBVLAN]; - struct sja1105_retagging_entry *new_retagging; - struct sja1105_vlan_lookup_entry *new_vlan; - struct sja1105_table *table; - int i, num_retagging = 0; - int rc; - - table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; - new_vlan = kcalloc(VLAN_N_VID, - table->ops->unpacked_entry_size, GFP_KERNEL); - if (!new_vlan) - return -ENOMEM; - - table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; - new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT, - table->ops->unpacked_entry_size, GFP_KERNEL); - if (!new_retagging) { - kfree(new_vlan); - return -ENOMEM; - } - - for (i = 0; i < VLAN_N_VID; i++) - new_vlan[i].vlanid = VLAN_N_VID; - - for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++) - new_retagging[i].vlan_ing = VLAN_N_VID; - - for (i = 0; i < priv->ds->num_ports; i++) - 
sja1105_init_subvlan_map(subvlan_map[i]); - - /* Bridge VLANs */ - rc = sja1105_build_bridge_vlans(priv, new_vlan); - if (rc) - goto out; - - /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c: - * - RX VLANs - * - TX VLANs - * - Crosschip links - */ - rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan); - if (rc) - goto out; - - /* Private VLANs necessary for dsa_8021q operation, which we need to - * determine on our own: - * - Sub-VLANs - * - Sub-VLANs of crosschip switches - */ - rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging, - &num_retagging); - if (rc) - goto out; - - rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging, - &num_retagging); - if (rc) - goto out; - - rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging); - if (rc) - goto out; - - rc = sja1105_commit_pvid(priv); - if (rc) - goto out; - - for (i = 0; i < priv->ds->num_ports; i++) - sja1105_commit_subvlan_map(priv, i, subvlan_map[i]); - - if (notify) { - rc = sja1105_notify_crosschip_switches(priv); - if (rc) - goto out; - } - -out: - kfree(new_vlan); - kfree(new_retagging); - - return rc; -} - /* The TPID setting belongs to the General Parameters table, * which can only be partially reconfigured at runtime (and not the TPID). * So a switch reset is required. @@ -2812,10 +2272,8 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, struct sja1105_l2_lookup_params_entry *l2_lookup_params; struct sja1105_general_params_entry *general_params; struct sja1105_private *priv = ds->priv; - enum sja1105_vlan_state state; struct sja1105_table *table; struct sja1105_rule *rule; - bool want_tagging; u16 tpid, tpid2; int rc; @@ -2837,28 +2295,10 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, tpid2 = ETH_P_SJA1105; } - for (port = 0; port < ds->num_ports; port++) { - struct sja1105_port *sp = &priv->ports[port]; - - if (enabled) - sp->xmit_tpid = priv->info->qinq_tpid; - else - sp->xmit_tpid = ETH_P_SJA1105; - } - - if (!enabled) - state = SJA1105_VLAN_UNAWARE; - else if (priv->best_effort_vlan_filtering) - state = SJA1105_VLAN_BEST_EFFORT; - else - state = SJA1105_VLAN_FILTERING_FULL; - - if (priv->vlan_state == state) + if (priv->vlan_aware == enabled) return 0; - priv->vlan_state = state; - want_tagging = (state == SJA1105_VLAN_UNAWARE || - state == SJA1105_VLAN_BEST_EFFORT); + priv->vlan_aware = enabled; table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; general_params = table->entries; @@ -2872,8 +2312,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, general_params->incl_srcpt1 = enabled; general_params->incl_srcpt0 = enabled; - want_tagging = priv->best_effort_vlan_filtering || !enabled; - /* VLAN filtering => independent VLAN learning. * No VLAN filtering (or best effort) => shared VLAN learning. 
* @@ -2894,314 +2332,205 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, */ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; l2_lookup_params = table->entries; - l2_lookup_params->shared_learn = want_tagging; + l2_lookup_params->shared_learn = !priv->vlan_aware; - sja1105_frame_memory_partitioning(priv); + for (port = 0; port < ds->num_ports; port++) { + if (dsa_is_unused_port(ds, port)) + continue; - rc = sja1105_build_vlan_table(priv, false); - if (rc) - return rc; + rc = sja1105_commit_pvid(ds, port); + if (rc) + return rc; + } rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING); if (rc) NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype"); - /* Switch port identification based on 802.1Q is only passable - * if we are not under a vlan_filtering bridge. So make sure - * the two configurations are mutually exclusive (of course, the - * user may know better, i.e. best_effort_vlan_filtering). - */ - return sja1105_setup_8021q_tagging(ds, want_tagging); + return rc; } -/* Returns number of VLANs added (0 or 1) on success, - * or a negative error code. - */ -static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid, - u16 flags, struct list_head *vlan_list) -{ - bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid = flags & BRIDGE_VLAN_INFO_PVID; - struct sja1105_bridge_vlan *v; - - list_for_each_entry(v, vlan_list, list) { - if (v->port == port && v->vid == vid) { - /* Already added */ - if (v->untagged == untagged && v->pvid == pvid) - /* Nothing changed */ - return 0; - - /* It's the same VLAN, but some of the flags changed - * and the user did not bother to delete it first. - * Update it and trigger sja1105_build_vlan_table. - */ - v->untagged = untagged; - v->pvid = pvid; - return 1; - } - } +static int sja1105_vlan_add(struct sja1105_private *priv, int port, u16 vid, + u16 flags, bool allowed_ingress) +{ + struct sja1105_vlan_lookup_entry *vlan; + struct sja1105_table *table; + int match, rc; - v = kzalloc(sizeof(*v), GFP_KERNEL); - if (!v) { - dev_err(ds->dev, "Out of memory while storing VLAN\n"); - return -ENOMEM; + table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; + + match = sja1105_is_vlan_configured(priv, vid); + if (match < 0) { + rc = sja1105_table_resize(table, table->entry_count + 1); + if (rc) + return rc; + match = table->entry_count - 1; } - v->port = port; - v->vid = vid; - v->untagged = untagged; - v->pvid = pvid; - list_add(&v->list, vlan_list); + /* Assign pointer after the resize (it's new memory) */ + vlan = table->entries; + + vlan[match].type_entry = SJA1110_VLAN_D_TAG; + vlan[match].vlanid = vid; + vlan[match].vlan_bc |= BIT(port); + + if (allowed_ingress) + vlan[match].vmemb_port |= BIT(port); + else + vlan[match].vmemb_port &= ~BIT(port); + + if (flags & BRIDGE_VLAN_INFO_UNTAGGED) + vlan[match].tag_port &= ~BIT(port); + else + vlan[match].tag_port |= BIT(port); - return 1; + return sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid, + &vlan[match], true); } -/* Returns number of VLANs deleted (0 or 1) */ -static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid, - struct list_head *vlan_list) +static int sja1105_vlan_del(struct sja1105_private *priv, int port, u16 vid) { - struct sja1105_bridge_vlan *v, *n; + struct sja1105_vlan_lookup_entry *vlan; + struct sja1105_table *table; + bool keep = true; + int match, rc; - list_for_each_entry_safe(v, n, vlan_list, list) { - if (v->port == port && v->vid == vid) { - list_del(&v->list); - kfree(v); - 
return 1; - } - } + table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; + + match = sja1105_is_vlan_configured(priv, vid); + /* Can't delete a missing entry. */ + if (match < 0) + return 0; + + /* Assign pointer after the resize (it's new memory) */ + vlan = table->entries; + + vlan[match].vlanid = vid; + vlan[match].vlan_bc &= ~BIT(port); + vlan[match].vmemb_port &= ~BIT(port); + /* Also unset tag_port, just so we don't have a confusing bitmap + * (no practical purpose). + */ + vlan[match].tag_port &= ~BIT(port); + + /* If there's no port left as member of this VLAN, + * it's time for it to go. + */ + if (!vlan[match].vmemb_port) + keep = false; + + rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid, + &vlan[match], keep); + if (rc < 0) + return rc; + + if (!keep) + return sja1105_table_delete_entry(table, match); return 0; } -static int sja1105_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct netlink_ext_ack *extack) +static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) { struct sja1105_private *priv = ds->priv; - bool vlan_table_changed = false; + u16 flags = vlan->flags; int rc; - /* If the user wants best-effort VLAN filtering (aka vlan_filtering - * bridge plus tagging), be sure to at least deny alterations to the - * configuration done by dsa_8021q. + /* Be sure to deny alterations to the configuration done by tag_8021q. */ - if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL && - vid_is_dsa_8021q(vlan->vid)) { + if (vid_is_dsa_8021q(vlan->vid)) { NL_SET_ERR_MSG_MOD(extack, "Range 1024-3071 reserved for dsa_8021q operation"); return -EBUSY; } - rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags, - &priv->bridge_vlans); - if (rc < 0) + /* Always install bridge VLANs as egress-tagged on CPU and DSA ports */ + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) + flags = 0; + + rc = sja1105_vlan_add(priv, port, vlan->vid, flags, true); + if (rc) return rc; - if (rc > 0) - vlan_table_changed = true; - if (!vlan_table_changed) - return 0; + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) + priv->bridge_pvid[port] = vlan->vid; - return sja1105_build_vlan_table(priv, true); + return sja1105_commit_pvid(ds, port); } -static int sja1105_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) +static int sja1105_bridge_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) { struct sja1105_private *priv = ds->priv; - bool vlan_table_changed = false; int rc; - rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans); - if (rc > 0) - vlan_table_changed = true; - - if (!vlan_table_changed) - return 0; + rc = sja1105_vlan_del(priv, port, vlan->vid); + if (rc) + return rc; - return sja1105_build_vlan_table(priv, true); + /* In case the pvid was deleted, make sure that untagged packets will + * be dropped. + */ + return sja1105_commit_pvid(ds, port); } static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, u16 flags) { struct sja1105_private *priv = ds->priv; + bool allowed_ingress = true; int rc; - rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans); - if (rc <= 0) + /* Prevent attackers from trying to inject a DSA tag from + * the outside world. 
+ */ + if (dsa_is_user_port(ds, port)) + allowed_ingress = false; + + rc = sja1105_vlan_add(priv, port, vid, flags, allowed_ingress); + if (rc) return rc; - return sja1105_build_vlan_table(priv, true); + if (flags & BRIDGE_VLAN_INFO_PVID) + priv->tag_8021q_pvid[port] = vid; + + return sja1105_commit_pvid(ds, port); } static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) { struct sja1105_private *priv = ds->priv; - int rc; - - rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans); - if (!rc) - return 0; - return sja1105_build_vlan_table(priv, true); + return sja1105_vlan_del(priv, port, vid); } -static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = { - .vlan_add = sja1105_dsa_8021q_vlan_add, - .vlan_del = sja1105_dsa_8021q_vlan_del, -}; - -/* The programming model for the SJA1105 switch is "all-at-once" via static - * configuration tables. Some of these can be dynamically modified at runtime, - * but not the xMII mode parameters table. - * Furthermode, some PHYs may not have crystals for generating their clocks - * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's - * ref_clk pin. So port clocking needs to be initialized early, before - * connecting to PHYs is attempted, otherwise they won't respond through MDIO. - * Setting correct PHY link speed does not matter now. - * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY - * bindings are not yet parsed by DSA core. We need to parse early so that we - * can populate the xMII mode parameters table. - */ -static int sja1105_setup(struct dsa_switch *ds) +static int sja1105_prechangeupper(struct dsa_switch *ds, int port, + struct netdev_notifier_changeupper_info *info) { - struct sja1105_private *priv = ds->priv; - int rc; + struct netlink_ext_ack *extack = info->info.extack; + struct net_device *upper = info->upper_dev; + struct dsa_switch_tree *dst = ds->dst; + struct dsa_port *dp; - rc = sja1105_parse_dt(priv); - if (rc < 0) { - dev_err(ds->dev, "Failed to parse DT: %d\n", rc); - return rc; - } - - /* Error out early if internal delays are required through DT - * and we can't apply them. 
- */ - rc = sja1105_parse_rgmii_delays(priv); - if (rc < 0) { - dev_err(ds->dev, "RGMII delay not supported\n"); - return rc; - } - - rc = sja1105_ptp_clock_register(ds); - if (rc < 0) { - dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc); - return rc; - } - - rc = sja1105_mdiobus_register(ds); - if (rc < 0) { - dev_err(ds->dev, "Failed to register MDIO bus: %pe\n", - ERR_PTR(rc)); - goto out_ptp_clock_unregister; - } - - if (priv->info->disable_microcontroller) { - rc = priv->info->disable_microcontroller(priv); - if (rc < 0) { - dev_err(ds->dev, - "Failed to disable microcontroller: %pe\n", - ERR_PTR(rc)); - goto out_mdiobus_unregister; - } - } - - /* Create and send configuration down to device */ - rc = sja1105_static_config_load(priv); - if (rc < 0) { - dev_err(ds->dev, "Failed to load static config: %d\n", rc); - goto out_mdiobus_unregister; + if (is_vlan_dev(upper)) { + NL_SET_ERR_MSG_MOD(extack, "8021q uppers are not supported"); + return -EBUSY; } - /* Configure the CGU (PHY link modes and speeds) */ - if (priv->info->clocking_setup) { - rc = priv->info->clocking_setup(priv); - if (rc < 0) { - dev_err(ds->dev, - "Failed to configure MII clocking: %pe\n", - ERR_PTR(rc)); - goto out_static_config_free; + if (netif_is_bridge_master(upper)) { + list_for_each_entry(dp, &dst->ports, list) { + if (dp->bridge_dev && dp->bridge_dev != upper && + br_vlan_enabled(dp->bridge_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Only one VLAN-aware bridge is supported"); + return -EBUSY; + } } } - /* On SJA1105, VLAN filtering per se is always enabled in hardware. - * The only thing we can do to disable it is lie about what the 802.1Q - * EtherType is. - * So it will still try to apply VLAN filtering, but all ingress - * traffic (except frames received with EtherType of ETH_P_SJA1105) - * will be internally tagged with a distorted VLAN header where the - * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid. - */ - ds->vlan_filtering_is_global = true; - - /* Advertise the 8 egress queues */ - ds->num_tx_queues = SJA1105_NUM_TC; - - ds->mtu_enforcement_ingress = true; - - priv->best_effort_vlan_filtering = true; - - rc = sja1105_devlink_setup(ds); - if (rc < 0) - goto out_static_config_free; - - /* The DSA/switchdev model brings up switch ports in standalone mode by - * default, and that means vlan_filtering is 0 since they're not under - * a bridge, so it's safe to set up switch tagging at this time. 
- */ - rtnl_lock(); - rc = sja1105_setup_8021q_tagging(ds, true); - rtnl_unlock(); - if (rc) - goto out_devlink_teardown; - return 0; - -out_devlink_teardown: - sja1105_devlink_teardown(ds); -out_mdiobus_unregister: - sja1105_mdiobus_unregister(ds); -out_ptp_clock_unregister: - sja1105_ptp_clock_unregister(ds); -out_static_config_free: - sja1105_static_config_free(&priv->static_config); - - return rc; -} - -static void sja1105_teardown(struct dsa_switch *ds) -{ - struct sja1105_private *priv = ds->priv; - struct sja1105_bridge_vlan *v, *n; - int port; - - for (port = 0; port < ds->num_ports; port++) { - struct sja1105_port *sp = &priv->ports[port]; - - if (!dsa_is_user_port(ds, port)) - continue; - - if (sp->xmit_worker) - kthread_destroy_worker(sp->xmit_worker); - } - - sja1105_devlink_teardown(ds); - sja1105_mdiobus_unregister(ds); - sja1105_flower_teardown(ds); - sja1105_tas_teardown(ds); - sja1105_ptp_clock_unregister(ds); - sja1105_static_config_free(&priv->static_config); - - list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) { - list_del(&v->list); - kfree(v); - } - - list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) { - list_del(&v->list); - kfree(v); - } } static void sja1105_port_disable(struct dsa_switch *ds, int port) @@ -3337,7 +2666,7 @@ static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu) new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN; - if (dsa_is_cpu_port(ds, port)) + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) new_mtu += VLAN_HLEN; policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; @@ -3484,23 +2813,13 @@ static int sja1105_port_set_learning(struct sja1105_private *priv, int port, bool enabled) { struct sja1105_mac_config_entry *mac; - int rc; mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; mac[port].dyn_learn = enabled; - rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, - &mac[port], true); - if (rc) - return rc; - - if (enabled) - priv->learn_ena |= BIT(port); - else - priv->learn_ena &= ~BIT(port); - - return 0; + return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, + &mac[port], true); } static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to, @@ -3616,7 +2935,189 @@ static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port, return 0; } -static const struct dsa_switch_ops sja1105_switch_ops = { +static void sja1105_teardown_ports(struct sja1105_private *priv) +{ + struct dsa_switch *ds = priv->ds; + int port; + + for (port = 0; port < ds->num_ports; port++) { + struct sja1105_port *sp = &priv->ports[port]; + + if (sp->xmit_worker) + kthread_destroy_worker(sp->xmit_worker); + } +} + +static int sja1105_setup_ports(struct sja1105_private *priv) +{ + struct sja1105_tagger_data *tagger_data = &priv->tagger_data; + struct dsa_switch *ds = priv->ds; + int port, rc; + + /* Connections between dsa_port and sja1105_port */ + for (port = 0; port < ds->num_ports; port++) { + struct sja1105_port *sp = &priv->ports[port]; + struct dsa_port *dp = dsa_to_port(ds, port); + struct kthread_worker *worker; + struct net_device *slave; + + if (!dsa_port_is_user(dp)) + continue; + + dp->priv = sp; + sp->dp = dp; + sp->data = tagger_data; + slave = dp->slave; + kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit); + worker = kthread_create_worker(0, "%s_xmit", slave->name); + if (IS_ERR(worker)) { + rc = PTR_ERR(worker); + dev_err(ds->dev, + "failed to create deferred xmit thread: %d\n", + rc); + goto out_destroy_workers; + } + 
sp->xmit_worker = worker; + skb_queue_head_init(&sp->xmit_queue); + } + + return 0; + +out_destroy_workers: + sja1105_teardown_ports(priv); + return rc; +} + +/* The programming model for the SJA1105 switch is "all-at-once" via static + * configuration tables. Some of these can be dynamically modified at runtime, + * but not the xMII mode parameters table. + * Furthermore, some PHYs may not have crystals for generating their clocks + * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's + * ref_clk pin. So port clocking needs to be initialized early, before + * connecting to PHYs is attempted, otherwise they won't respond through MDIO. + * Setting correct PHY link speed does not matter now. + * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY + * bindings are not yet parsed by DSA core. We need to parse early so that we + * can populate the xMII mode parameters table. + */ +static int sja1105_setup(struct dsa_switch *ds) +{ + struct sja1105_private *priv = ds->priv; + int rc; + + if (priv->info->disable_microcontroller) { + rc = priv->info->disable_microcontroller(priv); + if (rc < 0) { + dev_err(ds->dev, + "Failed to disable microcontroller: %pe\n", + ERR_PTR(rc)); + return rc; + } + } + + /* Create and send configuration down to device */ + rc = sja1105_static_config_load(priv); + if (rc < 0) { + dev_err(ds->dev, "Failed to load static config: %d\n", rc); + return rc; + } + + /* Configure the CGU (PHY link modes and speeds) */ + if (priv->info->clocking_setup) { + rc = priv->info->clocking_setup(priv); + if (rc < 0) { + dev_err(ds->dev, + "Failed to configure MII clocking: %pe\n", + ERR_PTR(rc)); + goto out_static_config_free; + } + } + + rc = sja1105_setup_ports(priv); + if (rc) + goto out_static_config_free; + + sja1105_tas_setup(ds); + sja1105_flower_setup(ds); + + rc = sja1105_ptp_clock_register(ds); + if (rc < 0) { + dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc); + goto out_flower_teardown; + } + + rc = sja1105_mdiobus_register(ds); + if (rc < 0) { + dev_err(ds->dev, "Failed to register MDIO bus: %pe\n", + ERR_PTR(rc)); + goto out_ptp_clock_unregister; + } + + rc = sja1105_devlink_setup(ds); + if (rc < 0) + goto out_mdiobus_unregister; + + rtnl_lock(); + rc = dsa_tag_8021q_register(ds, htons(ETH_P_8021Q)); + rtnl_unlock(); + if (rc) + goto out_devlink_teardown; + + /* On SJA1105, VLAN filtering per se is always enabled in hardware. + * The only thing we can do to disable it is lie about what the 802.1Q + * EtherType is. + * So it will still try to apply VLAN filtering, but all ingress + * traffic (except frames received with EtherType of ETH_P_SJA1105) + * will be internally tagged with a distorted VLAN header where the + * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
+ */ + ds->vlan_filtering_is_global = true; + ds->untag_bridge_pvid = true; + /* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */ + ds->num_fwd_offloading_bridges = 7; + + /* Advertise the 8 egress queues */ + ds->num_tx_queues = SJA1105_NUM_TC; + + ds->mtu_enforcement_ingress = true; + ds->assisted_learning_on_cpu_port = true; + + return 0; + +out_devlink_teardown: + sja1105_devlink_teardown(ds); +out_mdiobus_unregister: + sja1105_mdiobus_unregister(ds); +out_ptp_clock_unregister: + sja1105_ptp_clock_unregister(ds); +out_flower_teardown: + sja1105_flower_teardown(ds); + sja1105_tas_teardown(ds); + sja1105_teardown_ports(priv); +out_static_config_free: + sja1105_static_config_free(&priv->static_config); + + return rc; +} + +static void sja1105_teardown(struct dsa_switch *ds) +{ + struct sja1105_private *priv = ds->priv; + + rtnl_lock(); + dsa_tag_8021q_unregister(ds); + rtnl_unlock(); + + sja1105_devlink_teardown(ds); + sja1105_mdiobus_unregister(ds); + sja1105_ptp_clock_unregister(ds); + sja1105_flower_teardown(ds); + sja1105_tas_teardown(ds); + sja1105_teardown_ports(priv); + sja1105_static_config_free(&priv->static_config); +} + +const struct dsa_switch_ops sja1105_switch_ops = { .get_tag_protocol = sja1105_get_tag_protocol, .setup = sja1105_setup, .teardown = sja1105_teardown, @@ -3635,14 +3136,15 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .port_fdb_dump = sja1105_fdb_dump, .port_fdb_add = sja1105_fdb_add, .port_fdb_del = sja1105_fdb_del, + .port_fast_age = sja1105_fast_age, .port_bridge_join = sja1105_bridge_join, .port_bridge_leave = sja1105_bridge_leave, .port_pre_bridge_flags = sja1105_port_pre_bridge_flags, .port_bridge_flags = sja1105_port_bridge_flags, .port_stp_state_set = sja1105_bridge_stp_state_set, .port_vlan_filtering = sja1105_vlan_filtering, - .port_vlan_add = sja1105_vlan_add, - .port_vlan_del = sja1105_vlan_del, + .port_vlan_add = sja1105_bridge_vlan_add, + .port_vlan_del = sja1105_bridge_vlan_del, .port_mdb_add = sja1105_mdb_add, .port_mdb_del = sja1105_mdb_del, .port_hwtstamp_get = sja1105_hwtstamp_get, @@ -3657,12 +3159,14 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .cls_flower_add = sja1105_cls_flower_add, .cls_flower_del = sja1105_cls_flower_del, .cls_flower_stats = sja1105_cls_flower_stats, - .crosschip_bridge_join = sja1105_crosschip_bridge_join, - .crosschip_bridge_leave = sja1105_crosschip_bridge_leave, - .devlink_param_get = sja1105_devlink_param_get, - .devlink_param_set = sja1105_devlink_param_set, .devlink_info_get = sja1105_devlink_info_get, + .tag_8021q_vlan_add = sja1105_dsa_8021q_vlan_add, + .tag_8021q_vlan_del = sja1105_dsa_8021q_vlan_del, + .port_prechangeupper = sja1105_prechangeupper, + .port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload, + .port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload, }; +EXPORT_SYMBOL_GPL(sja1105_switch_ops); static const struct of_device_id sja1105_dt_ids[]; @@ -3715,12 +3219,11 @@ static int sja1105_check_device_id(struct sja1105_private *priv) static int sja1105_probe(struct spi_device *spi) { - struct sja1105_tagger_data *tagger_data; struct device *dev = &spi->dev; struct sja1105_private *priv; size_t max_xfer, max_msg; struct dsa_switch *ds; - int rc, port; + int rc; if (!dev->of_node) { dev_err(dev, "No DTS bindings for SJA1105 driver\n"); @@ -3800,95 +3303,42 @@ static int sja1105_probe(struct spi_device *spi) ds->priv = priv; priv->ds = ds; - tagger_data = &priv->tagger_data; - mutex_init(&priv->ptp_data.lock); 
mutex_init(&priv->mgmt_lock); - priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx), - GFP_KERNEL); - if (!priv->dsa_8021q_ctx) - return -ENOMEM; - - priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops; - priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q); - priv->dsa_8021q_ctx->ds = ds; - - INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links); - INIT_LIST_HEAD(&priv->bridge_vlans); - INIT_LIST_HEAD(&priv->dsa_8021q_vlans); - - sja1105_tas_setup(ds); - sja1105_flower_setup(ds); + rc = sja1105_parse_dt(priv); + if (rc < 0) { + dev_err(ds->dev, "Failed to parse DT: %d\n", rc); + return rc; + } - rc = dsa_register_switch(priv->ds); - if (rc) + /* Error out early if internal delays are required through DT + * and we can't apply them. + */ + rc = sja1105_parse_rgmii_delays(priv); + if (rc < 0) { + dev_err(ds->dev, "RGMII delay not supported\n"); return rc; + } if (IS_ENABLED(CONFIG_NET_SCH_CBS)) { priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers, sizeof(struct sja1105_cbs_entry), GFP_KERNEL); - if (!priv->cbs) { - rc = -ENOMEM; - goto out_unregister_switch; - } - } - - /* Connections between dsa_port and sja1105_port */ - for (port = 0; port < ds->num_ports; port++) { - struct sja1105_port *sp = &priv->ports[port]; - struct dsa_port *dp = dsa_to_port(ds, port); - struct net_device *slave; - int subvlan; - - if (!dsa_is_user_port(ds, port)) - continue; - - dp->priv = sp; - sp->dp = dp; - sp->data = tagger_data; - slave = dp->slave; - kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit); - sp->xmit_worker = kthread_create_worker(0, "%s_xmit", - slave->name); - if (IS_ERR(sp->xmit_worker)) { - rc = PTR_ERR(sp->xmit_worker); - dev_err(ds->dev, - "failed to create deferred xmit thread: %d\n", - rc); - goto out_destroy_workers; - } - skb_queue_head_init(&sp->xmit_queue); - sp->xmit_tpid = ETH_P_SJA1105; - - for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) - sp->subvlan_map[subvlan] = VLAN_N_VID; - } - - return 0; - -out_destroy_workers: - while (port-- > 0) { - struct sja1105_port *sp = &priv->ports[port]; - - if (!dsa_is_user_port(ds, port)) - continue; - - kthread_destroy_worker(sp->xmit_worker); + if (!priv->cbs) + return -ENOMEM; } -out_unregister_switch: - dsa_unregister_switch(ds); - - return rc; + return dsa_register_switch(priv->ds); } static int sja1105_remove(struct spi_device *spi) { struct sja1105_private *priv = spi_get_drvdata(spi); + struct dsa_switch *ds = priv->ds; + + dsa_unregister_switch(ds); - dsa_unregister_switch(priv->ds); return 0; } diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c index 08cc5dbf2fa6..d60a530d0272 100644 --- a/drivers/net/dsa/sja1105/sja1105_spi.c +++ b/drivers/net/dsa/sja1105/sja1105_spi.c @@ -575,7 +575,6 @@ const struct sja1105_info sja1105e_info = { .part_no = SJA1105ET_PART_NO, .static_ops = sja1105e_table_ops, .dyn_ops = sja1105et_dyn_ops, - .qinq_tpid = ETH_P_8021Q, .tag_proto = DSA_TAG_PROTO_SJA1105, .can_limit_mcast_flood = false, .ptp_ts_bits = 24, @@ -608,7 +607,6 @@ const struct sja1105_info sja1105t_info = { .part_no = SJA1105ET_PART_NO, .static_ops = sja1105t_table_ops, .dyn_ops = sja1105et_dyn_ops, - .qinq_tpid = ETH_P_8021Q, .tag_proto = DSA_TAG_PROTO_SJA1105, .can_limit_mcast_flood = false, .ptp_ts_bits = 24, @@ -641,7 +639,6 @@ const struct sja1105_info sja1105p_info = { .part_no = SJA1105P_PART_NO, .static_ops = sja1105p_table_ops, .dyn_ops = sja1105pqrs_dyn_ops, - .qinq_tpid = ETH_P_8021AD, .tag_proto = DSA_TAG_PROTO_SJA1105, .can_limit_mcast_flood = true, 
.ptp_ts_bits = 32, @@ -675,7 +672,6 @@ const struct sja1105_info sja1105q_info = { .part_no = SJA1105Q_PART_NO, .static_ops = sja1105q_table_ops, .dyn_ops = sja1105pqrs_dyn_ops, - .qinq_tpid = ETH_P_8021AD, .tag_proto = DSA_TAG_PROTO_SJA1105, .can_limit_mcast_flood = true, .ptp_ts_bits = 32, @@ -709,7 +705,6 @@ const struct sja1105_info sja1105r_info = { .part_no = SJA1105R_PART_NO, .static_ops = sja1105r_table_ops, .dyn_ops = sja1105pqrs_dyn_ops, - .qinq_tpid = ETH_P_8021AD, .tag_proto = DSA_TAG_PROTO_SJA1105, .can_limit_mcast_flood = true, .ptp_ts_bits = 32, @@ -747,7 +742,6 @@ const struct sja1105_info sja1105s_info = { .static_ops = sja1105s_table_ops, .dyn_ops = sja1105pqrs_dyn_ops, .regs = &sja1105pqrs_regs, - .qinq_tpid = ETH_P_8021AD, .tag_proto = DSA_TAG_PROTO_SJA1105, .can_limit_mcast_flood = true, .ptp_ts_bits = 32, @@ -784,7 +778,6 @@ const struct sja1105_info sja1110a_info = { .static_ops = sja1110_table_ops, .dyn_ops = sja1110_dyn_ops, .regs = &sja1110_regs, - .qinq_tpid = ETH_P_8021AD, .tag_proto = DSA_TAG_PROTO_SJA1110, .can_limit_mcast_flood = true, .multiple_cascade_ports = true, @@ -835,7 +828,6 @@ const struct sja1105_info sja1110b_info = { .static_ops = sja1110_table_ops, .dyn_ops = sja1110_dyn_ops, .regs = &sja1110_regs, - .qinq_tpid = ETH_P_8021AD, .tag_proto = DSA_TAG_PROTO_SJA1110, .can_limit_mcast_flood = true, .multiple_cascade_ports = true, @@ -886,7 +878,6 @@ const struct sja1105_info sja1110c_info = { .static_ops = sja1110_table_ops, .dyn_ops = sja1110_dyn_ops, .regs = &sja1110_regs, - .qinq_tpid = ETH_P_8021AD, .tag_proto = DSA_TAG_PROTO_SJA1110, .can_limit_mcast_flood = true, .multiple_cascade_ports = true, @@ -937,7 +928,6 @@ const struct sja1105_info sja1110d_info = { .static_ops = sja1110_table_ops, .dyn_ops = sja1110_dyn_ops, .regs = &sja1110_regs, - .qinq_tpid = ETH_P_8021AD, .tag_proto = DSA_TAG_PROTO_SJA1110, .can_limit_mcast_flood = true, .multiple_cascade_ports = true, diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c index f6e13e6c6a18..ec7b65daec20 100644 --- a/drivers/net/dsa/sja1105/sja1105_vl.c +++ b/drivers/net/dsa/sja1105/sja1105_vl.c @@ -496,14 +496,11 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port, struct sja1105_rule *rule = sja1105_rule_find(priv, cookie); int rc; - if (priv->vlan_state == SJA1105_VLAN_UNAWARE && - key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { + if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only redirect based on DMAC"); return -EOPNOTSUPP; - } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT || - priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) && - key->type != SJA1105_KEY_VLAN_AWARE_VL) { + } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only redirect based on {DMAC, VID, PCP}"); return -EOPNOTSUPP; @@ -595,14 +592,11 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port, return -ERANGE; } - if (priv->vlan_state == SJA1105_VLAN_UNAWARE && - key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { + if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only gate based on DMAC"); return -EOPNOTSUPP; - } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT || - priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) && - key->type != SJA1105_KEY_VLAN_AWARE_VL) { + } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only gate based on {DMAC, VID, PCP}"); return 
-EOPNOTSUPP; diff --git a/drivers/net/eql.c b/drivers/net/eql.c index 74263f8efe1a..8ef34901c2d8 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -113,6 +113,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/compat.h> #include <linux/capability.h> #include <linux/module.h> #include <linux/kernel.h> @@ -131,7 +132,8 @@ static int eql_open(struct net_device *dev); static int eql_close(struct net_device *dev); -static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd); static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev); #define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE) @@ -170,7 +172,7 @@ static const char version[] __initconst = static const struct net_device_ops eql_netdev_ops = { .ndo_open = eql_open, .ndo_stop = eql_close, - .ndo_do_ioctl = eql_ioctl, + .ndo_siocdevprivate = eql_siocdevprivate, .ndo_start_xmit = eql_slave_xmit, }; @@ -268,25 +270,29 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc); static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc); static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc); -static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) { if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG && !capable(CAP_NET_ADMIN)) return -EPERM; + if (in_compat_syscall()) /* to be implemented */ + return -EOPNOTSUPP; + switch (cmd) { case EQL_ENSLAVE: - return eql_enslave(dev, ifr->ifr_data); + return eql_enslave(dev, data); case EQL_EMANCIPATE: - return eql_emancipate(dev, ifr->ifr_data); + return eql_emancipate(dev, data); case EQL_GETSLAVECFG: - return eql_g_slave_cfg(dev, ifr->ifr_data); + return eql_g_slave_cfg(dev, data); case EQL_SETSLAVECFG: - return eql_s_slave_cfg(dev, ifr->ifr_data); + return eql_s_slave_cfg(dev, data); case EQL_GETMASTRCFG: - return eql_g_master_cfg(dev, ifr->ifr_data); + return eql_g_master_cfg(dev, data); case EQL_SETMASTRCFG: - return eql_s_master_cfg(dev, ifr->ifr_data); + return eql_s_master_cfg(dev, data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 96cc5fc36eb5..87c906e744fb 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -302,7 +302,6 @@ static int el3_isa_match(struct device *pdev, unsigned int ndev) return -ENOMEM; SET_NETDEV_DEV(dev, pdev); - netdev_boot_setup_check(dev); if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) { free_netdev(dev); @@ -421,7 +420,6 @@ static int el3_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id) return -ENOMEM; } SET_NETDEV_DEV(dev, &pdev->dev); - netdev_boot_setup_check(dev); el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_PNP); pnp_set_drvdata(pdev, dev); @@ -514,7 +512,9 @@ static int el3_common_init(struct net_device *dev) { struct el3_private *lp = netdev_priv(dev); int err; - const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"}; + static const char * const if_names[] = { + "10baseT", "AUI", "undefined", "BNC" + }; spin_lock_init(&lp->lock); @@ -588,7 +588,6 @@ static int el3_eisa_probe(struct device *device) } SET_NETDEV_DEV(dev, device); - netdev_boot_setup_check(dev); el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA); eisa_set_drvdata (edev, dev); 
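The eql.c conversion above is part of a tree-wide split of ndo_do_ioctl: driver-private SIOCDEVPRIVATE commands now arrive through ndo_siocdevprivate, which hands the driver the user pointer directly instead of leaving it to dereference ifr->ifr_data, and which makes the compat-syscall case explicit. Below is a minimal sketch of the new hook; the mydrv names, command number and payload struct are hypothetical, only the callback signature and the in_compat_syscall() guard are taken from the patch.

#include <linux/compat.h>
#include <linux/netdevice.h>
#include <linux/sockios.h>
#include <linux/uaccess.h>

/* Hypothetical private command; the SIOCDEVPRIVATE..SIOCDEVPRIVATE + 15
 * range is reserved for driver-private ioctls such as EQL_GETMASTRCFG.
 */
#define MYDRV_GETCFG	(SIOCDEVPRIVATE + 0)

struct mydrv_cfg {
	u32 flags;
};

static int mydrv_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
				void __user *data, int cmd)
{
	struct mydrv_cfg cfg = { .flags = 0 };

	/* 32-bit userspace on a 64-bit kernel may lay the payload out
	 * differently; like eql, reject it until handled explicitly.
	 */
	if (in_compat_syscall())
		return -EOPNOTSUPP;

	switch (cmd) {
	case MYDRV_GETCFG:
		/* @data is what legacy code fetched from ifr->ifr_data */
		return copy_to_user(data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mydrv_netdev_ops = {
	.ndo_siocdevprivate	= mydrv_siocdevprivate,
};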
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index 47b4215bb93b..8d90fed5d33e 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -407,7 +407,7 @@ MODULE_PARM_DESC(max_interrupt_work, "3c515 maximum events handled per interrupt /* we will need locking (and refcounting) if we ever use it for more */ static LIST_HEAD(root_corkscrew_dev); -int init_module(void) +static int corkscrew_init_module(void) { int found = 0; if (debug >= 0) @@ -416,6 +416,7 @@ int init_module(void) found++; return found ? 0 : -ENODEV; } +module_init(corkscrew_init_module); #else struct net_device *tc515_probe(int unit) diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c index f66e7fb9a2bb..dd4d3c48b98d 100644 --- a/drivers/net/ethernet/3com/3c574_cs.c +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -252,7 +252,7 @@ static const struct net_device_ops el3_netdev_ops = { .ndo_start_xmit = el3_start_xmit, .ndo_tx_timeout = el3_tx_timeout, .ndo_get_stats = el3_get_stats, - .ndo_do_ioctl = el3_ioctl, + .ndo_eth_ioctl = el3_ioctl, .ndo_set_rx_mode = set_multicast_list, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 7d7d3ffe25c3..17c16333a412 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1052,7 +1052,7 @@ static const struct net_device_ops boomrang_netdev_ops = { .ndo_tx_timeout = vortex_tx_timeout, .ndo_get_stats = vortex_get_stats, #ifdef CONFIG_PCI - .ndo_do_ioctl = vortex_ioctl, + .ndo_eth_ioctl = vortex_ioctl, #endif .ndo_set_rx_mode = set_rx_mode, .ndo_set_mac_address = eth_mac_addr, @@ -1069,7 +1069,7 @@ static const struct net_device_ops vortex_netdev_ops = { .ndo_tx_timeout = vortex_tx_timeout, .ndo_get_stats = vortex_get_stats, #ifdef CONFIG_PCI - .ndo_do_ioctl = vortex_ioctl, + .ndo_eth_ioctl = vortex_ioctl, #endif .ndo_set_rx_mode = set_rx_mode, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig index a52a3740f0c9..706bd59bf645 100644 --- a/drivers/net/ethernet/3com/Kconfig +++ b/drivers/net/ethernet/3com/Kconfig @@ -34,6 +34,7 @@ config EL3 config 3C515 tristate "3c515 ISA \"Fast EtherLink\"" depends on ISA && ISA_DMA_API && !PPC32 + select NETDEV_LEGACY_INIT help If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet network card, say Y here. diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index 9f4b302fd2ce..a4130e643342 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -102,6 +102,7 @@ config MCF8390 config NE2000 tristate "NE2000/NE1000 support" depends on (ISA || (Q40 && m) || MACH_TX49XX || ATARI_ETHERNEC) + select NETDEV_LEGACY_INIT if ISA select CRC32 help If you have a network (Ethernet) card of this type, say Y here. @@ -169,6 +170,7 @@ config STNIC config ULTRA tristate "SMC Ultra support" depends on ISA + select NETDEV_LEGACY_INIT select CRC32 help If you have a network (Ethernet) card of this type, say Y here. @@ -186,6 +188,7 @@ config ULTRA config WD80x3 tristate "WD80*3 support" depends on ISA + select NETDEV_LEGACY_INIT select CRC32 help If you have a network (Ethernet) card of this type, say Y here. 
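The 3c515 hunk above shows another recurring modernization in this series: the old global init_module()/cleanup_module() entry points become static functions registered with module_init()/module_exit(). A minimal sketch of the converted shape, with hypothetical mydrv names:

#include <linux/init.h>
#include <linux/module.h>

/* Marking the entry points static is only possible because
 * module_init()/module_exit() register them; the legacy form required
 * the exact global names init_module() and cleanup_module().
 */
static int __init mydrv_init_module(void)
{
	pr_info("mydrv: looking for hardware\n");
	return 0;		/* or -ENODEV when nothing was found */
}
module_init(mydrv_init_module);

static void __exit mydrv_cleanup_module(void)
{
	pr_info("mydrv: unloading\n");
}
module_exit(mydrv_cleanup_module);

MODULE_LICENSE("GPL");

Besides allowing static linkage, module_init() also degrades to a plain initcall when the driver is built in, which is what lets the #ifdef MODULE guards disappear in the apne.c and mvme147.c conversions further down.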
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c index fe6c834c422e..da1ae37a9d73 100644 --- a/drivers/net/ethernet/8390/apne.c +++ b/drivers/net/ethernet/8390/apne.c @@ -75,7 +75,6 @@ #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ -struct net_device * __init apne_probe(int unit); static int apne_probe1(struct net_device *dev, int ioaddr); static void apne_reset_8390(struct net_device *dev); @@ -120,7 +119,7 @@ static u32 apne_msg_enable; module_param_named(msg_enable, apne_msg_enable, uint, 0444); MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); -struct net_device * __init apne_probe(int unit) +static struct net_device * __init apne_probe(void) { struct net_device *dev; struct ei_device *ei_local; @@ -150,10 +149,6 @@ struct net_device * __init apne_probe(int unit) dev = alloc_ei_netdev(); if (!dev) return ERR_PTR(-ENOMEM); - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } ei_local = netdev_priv(dev); ei_local->msg_enable = apne_msg_enable; @@ -554,12 +549,11 @@ static irqreturn_t apne_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -#ifdef MODULE static struct net_device *apne_dev; static int __init apne_module_init(void) { - apne_dev = apne_probe(-1); + apne_dev = apne_probe(); return PTR_ERR_OR_ZERO(apne_dev); } @@ -579,7 +573,6 @@ static void __exit apne_module_exit(void) } module_init(apne_module_init); module_exit(apne_module_exit); -#endif static int init_pcmcia(void) { diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 172947fc051a..6c6bdd5913ec 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -101,6 +101,13 @@ static inline struct ax_device *to_ax_dev(struct net_device *dev) return (struct ax_device *)(ei_local + 1); } +void ax_NS8390_reinit(struct net_device *dev) +{ + ax_NS8390_init(dev, 1); +} + +EXPORT_SYMBOL_GPL(ax_NS8390_reinit); + /* * ax_initial_check * @@ -635,7 +642,7 @@ static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom) static const struct net_device_ops ax_netdev_ops = { .ndo_open = ax_open, .ndo_stop = ax_close, - .ndo_do_ioctl = ax_ioctl, + .ndo_eth_ioctl = ax_ioctl, .ndo_start_xmit = ax_ei_start_xmit, .ndo_tx_timeout = ax_ei_tx_timeout, diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index 8c321dfc7b3b..3c370e686ec3 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -128,7 +128,7 @@ static inline struct axnet_dev *PRIV(struct net_device *dev) static const struct net_device_ops axnet_netdev_ops = { .ndo_open = axnet_open, .ndo_stop = axnet_close, - .ndo_do_ioctl = axnet_ioctl, + .ndo_eth_ioctl = axnet_ioctl, .ndo_start_xmit = axnet_start_xmit, .ndo_tx_timeout = axnet_tx_timeout, .ndo_get_stats = get_stats, diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index e9756d0ea5b8..53660bc8d6ff 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -923,7 +923,7 @@ static void __init ne_add_devices(void) } #ifdef MODULE -int __init init_module(void) +static int __init ne_init(void) { int retval; ne_add_devices(); @@ -940,6 +940,7 @@ int __init init_module(void) ne_loop_rm_unreg(0); return retval; } +module_init(ne_init); #else /* MODULE */ static int __init ne_init(void) { @@ -951,6 +952,7 @@ static int __init ne_init(void) } module_init(ne_init); +#ifdef CONFIG_NETDEV_LEGACY_INIT struct net_device * __init 
ne_probe(int unit) { int this_dev; @@ -991,6 +993,7 @@ struct net_device * __init ne_probe(int unit) return ERR_PTR(-ENODEV); } +#endif #endif /* MODULE */ static void __exit ne_exit(void) diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index cac036706382..96ad72abd373 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -223,7 +223,7 @@ static const struct net_device_ops pcnet_netdev_ops = { .ndo_set_config = set_config, .ndo_start_xmit = ei_start_xmit, .ndo_get_stats = ei_get_stats, - .ndo_do_ioctl = ei_ioctl, + .ndo_eth_ioctl = ei_ioctl, .ndo_set_rx_mode = ei_set_multicast_list, .ndo_tx_timeout = ei_tx_timeout, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c index 1d8ed7357b7f..0890fa493f70 100644 --- a/drivers/net/ethernet/8390/smc-ultra.c +++ b/drivers/net/ethernet/8390/smc-ultra.c @@ -522,7 +522,6 @@ static void ultra_pio_input(struct net_device *dev, int count, /* We know skbuffs are padded to at least word alignment. */ insw(ioaddr + IOPD, buf, (count+1)>>1); } - static void ultra_pio_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page) { @@ -572,8 +571,7 @@ MODULE_LICENSE("GPL"); /* This is set up so that only a single autoprobe takes place per call. ISA device autoprobes on a running machine are not recommended. */ -int __init -init_module(void) +static int __init ultra_init_module(void) { struct net_device *dev; int this_dev, found = 0; @@ -600,6 +598,7 @@ init_module(void) return 0; return -ENXIO; } +module_init(ultra_init_module); static void cleanup_card(struct net_device *dev) { @@ -613,8 +612,7 @@ static void cleanup_card(struct net_device *dev) iounmap(ei_status.mem); } -void __exit -cleanup_module(void) +static void __exit ultra_cleanup_module(void) { int this_dev; @@ -627,4 +625,5 @@ cleanup_module(void) } } } +module_exit(ultra_cleanup_module); #endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c index c834123560f1..263a942d81fa 100644 --- a/drivers/net/ethernet/8390/wd.c +++ b/drivers/net/ethernet/8390/wd.c @@ -519,7 +519,7 @@ MODULE_LICENSE("GPL"); /* This is set up so that only a single autoprobe takes place per call. ISA device autoprobes on a running machine are not recommended. 
*/ -int __init init_module(void) +static int __init wd_init_module(void) { struct net_device *dev; int this_dev, found = 0; @@ -548,6 +548,7 @@ int __init init_module(void) return 0; return -ENXIO; } +module_init(wd_init_module); static void cleanup_card(struct net_device *dev) { @@ -556,8 +557,7 @@ static void cleanup_card(struct net_device *dev) iounmap(ei_status.mem); } -void __exit -cleanup_module(void) +static void __exit wd_cleanup_module(void) { int this_dev; @@ -570,4 +570,5 @@ cleanup_module(void) } } } +module_exit(wd_cleanup_module); #endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/xsurf100.c b/drivers/net/ethernet/8390/xsurf100.c index e2c963821ffe..fe7a74707aa4 100644 --- a/drivers/net/ethernet/8390/xsurf100.c +++ b/drivers/net/ethernet/8390/xsurf100.c @@ -22,8 +22,6 @@ #define XS100_8390_DATA_WRITE32_BASE 0x0C80 #define XS100_8390_DATA_AREA_SIZE 0x80 -#define __NS8390_init ax_NS8390_init - /* force unsigned long back to 'void __iomem *' */ #define ax_convert_addr(_a) ((void __force __iomem *)(_a)) @@ -42,10 +40,7 @@ /* Ensure we have our RCR base value */ #define AX88796_PLATFORM -static unsigned char version[] = - "ax88796.c: Copyright 2005,2007 Simtec Electronics\n"; - -#include "lib8390.c" +#include "8390.h" /* from ne.c */ #define NE_CMD EI_SHIFT(0x00) @@ -232,7 +227,7 @@ static void xs100_block_output(struct net_device *dev, int count, if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */ netdev_warn(dev, "timeout waiting for Tx RDC.\n"); ei_local->reset_8390(dev); - ax_NS8390_init(dev, 1); + ax_NS8390_reinit(dev); break; } } diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 1cdff1dca790..d796684ec9ca 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -118,6 +118,7 @@ config LANTIQ_XRX200 Support for the PMAC of the Gigabit switch (GSWIP) inside the Lantiq / Intel VRX200 VDSL SoC +source "drivers/net/ethernet/litex/Kconfig" source "drivers/net/ethernet/marvell/Kconfig" source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index cb3f9084a21b..aaa5078cd7d1 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o +obj-$(CONFIG_NET_VENDOR_LITEX) += litex/ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ diff --git a/drivers/net/ethernet/actions/Kconfig b/drivers/net/ethernet/actions/Kconfig index ccad6a3f4d6f..f630cac2ab6c 100644 --- a/drivers/net/ethernet/actions/Kconfig +++ b/drivers/net/ethernet/actions/Kconfig @@ -2,8 +2,8 @@ config NET_VENDOR_ACTIONS bool "Actions Semi devices" - default y - depends on ARCH_ACTIONS + depends on ARCH_ACTIONS || COMPILE_TEST + default ARCH_ACTIONS help If you have a network (Ethernet) card belonging to this class, say Y. 
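Many hunks in this range (3c574_cs, 3c59x, ax88796, axnet_cs and pcnet_cs above; owl-emac, starfire, et131x and others below) rename .ndo_do_ioctl to .ndo_eth_ioctl. The prototype is unchanged; the new hook is simply the one the core now calls for Ethernet MII and hardware-timestamping ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG, SIOCSHWTSTAMP, ...), so most drivers either delegate to phylib or plug in phy_do_ioctl_running directly. A minimal sketch with a hypothetical mydrv driver:

#include <linux/netdevice.h>
#include <linux/phy.h>

/* Same prototype as the old ndo_do_ioctl, but only MII/timestamping
 * ioctls are routed here, so delegating to the attached PHY is
 * usually all that is needed.
 */
static int mydrv_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;
	if (!dev->phydev)
		return -ENODEV;

	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}

static const struct net_device_ops mydrv_netdev_ops = {
	.ndo_eth_ioctl	= mydrv_eth_ioctl,
	/* equivalently: .ndo_eth_ioctl = phy_do_ioctl_running, */
};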
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c index b8e771c2bc40..c4ecf4fcadf8 100644 --- a/drivers/net/ethernet/actions/owl-emac.c +++ b/drivers/net/ethernet/actions/owl-emac.c @@ -1179,8 +1179,8 @@ static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr) return owl_emac_setup_frame_xmit(netdev_priv(netdev)); } -static int owl_emac_ndo_do_ioctl(struct net_device *netdev, - struct ifreq *req, int cmd) +static int owl_emac_ndo_eth_ioctl(struct net_device *netdev, + struct ifreq *req, int cmd) { if (!netif_running(netdev)) return -EINVAL; @@ -1224,7 +1224,7 @@ static const struct net_device_ops owl_emac_netdev_ops = { .ndo_set_rx_mode = owl_emac_ndo_set_rx_mode, .ndo_set_mac_address = owl_emac_ndo_set_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = owl_emac_ndo_do_ioctl, + .ndo_eth_ioctl = owl_emac_ndo_eth_ioctl, .ndo_tx_timeout = owl_emac_ndo_tx_timeout, .ndo_get_stats = owl_emac_ndo_get_stats, }; diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 7965e5e3c985..e0f6cc910bd2 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -625,7 +625,7 @@ static const struct net_device_ops netdev_ops = { .ndo_tx_timeout = tx_timeout, .ndo_get_stats = get_stats, .ndo_set_rx_mode = set_rx_mode, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef VLAN_SUPPORT diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 41f8821f792d..920633161174 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3882,7 +3882,7 @@ static const struct net_device_ops et131x_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_get_stats = et131x_stats, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, }; static int et131x_pci_setup(struct pci_dev *pdev, diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index f99ae317c188..037baea1c738 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -774,7 +774,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_start_xmit = emac_start_xmit, .ndo_tx_timeout = emac_timeout, .ndo_set_rx_mode = emac_set_rx_mode, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = emac_set_mac_address, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 27dae632efcb..13e745cf3781 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -357,7 +357,9 @@ static int ena_get_link_ksettings(struct net_device *netdev, } static int ena_get_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ena_adapter *adapter = netdev_priv(net_dev); struct ena_com_dev *ena_dev = adapter->ena_dev; @@ -402,7 +404,9 @@ static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter * } static int ena_set_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce 
*coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ena_adapter *adapter = netdev_priv(net_dev); struct ena_com_dev *ena_dev = adapter->ena_dev; diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index d0b0609bbe23..4786f0504691 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -46,6 +46,7 @@ config AMD8111_ETH config LANCE tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" depends on ISA && ISA_DMA_API && !ARM && !PPC32 + select NETDEV_LEGACY_INIT help If you have a network (Ethernet) card of this type, say Y here. Some LinkSys cards are of this type. @@ -132,6 +133,7 @@ config PCMCIA_NMCLAN config NI65 tristate "NI6510 support" depends on ISA && ISA_DMA_API && !ARM && !PPC32 + select NETDEV_LEGACY_INIT help If you have a network (Ethernet) card of this type, say Y here. @@ -168,11 +170,11 @@ config AMD_XGBE tristate "AMD 10GbE Ethernet driver" depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM depends on X86 || ARM64 || COMPILE_TEST + depends on PTP_1588_CLOCK_OPTIONAL select BITREVERSE select CRC32 select PHYLIB select AMD_XGBE_HAVE_ECC if X86 - imply PTP_1588_CLOCK help This driver supports the AMD 10GbE Ethernet device found on an AMD SoC. diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 9cac5aa75a73..92e4246dc359 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1729,7 +1729,7 @@ static const struct net_device_ops amd8111e_netdev_ops = { .ndo_set_rx_mode = amd8111e_set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = amd8111e_set_mac_address, - .ndo_do_ioctl = amd8111e_ioctl, + .ndo_eth_ioctl = amd8111e_ioctl, .ndo_change_mtu = amd8111e_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = amd8111e_poll, diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index 36f54d13a2eb..9d2f49fd945e 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -367,7 +367,7 @@ static void *slow_memcpy( void *dst, const void *src, size_t len ) } -struct net_device * __init atarilance_probe(int unit) +struct net_device * __init atarilance_probe(void) { int i; static int found; @@ -382,10 +382,6 @@ struct net_device * __init atarilance_probe(int unit) dev = alloc_etherdev(sizeof(struct lance_private)); if (!dev) return ERR_PTR(-ENOMEM); - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } for( i = 0; i < N_LANCE_ADDR; ++i ) { if (lance_probe1( dev, &lance_addr_list[i] )) { @@ -1137,13 +1133,11 @@ static int lance_set_mac_address( struct net_device *dev, void *addr ) return 0; } - -#ifdef MODULE static struct net_device *atarilance_dev; static int __init atarilance_module_init(void) { - atarilance_dev = atarilance_probe(-1); + atarilance_dev = atarilance_probe(); return PTR_ERR_OR_ZERO(atarilance_dev); } @@ -1155,4 +1149,3 @@ static void __exit atarilance_module_exit(void) } module_init(atarilance_module_init); module_exit(atarilance_module_exit); -#endif /* MODULE */ diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 19e195420e24..9c1636222b99 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1051,7 +1051,7 @@ static const struct net_device_ops au1000_netdev_ops = { .ndo_stop = au1000_close, .ndo_start_xmit = au1000_tx, 
.ndo_set_rx_mode = au1000_multicast_list, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_tx_timeout = au1000_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index 2178e6b89dbd..945bf1d87507 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -327,7 +327,7 @@ MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)"); MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)"); MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)"); -int __init init_module(void) +static int __init lance_init_module(void) { struct net_device *dev; int this_dev, found = 0; @@ -356,6 +356,7 @@ int __init init_module(void) return 0; return -ENXIO; } +module_init(lance_init_module); static void cleanup_card(struct net_device *dev) { @@ -368,7 +369,7 @@ static void cleanup_card(struct net_device *dev) kfree(lp); } -void __exit cleanup_module(void) +static void __exit lance_cleanup_module(void) { int this_dev; @@ -381,6 +382,7 @@ void __exit cleanup_module(void) } } } +module_exit(lance_cleanup_module); #endif /* MODULE */ MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c index 3f2e4cdd0b83..da97fccea9ea 100644 --- a/drivers/net/ethernet/amd/mvme147.c +++ b/drivers/net/ethernet/amd/mvme147.c @@ -68,7 +68,7 @@ static const struct net_device_ops lance_netdev_ops = { }; /* Initialise the one and only on-board 7990 */ -struct net_device * __init mvme147lance_probe(int unit) +static struct net_device * __init mvme147lance_probe(void) { struct net_device *dev; static int called; @@ -86,9 +86,6 @@ struct net_device * __init mvme147lance_probe(int unit) if (!dev) return ERR_PTR(-ENOMEM); - if (unit >= 0) - sprintf(dev->name, "eth%d", unit); - /* Fill the dev fields */ dev->base_addr = (unsigned long)MVME147_LANCE_BASE; dev->netdev_ops = &lance_netdev_ops; @@ -179,22 +176,21 @@ static int m147lance_close(struct net_device *dev) return 0; } -#ifdef MODULE MODULE_LICENSE("GPL"); static struct net_device *dev_mvme147_lance; -int __init init_module(void) +static int __init m147lance_init(void) { - dev_mvme147_lance = mvme147lance_probe(-1); + dev_mvme147_lance = mvme147lance_probe(); return PTR_ERR_OR_ZERO(dev_mvme147_lance); } +module_init(m147lance_init); -void __exit cleanup_module(void) +static void __exit m147lance_exit(void) { struct m147lance_private *lp = netdev_priv(dev_mvme147_lance); unregister_netdev(dev_mvme147_lance); free_pages(lp->ram, 3); free_netdev(dev_mvme147_lance); } - -#endif /* MODULE */ +module_exit(m147lance_exit); diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index 5c1cfb0c4a42..b5df7ad5a83f 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -1230,18 +1230,20 @@ MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)"); MODULE_PARM_DESC(io, "ni6510 I/O base address"); MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); -int __init init_module(void) +static int __init ni65_init_module(void) { dev_ni65 = ni65_probe(-1); return PTR_ERR_OR_ZERO(dev_ni65); } +module_init(ni65_init_module); -void __exit cleanup_module(void) +static void __exit ni65_cleanup_module(void) { unregister_netdev(dev_ni65); cleanup_card(dev_ni65); free_netdev(dev_ni65); } +module_exit(ni65_cleanup_module); #endif /* MODULE */ 
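The atarilance, lance, mvme147 and ni65 conversions above all share one shape: the old exported init_module()/cleanup_module() pair becomes a static function pair wired up through module_init()/module_exit(), and the probe routine drops its boot-time unit argument now that the netdev_boot_setup_check() path is reserved for NETDEV_LEGACY_INIT drivers. A minimal sketch of the resulting form follows; the example_* names are illustrative, not identifiers from this series:

static struct net_device *example_dev;

static int __init example_init(void)
{
	/* probe no longer takes a unit number for boot-time naming */
	example_dev = example_probe();
	return PTR_ERR_OR_ZERO(example_dev);
}
module_init(example_init);

static void __exit example_exit(void)
{
	unregister_netdev(example_dev);
	free_netdev(example_dev);
}
module_exit(example_exit);

Because module_init()/module_exit() collapse to the right thing for built-in code, the surrounding #ifdef MODULE guards can be removed as well, which is exactly what the hunks above do.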
MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 4100ab07e6b7..70d76fdb9f56 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1572,7 +1572,7 @@ static const struct net_device_ops pcnet32_netdev_ops = { .ndo_tx_timeout = pcnet32_tx_timeout, .ndo_get_stats = pcnet32_get_stats, .ndo_set_rx_mode = pcnet32_set_multicast_list, - .ndo_do_ioctl = pcnet32_ioctl, + .ndo_eth_ioctl = pcnet32_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index f8d7a9387a56..4a845bc071b2 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -245,7 +245,7 @@ static void set_multicast_list( struct net_device *dev ); /************************* End of Prototypes **************************/ -struct net_device * __init sun3lance_probe(int unit) +static struct net_device * __init sun3lance_probe(void) { struct net_device *dev; static int found; @@ -272,10 +272,6 @@ struct net_device * __init sun3lance_probe(int unit) dev = alloc_etherdev(sizeof(struct lance_private)); if (!dev) return ERR_PTR(-ENOMEM); - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } if (!lance_probe(dev)) goto out; @@ -924,17 +920,16 @@ static void set_multicast_list( struct net_device *dev ) } -#ifdef MODULE - static struct net_device *sun3lance_dev; -int __init init_module(void) +static int __init sun3lance_init(void) { - sun3lance_dev = sun3lance_probe(-1); + sun3lance_dev = sun3lance_probe(); return PTR_ERR_OR_ZERO(sun3lance_dev); } +module_init(sun3lance_init); -void __exit cleanup_module(void) +static void __exit sun3lance_cleanup(void) { unregister_netdev(sun3lance_dev); #ifdef CONFIG_SUN3 @@ -942,6 +937,4 @@ void __exit cleanup_module(void) #endif free_netdev(sun3lance_dev); } - -#endif /* MODULE */ - +module_exit(sun3lance_cleanup); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 4f714f874c4f..17a585adfb49 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2284,7 +2284,7 @@ static const struct net_device_ops xgbe_netdev_ops = { .ndo_set_rx_mode = xgbe_set_rx_mode, .ndo_set_mac_address = xgbe_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = xgbe_ioctl, + .ndo_eth_ioctl = xgbe_ioctl, .ndo_change_mtu = xgbe_change_mtu, .ndo_tx_timeout = xgbe_tx_timeout, .ndo_get_stats64 = xgbe_get_stats64, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 61f39a0e04f9..bafc51c34e0b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -428,7 +428,9 @@ static void xgbe_set_msglevel(struct net_device *netdev, u32 msglevel) } static int xgbe_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct xgbe_prv_data *pdata = netdev_priv(netdev); @@ -443,7 +445,9 @@ static int xgbe_get_coalesce(struct net_device *netdev, } static int xgbe_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct xgbe_prv_data *pdata = 
netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index de2a9348bc3f..a9ef0544e30f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -547,7 +547,9 @@ static int aq_ethtool_set_rxnfc(struct net_device *ndev, } static int aq_ethtool_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct aq_nic_s *aq_nic = netdev_priv(ndev); struct aq_nic_cfg_s *cfg; @@ -571,7 +573,9 @@ static int aq_ethtool_get_coalesce(struct net_device *ndev, } static int aq_ethtool_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct aq_nic_s *aq_nic = netdev_priv(ndev); struct aq_nic_cfg_s *cfg; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 4af0cd9530de..e22935ce9573 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -421,7 +421,7 @@ static const struct net_device_ops aq_ndev_ops = { .ndo_change_mtu = aq_ndev_change_mtu, .ndo_set_mac_address = aq_ndev_set_mac_address, .ndo_set_features = aq_ndev_set_features, - .ndo_do_ioctl = aq_ndev_ioctl, + .ndo_eth_ioctl = aq_ndev_ioctl, .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid, .ndo_setup_tc = aq_ndo_setup_tc, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 59253846e885..dee9ff74d6d6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -119,16 +119,10 @@ static int aq_pci_func_init(struct pci_dev *pdev) { int err; - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(32)); - } - if (err != 0) { err = -ENOSR; goto err_exit; } @@ -417,6 +411,9 @@ static int atl_resume_common(struct device *dev, bool deep) pci_restore_state(pdev); if (deep) { + /* Reinitialize Nic/Vecs objects */ + aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); + ret = aq_nic_init(nic); if (ret) goto err_exit; diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 67b8113a2b53..38c288ec9059 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -844,7 +844,7 @@ static const struct net_device_ops arc_emac_netdev_ops = { .ndo_set_mac_address = arc_emac_set_address, .ndo_get_stats = arc_emac_stats, .ndo_set_rx_mode = arc_emac_set_rx_mode, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = arc_emac_poll_controller, #endif diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 1ba81b1eb6fd..02ae98aabf91 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ 
b/drivers/net/ethernet/atheros/ag71xx.c @@ -1851,7 +1851,7 @@ static const struct net_device_ops ag71xx_netdev_ops = { .ndo_open = ag71xx_open, .ndo_stop = ag71xx_stop, .ndo_start_xmit = ag71xx_hard_start_xmit, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_tx_timeout = ag71xx_tx_timeout, .ndo_change_mtu = ag71xx_change_mtu, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 11ef1fbe7aee..4ea157efca86 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1701,7 +1701,7 @@ static const struct net_device_ops alx_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = alx_set_mac_address, .ndo_change_mtu = alx_change_mtu, - .ndo_do_ioctl = alx_ioctl, + .ndo_eth_ioctl = alx_ioctl, .ndo_tx_timeout = alx_tx_timeout, .ndo_fix_features = alx_fix_features, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 1c6246a5dc22..3b51b172b317 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -2609,7 +2609,7 @@ static const struct net_device_ops atl1c_netdev_ops = { .ndo_change_mtu = atl1c_change_mtu, .ndo_fix_features = atl1c_fix_features, .ndo_set_features = atl1c_set_features, - .ndo_do_ioctl = atl1c_ioctl, + .ndo_eth_ioctl = atl1c_ioctl, .ndo_tx_timeout = atl1c_tx_timeout, .ndo_get_stats = atl1c_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 2eb0a2ab69f6..753973ac922e 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -2247,7 +2247,7 @@ static const struct net_device_ops atl1e_netdev_ops = { .ndo_fix_features = atl1e_fix_features, .ndo_set_features = atl1e_set_features, .ndo_change_mtu = atl1e_change_mtu, - .ndo_do_ioctl = atl1e_ioctl, + .ndo_eth_ioctl = atl1e_ioctl, .ndo_tx_timeout = atl1e_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = atl1e_netpoll, diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index c67201a13cf5..68f6c0bbd945 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2885,7 +2885,7 @@ static const struct net_device_ops atl1_netdev_ops = { .ndo_change_mtu = atl1_change_mtu, .ndo_fix_features = atlx_fix_features, .ndo_set_features = atlx_set_features, - .ndo_do_ioctl = atlx_ioctl, + .ndo_eth_ioctl = atlx_ioctl, .ndo_tx_timeout = atlx_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = atl1_poll_controller, diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 0cc0db04c27d..b69298ddb647 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -1293,7 +1293,7 @@ static const struct net_device_ops atl2_netdev_ops = { .ndo_change_mtu = atl2_change_mtu, .ndo_fix_features = atl2_fix_features, .ndo_set_features = atl2_set_features, - .ndo_do_ioctl = atl2_ioctl, + .ndo_eth_ioctl = atl2_ioctl, .ndo_tx_timeout = atl2_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = atl2_poll_controller, diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 1a02ca600b71..56e0fb07aec7 100644 --- 
a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -122,8 +122,8 @@ config SB1250_MAC config TIGON3 tristate "Broadcom Tigon3 support" depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL select PHYLIB - imply PTP_1588_CLOCK help This driver supports Broadcom Tigon3 based gigabit Ethernet cards. @@ -140,7 +140,7 @@ config TIGON3_HWMON config BNX2X tristate "Broadcom NetXtremeII 10Gb support" depends on PCI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL select FW_LOADER select ZLIB_INFLATE select LIBCRC32C @@ -206,7 +206,7 @@ config SYSTEMPORT config BNXT tristate "Broadcom NetXtreme-C/E support" depends on PCI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL select FW_LOADER select LIBCRC32C select NET_DEVLINK diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index ad2655efe423..fa784953c601 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -2198,7 +2198,7 @@ static const struct net_device_ops b44_netdev_ops = { .ndo_set_rx_mode = b44_set_rx_mode, .ndo_set_mac_address = b44_set_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = b44_ioctl, + .ndo_eth_ioctl = b44_ioctl, .ndo_tx_timeout = b44_tx_timeout, .ndo_change_mtu = b44_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 977f097fc7bf..d56886300ecf 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1699,7 +1699,7 @@ static const struct net_device_ops bcm_enet_ops = { .ndo_start_xmit = bcm_enet_start_xmit, .ndo_set_mac_address = bcm_enet_set_mac_address, .ndo_set_rx_mode = bcm_enet_set_multicast_list, - .ndo_do_ioctl = bcm_enet_ioctl, + .ndo_eth_ioctl = bcm_enet_ioctl, .ndo_change_mtu = bcm_enet_change_mtu, }; @@ -2446,7 +2446,7 @@ static const struct net_device_ops bcm_enetsw_ops = { .ndo_stop = bcm_enetsw_stop, .ndo_start_xmit = bcm_enet_start_xmit, .ndo_change_mtu = bcm_enet_change_mtu, - .ndo_do_ioctl = bcm_enetsw_ioctl, + .ndo_eth_ioctl = bcm_enetsw_ioctl, }; @@ -2649,7 +2649,6 @@ static int bcm_enetsw_probe(struct platform_device *pdev) if (!res_mem || irq_rx < 0) return -ENODEV; - ret = 0; dev = alloc_etherdev(sizeof(*priv)); if (!dev) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index d9f0f0df8f7b..7fa1b695400d 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -607,7 +607,9 @@ static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring, } static int bcm_sysport_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bcm_sysport_priv *priv = netdev_priv(dev); u32 reg; @@ -627,7 +629,9 @@ static int bcm_sysport_get_coalesce(struct net_device *dev, } static int bcm_sysport_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bcm_sysport_priv *priv = netdev_priv(dev); struct dim_cq_moder moder; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 075f6e146b29..fe4d99abd548 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ 
-1263,7 +1263,7 @@ static const struct net_device_ops bgmac_netdev_ops = { .ndo_set_rx_mode = bgmac_set_rx_mode, .ndo_set_mac_address = bgmac_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_change_mtu = bgmac_change_mtu, }; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index bee6cfad9fc6..a705e2615307 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -2730,7 +2730,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gf if (!page) return -ENOMEM; mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { __free_page(page); return -EIO; @@ -2753,7 +2753,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) return; dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping), - PAGE_SIZE, PCI_DMA_FROMDEVICE); + PAGE_SIZE, DMA_FROM_DEVICE); __free_page(page); rx_pg->page = NULL; @@ -2775,7 +2775,7 @@ bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gf mapping = dma_map_single(&bp->pdev->dev, get_l2_fhdr(data), bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { kfree(data); return -EIO; @@ -2881,7 +2881,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) } dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE); tx_buf->skb = NULL; last = tx_buf->nr_frags; @@ -2895,7 +2895,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } sw_cons = BNX2_NEXT_TX_BD(sw_cons); @@ -3003,7 +3003,7 @@ bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, dma_sync_single_for_device(&bp->pdev->dev, dma_unmap_addr(cons_rx_buf, mapping), - BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); + BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, DMA_FROM_DEVICE); rxr->rx_prod_bseq += bp->rx_buf_use_size; @@ -3044,7 +3044,7 @@ error: } dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); skb = build_skb(data, 0); if (!skb) { kfree(data); @@ -3110,7 +3110,7 @@ error: } dma_unmap_page(&bp->pdev->dev, mapping_old, - PAGE_SIZE, PCI_DMA_FROMDEVICE); + PAGE_SIZE, DMA_FROM_DEVICE); frag_size -= frag_len; skb->data_len += frag_len; @@ -3180,7 +3180,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons)); next_rx_buf = &rxr->rx_buf_ring[next_ring_idx]; @@ -5449,7 +5449,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); tx_buf->skb = NULL; @@ -5460,7 +5460,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[k]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } dev_kfree_skb(skb); } @@ -5491,7 +5491,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp) dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, 
mapping), bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); rx_buf->data = NULL; @@ -5843,7 +5843,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) packet[i] = (unsigned char) (i & 0xff); map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(&bp->pdev->dev, map)) { dev_kfree_skb(skb); return -EIO; @@ -5882,7 +5882,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) udelay(5); - dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); + dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE); dev_kfree_skb(skb); if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod) @@ -5901,7 +5901,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); + bp->rx_buf_use_size, DMA_FROM_DEVICE); if (rx_hdr->l2_fhdr_status & (L2_FHDR_ERRORS_BAD_CRC | @@ -6660,7 +6660,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) } else mss = 0; - mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); + mapping = dma_map_single(&bp->pdev->dev, skb->data, len, + DMA_TO_DEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -6741,7 +6742,7 @@ dma_error: tx_buf = &txr->tx_buf_ring[ring_prod]; tx_buf->skb = NULL; dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE); /* unmap remaining mapped pages */ for (i = 0; i < last_frag; i++) { @@ -6750,7 +6751,7 @@ dma_error: tx_buf = &txr->tx_buf_ring[ring_prod]; dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } dev_kfree_skb_any(skb); @@ -7241,8 +7242,10 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, return rc; } -static int -bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) +static int bnx2_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnx2 *bp = netdev_priv(dev); @@ -7263,8 +7266,10 @@ bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) return 0; } -static int -bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) +static int bnx2_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnx2 *bp = netdev_priv(dev); @@ -8041,21 +8046,16 @@ bnx2_read_vpd_fw_ver(struct bnx2 *bp) #define BNX2_VPD_LEN 128 #define BNX2_MAX_VER_SLEN 30 - data = kmalloc(256, GFP_KERNEL); + data = kmalloc(BNX2_VPD_LEN, GFP_KERNEL); if (!data) return; - rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN, - BNX2_VPD_LEN); + rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data, BNX2_VPD_LEN); if (rc) goto vpd_done; - for (i = 0; i < BNX2_VPD_LEN; i += 4) { - data[i] = data[i + BNX2_VPD_LEN + 3]; - data[i + 1] = data[i + BNX2_VPD_LEN + 2]; - data[i + 2] = data[i + BNX2_VPD_LEN + 1]; - data[i + 3] = data[i + BNX2_VPD_LEN]; - } + for (i = 0; i < BNX2_VPD_LEN; i += 4) + swab32s((u32 *)&data[i]); i = pci_vpd_find_tag(data, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) @@ -8224,15 +8224,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) persist_dma_mask = dma_mask = DMA_BIT_MASK(64); /* 
Configure DMA attributes. */ - if (pci_set_dma_mask(pdev, dma_mask) == 0) { + if (dma_set_mask(&pdev->dev, dma_mask) == 0) { dev->features |= NETIF_F_HIGHDMA; - rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask); + rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask); if (rc) { dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed, aborting\n"); goto err_out_unmap; } - } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { + } else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) { dev_err(&pdev->dev, "System does not support DMA, aborting\n"); goto err_out_unmap; } @@ -8546,7 +8546,7 @@ static const struct net_device_ops bnx2_netdev_ops = { .ndo_stop = bnx2_close, .ndo_get_stats64 = bnx2_get_stats64, .ndo_set_rx_mode = bnx2_set_rx_mode, - .ndo_do_ioctl = bnx2_ioctl, + .ndo_eth_ioctl = bnx2_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = bnx2_change_mac_addr, .ndo_change_mtu = bnx2_change_mtu, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 32245bbe88a8..472a3a478038 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1878,7 +1878,9 @@ static int bnx2x_set_eeprom(struct net_device *dev, } static int bnx2x_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnx2x *bp = netdev_priv(dev); @@ -1891,7 +1893,9 @@ static int bnx2x_get_coalesce(struct net_device *dev, } static int bnx2x_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnx2x *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 2acbc73dcd18..6d98134913cd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -13048,7 +13048,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_set_rx_mode = bnx2x_set_rx_mode, .ndo_set_mac_address = bnx2x_change_mac_addr, .ndo_validate_addr = bnx2x_validate_addr, - .ndo_do_ioctl = bnx2x_ioctl, + .ndo_eth_ioctl = bnx2x_ioctl, .ndo_change_mtu = bnx2x_change_mtu, .ndo_fix_features = bnx2x_fix_features, .ndo_set_features = bnx2x_set_features, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 27943b0446c2..f255fd0b16db 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1858,7 +1858,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) { int i; int first_queue_query_index, num_queues_req; - dma_addr_t cur_data_offset; struct stats_query_entry *cur_query_entry; u8 stats_count = 0; bool is_fcoe = false; @@ -1879,10 +1878,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, first_queue_query_index + num_queues_req); - cur_data_offset = bp->fw_stats_data_mapping + - offsetof(struct bnx2x_fw_stats_data, queue_stats) + - num_queues_req * sizeof(struct per_queue_stats); - cur_query_entry = &bp->fw_stats_req-> query[first_queue_query_index + num_queues_req]; @@ -1933,7 +1928,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) cur_query_entry->funcID, j, 
cur_query_entry->index); cur_query_entry++; - cur_data_offset += sizeof(struct per_queue_stats); stats_count++; /* all stats are coalesced to the leading queue */ diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index 2b8ae687b3c1..c6ef7ec2c115 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_BNXT) += bnxt_en.o -bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o +bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 8a97640cdfe7..627f85ee3922 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -60,6 +60,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ulp.h" #include "bnxt_sriov.h" #include "bnxt_ethtool.h" @@ -276,8 +277,11 @@ static const u16 bnxt_async_events_arr[] = { ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, + ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE, ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST, + ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP, + ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT, }; static struct workqueue_struct *bnxt_pf_wq; @@ -670,7 +674,7 @@ tx_dma_error: prod = txr->tx_prod; tx_buf = &txr->tx_buf_ring[prod]; dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE); prod = NEXT_TX(prod); /* unmap remaining mapped pages */ @@ -679,7 +683,7 @@ tx_dma_error: tx_buf = &txr->tx_buf_ring[prod]; dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } tx_free: @@ -718,7 +722,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) } dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE); last = tx_buf->nr_frags; for (j = 0; j < last; j++) { @@ -728,7 +732,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) &pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[j]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { if (bp->flags & BNXT_FLAG_CHIP_P5) { @@ -901,7 +905,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, } mapping = dma_map_page_attrs(&pdev->dev, page, offset, - BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, + BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_WEAK_ORDERING); if (dma_mapping_error(&pdev->dev, mapping)) { __free_page(page); @@ -1141,7 +1145,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, } dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, - PCI_DMA_FROMDEVICE, + DMA_FROM_DEVICE, DMA_ATTR_WEAK_ORDERING); skb->data_len += frag_len; @@ -1649,6 +1653,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); if (!skb) { bnxt_abort_tpa(cpr, idx, agg_bufs); + 
cpr->sw_stats.rx.rx_oom_discards += 1; return NULL; } } else { @@ -1658,6 +1663,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); if (!new_data) { bnxt_abort_tpa(cpr, idx, agg_bufs); + cpr->sw_stats.rx.rx_oom_discards += 1; return NULL; } @@ -1673,6 +1679,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (!skb) { kfree(data); bnxt_abort_tpa(cpr, idx, agg_bufs); + cpr->sw_stats.rx.rx_oom_discards += 1; return NULL; } skb_reserve(skb, bp->rx_offset); @@ -1683,6 +1690,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); if (!skb) { /* Page reuse already handled by bnxt_rx_pages(). */ + cpr->sw_stats.rx.rx_oom_discards += 1; return NULL; } } @@ -1886,6 +1894,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (agg_bufs) bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, false); + cpr->sw_stats.rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; } @@ -1899,6 +1908,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, payload | len); if (!skb) { + cpr->sw_stats.rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; } @@ -1907,6 +1917,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (agg_bufs) { skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false); if (!skb) { + cpr->sw_stats.rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; } @@ -2001,6 +2012,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp, struct rx_cmp *rxcmp; u16 cp_cons; u8 cmp_type; + int rc; cp_cons = RING_CMP(tmp_raw_cons); rxcmp = (struct rx_cmp *) @@ -2029,7 +2041,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp, tpa_end1->rx_tpa_end_cmp_errors_v2 |= cpu_to_le32(RX_TPA_END_CMP_ERRORS); } - return bnxt_rx_pkt(bp, cpr, raw_cons, event); + rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); + if (rc && rc != -EBUSY) + cpr->sw_stats.rx.rx_netpoll_discards += 1; + return rc; } u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) @@ -2074,6 +2089,19 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) return INVALID_HW_RING_ID; } +static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) +{ + switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) { + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: + netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. 
Please fix the signal and reconfigure the pin\n", + BNXT_EVENT_INVALID_SIGNAL_DATA(data2)); + break; + default: + netdev_err(bp->dev, "FW reported unknown error type\n"); + break; + } +} + #define BNXT_GET_EVENT_PORT(data) \ ((data) & \ ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) @@ -2234,6 +2262,20 @@ static int bnxt_async_event_process(struct bnxt *bp, } goto async_event_process_exit; } + case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { + bnxt_ptp_pps_event(bp, data1, data2); + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { + bnxt_event_error_report(bp, data1, data2); + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { + u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; + + hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); + goto async_event_process_exit; + } default: goto async_event_process_exit; } @@ -2253,10 +2295,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) switch (cmpl_type) { case CMPL_BASE_TYPE_HWRM_DONE: seq_id = le16_to_cpu(h_cmpl->sequence_id); - if (seq_id == bp->hwrm_intr_seq_id) - bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; - else - netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); + hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); break; case CMPL_BASE_TYPE_HWRM_FWD_REQ: @@ -2690,7 +2729,7 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), dma_unmap_len(tx_buf, len), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); xdp_return_frame(tx_buf->xdpf); tx_buf->action = 0; tx_buf->xdpf = NULL; @@ -2715,7 +2754,7 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); last = tx_buf->nr_frags; j += 2; @@ -2727,7 +2766,7 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) dma_unmap_page( &pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_frag_size(frag), PCI_DMA_TODEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); } dev_kfree_skb(skb); } @@ -2794,7 +2833,7 @@ skip_rx_tpa_free: continue; dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, - BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, + BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_WEAK_ORDERING); rx_agg_buf->page = NULL; @@ -3176,6 +3215,58 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) return 0; } +static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) +{ + kfree(cpr->cp_desc_ring); + cpr->cp_desc_ring = NULL; + kfree(cpr->cp_desc_mapping); + cpr->cp_desc_mapping = NULL; +} + +static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) +{ + cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL); + if (!cpr->cp_desc_ring) + return -ENOMEM; + cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping), + GFP_KERNEL); + if (!cpr->cp_desc_mapping) + return -ENOMEM; + return 0; +} + +static void bnxt_free_all_cp_arrays(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + + if (!bnapi) + continue; + bnxt_free_cp_arrays(&bnapi->cp_ring); + } +} + +static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) +{ + int i, n = bp->cp_nr_pages; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + int rc; + + if (!bnapi) + continue; + rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n); + if (rc) + return rc; + } + return 0; +} + static void bnxt_free_cp_rings(struct bnxt *bp) { int i; @@ -3203,6 
+3294,7 @@ static void bnxt_free_cp_rings(struct bnxt *bp) if (cpr2) { ring = &cpr2->cp_ring_struct; bnxt_free_ring(bp, &ring->ring_mem); + bnxt_free_cp_arrays(cpr2); kfree(cpr2); cpr->cp_ring_arr[j] = NULL; } @@ -3221,6 +3313,12 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp) if (!cpr) return NULL; + rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages); + if (rc) { + bnxt_free_cp_arrays(cpr); + kfree(cpr); + return NULL; + } ring = &cpr->cp_ring_struct; rmem = &ring->ring_mem; rmem->nr_pages = bp->cp_nr_pages; @@ -3231,6 +3329,7 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp) rc = bnxt_alloc_ring(bp, rmem); if (rc) { bnxt_free_ring(bp, rmem); + bnxt_free_cp_arrays(cpr); kfree(cpr); cpr = NULL; } @@ -3663,9 +3762,15 @@ void bnxt_set_ring_params(struct bnxt *bp) if (jumbo_factor > agg_factor) agg_factor = jumbo_factor; } - agg_ring_size = ring_size * agg_factor; + if (agg_factor) { + if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) { + ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA; + netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n", + bp->rx_ring_size, ring_size); + bp->rx_ring_size = ring_size; + } + agg_ring_size = ring_size * agg_factor; - if (agg_ring_size) { bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, RX_DESC_CNT); if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { @@ -3855,77 +3960,26 @@ out: static void bnxt_free_hwrm_resources(struct bnxt *bp) { - struct pci_dev *pdev = bp->pdev; - - if (bp->hwrm_cmd_resp_addr) { - dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, - bp->hwrm_cmd_resp_dma_addr); - bp->hwrm_cmd_resp_addr = NULL; - } - - if (bp->hwrm_cmd_kong_resp_addr) { - dma_free_coherent(&pdev->dev, PAGE_SIZE, - bp->hwrm_cmd_kong_resp_addr, - bp->hwrm_cmd_kong_resp_dma_addr); - bp->hwrm_cmd_kong_resp_addr = NULL; - } -} + struct bnxt_hwrm_wait_token *token; -static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) -{ - struct pci_dev *pdev = bp->pdev; + dma_pool_destroy(bp->hwrm_dma_pool); + bp->hwrm_dma_pool = NULL; - if (bp->hwrm_cmd_kong_resp_addr) - return 0; - - bp->hwrm_cmd_kong_resp_addr = - dma_alloc_coherent(&pdev->dev, PAGE_SIZE, - &bp->hwrm_cmd_kong_resp_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_cmd_kong_resp_addr) - return -ENOMEM; - - return 0; + rcu_read_lock(); + hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) + WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); + rcu_read_unlock(); } static int bnxt_alloc_hwrm_resources(struct bnxt *bp) { - struct pci_dev *pdev = bp->pdev; - - bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, - &bp->hwrm_cmd_resp_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_cmd_resp_addr) + bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, + BNXT_HWRM_DMA_SIZE, + BNXT_HWRM_DMA_ALIGN, 0); + if (!bp->hwrm_dma_pool) return -ENOMEM; - return 0; -} - -static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) -{ - if (bp->hwrm_short_cmd_req_addr) { - struct pci_dev *pdev = bp->pdev; - - dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, - bp->hwrm_short_cmd_req_addr, - bp->hwrm_short_cmd_req_dma_addr); - bp->hwrm_short_cmd_req_addr = NULL; - } -} - -static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) -{ - struct pci_dev *pdev = bp->pdev; - - if (bp->hwrm_short_cmd_req_addr) - return 0; - - bp->hwrm_short_cmd_req_addr = - dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, - &bp->hwrm_short_cmd_req_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_short_cmd_req_addr) - return -ENOMEM; + 
INIT_HLIST_HEAD(&bp->hwrm_pending_list); return 0; } @@ -3986,8 +4040,8 @@ static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, struct bnxt_stats_mem *stats) { - struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qstats_ext_input req = {0}; + struct hwrm_func_qstats_ext_output *resp; + struct hwrm_func_qstats_ext_input *req; __le64 *hw_masks; int rc; @@ -3995,19 +4049,20 @@ static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, !(bp->flags & BNXT_FLAG_CHIP_P5)) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); if (rc) - goto qstat_exit; + return rc; - hw_masks = &resp->rx_ucast_pkts; - bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); + req->fid = cpu_to_le16(0xffff); + req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; -qstat_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + hw_masks = &resp->rx_ucast_pkts; + bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); + } + hwrm_req_drop(bp, req); return rc; } @@ -4266,6 +4321,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) bnxt_free_tx_rings(bp); bnxt_free_rx_rings(bp); bnxt_free_cp_rings(bp); + bnxt_free_all_cp_arrays(bp); bnxt_free_ntp_fltrs(bp, irq_re_init); if (irq_re_init) { bnxt_free_ring_stats(bp); @@ -4386,6 +4442,10 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) goto alloc_mem_err; } + rc = bnxt_alloc_all_cp_arrays(bp); + if (rc) + goto alloc_mem_err; + bnxt_init_ring_struct(bp); rc = bnxt_alloc_rx_rings(bp); @@ -4468,313 +4528,38 @@ static void bnxt_enable_int(struct bnxt *bp) } } -void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, - u16 cmpl_ring, u16 target_id) -{ - struct input *req = request; - - req->req_type = cpu_to_le16(req_type); - req->cmpl_ring = cpu_to_le16(cmpl_ring); - req->target_id = cpu_to_le16(target_id); - if (bnxt_kong_hwrm_message(bp, req)) - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); - else - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); -} - -static int bnxt_hwrm_to_stderr(u32 hwrm_err) -{ - switch (hwrm_err) { - case HWRM_ERR_CODE_SUCCESS: - return 0; - case HWRM_ERR_CODE_RESOURCE_LOCKED: - return -EROFS; - case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: - return -EACCES; - case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: - return -ENOSPC; - case HWRM_ERR_CODE_INVALID_PARAMS: - case HWRM_ERR_CODE_INVALID_FLAGS: - case HWRM_ERR_CODE_INVALID_ENABLES: - case HWRM_ERR_CODE_UNSUPPORTED_TLV: - case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: - return -EINVAL; - case HWRM_ERR_CODE_NO_BUFFER: - return -ENOMEM; - case HWRM_ERR_CODE_HOT_RESET_PROGRESS: - case HWRM_ERR_CODE_BUSY: - return -EAGAIN; - case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: - return -EOPNOTSUPP; - default: - return -EIO; - } -} - -static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, - int timeout, bool silent) -{ - int i, intr_process, rc, tmo_count; - struct input *req = msg; - u32 *data = msg; - u8 *valid; - u16 cp_ring_id, len = 0; - struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; - u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; - struct hwrm_short_input short_input = {0}; - u32 
doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; - u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; - u16 dst = BNXT_HWRM_CHNL_CHIMP; - - if (BNXT_NO_FW_ACCESS(bp) && - le16_to_cpu(req->req_type) != HWRM_FUNC_RESET) - return -EBUSY; - - if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { - if (msg_len > bp->hwrm_max_ext_req_len || - !bp->hwrm_short_cmd_req_addr) - return -EINVAL; - } - - if (bnxt_hwrm_kong_chnl(bp, req)) { - dst = BNXT_HWRM_CHNL_KONG; - bar_offset = BNXT_GRCPF_REG_KONG_COMM; - doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; - resp = bp->hwrm_cmd_kong_resp_addr; - } - - memset(resp, 0, PAGE_SIZE); - cp_ring_id = le16_to_cpu(req->cmpl_ring); - intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; - - req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); - /* currently supports only one outstanding message */ - if (intr_process) - bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); - - if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || - msg_len > BNXT_HWRM_MAX_REQ_LEN) { - void *short_cmd_req = bp->hwrm_short_cmd_req_addr; - u16 max_msg_len; - - /* Set boundary for maximum extended request length for short - * cmd format. If passed up from device use the max supported - * internal req length. - */ - max_msg_len = bp->hwrm_max_ext_req_len; - - memcpy(short_cmd_req, req, msg_len); - if (msg_len < max_msg_len) - memset(short_cmd_req + msg_len, 0, - max_msg_len - msg_len); - - short_input.req_type = req->req_type; - short_input.signature = - cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); - short_input.size = cpu_to_le16(msg_len); - short_input.req_addr = - cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr); - - data = (u32 *)&short_input; - msg_len = sizeof(short_input); - - /* Sync memory write before updating doorbell */ - wmb(); - - max_req_len = BNXT_HWRM_SHORT_REQ_LEN; - } - - /* Write request msg to hwrm channel */ - __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); - - for (i = msg_len; i < max_req_len; i += 4) - writel(0, bp->bar0 + bar_offset + i); - - /* Ring channel doorbell */ - writel(1, bp->bar0 + doorbell_offset); - - if (!pci_is_enabled(bp->pdev)) - return -ENODEV; - - if (!timeout) - timeout = DFLT_HWRM_CMD_TIMEOUT; - /* Limit timeout to an upper limit */ - timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT); - /* convert timeout to usec */ - timeout *= 1000; - - i = 0; - /* Short timeout for the first few iterations: - * number of loops = number of loops for short timeout + - * number of loops for standard timeout. - */ - tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; - timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; - tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); - - if (intr_process) { - u16 seq_id = bp->hwrm_intr_seq_id; - - /* Wait until hwrm response cmpl interrupt is processed */ - while (bp->hwrm_intr_seq_id != (u16)~seq_id && - i++ < tmo_count) { - /* Abort the wait for completion if the FW health - * check has failed. 
- */ - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) - return -EBUSY; - /* on first few passes, just barely sleep */ - if (i < HWRM_SHORT_TIMEOUT_COUNTER) { - usleep_range(HWRM_SHORT_MIN_TIMEOUT, - HWRM_SHORT_MAX_TIMEOUT); - } else { - if (HWRM_WAIT_MUST_ABORT(bp, req)) - break; - usleep_range(HWRM_MIN_TIMEOUT, - HWRM_MAX_TIMEOUT); - } - } - - if (bp->hwrm_intr_seq_id != (u16)~seq_id) { - if (!silent) - netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", - le16_to_cpu(req->req_type)); - return -EBUSY; - } - len = le16_to_cpu(resp->resp_len); - valid = ((u8 *)resp) + len - 1; - } else { - int j; - - /* Check if response len is updated */ - for (i = 0; i < tmo_count; i++) { - /* Abort the wait for completion if the FW health - * check has failed. - */ - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) - return -EBUSY; - len = le16_to_cpu(resp->resp_len); - if (len) - break; - /* on first few passes, just barely sleep */ - if (i < HWRM_SHORT_TIMEOUT_COUNTER) { - usleep_range(HWRM_SHORT_MIN_TIMEOUT, - HWRM_SHORT_MAX_TIMEOUT); - } else { - if (HWRM_WAIT_MUST_ABORT(bp, req)) - goto timeout_abort; - usleep_range(HWRM_MIN_TIMEOUT, - HWRM_MAX_TIMEOUT); - } - } - - if (i >= tmo_count) { -timeout_abort: - if (!silent) - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", - HWRM_TOTAL_TIMEOUT(i), - le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), len); - return -EBUSY; - } - - /* Last byte of resp contains valid bit */ - valid = ((u8 *)resp) + len - 1; - for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { - /* make sure we read from updated DMA memory */ - dma_rmb(); - if (*valid) - break; - usleep_range(1, 5); - } - - if (j >= HWRM_VALID_BIT_DELAY_USEC) { - if (!silent) - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", - HWRM_TOTAL_TIMEOUT(i), - le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), len, - *valid); - return -EBUSY; - } - } - - /* Zero valid bit for compatibility. Valid bit in an older spec - * may become a new field in a newer spec. We must make sure that - * a new field not implemented by old spec will read zero. 
- */ - *valid = 0; - rc = le16_to_cpu(resp->error_code); - if (rc && !silent) - netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", - le16_to_cpu(resp->req_type), - le16_to_cpu(resp->seq_id), rc); - return bnxt_hwrm_to_stderr(rc); -} - -int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) -{ - return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); -} - -int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, - int timeout) -{ - return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); -} - -int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) -{ - int rc; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, msg, msg_len, timeout); - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; -} - -int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, - int timeout) -{ - int rc; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; -} - int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, bool async_only) { - struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_drv_rgtr_input req = {0}; DECLARE_BITMAP(async_events_bmap, 256); u32 *events = (u32 *)async_events_bmap; + struct hwrm_func_drv_rgtr_output *resp; + struct hwrm_func_drv_rgtr_input *req; u32 flags; int rc, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); + if (rc) + return rc; - req.enables = - cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | - FUNC_DRV_RGTR_REQ_ENABLES_VER | - FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); + req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | + FUNC_DRV_RGTR_REQ_ENABLES_VER | + FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); - req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); + req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; - req.flags = cpu_to_le32(flags); - req.ver_maj_8b = DRV_VER_MAJ; - req.ver_min_8b = DRV_VER_MIN; - req.ver_upd_8b = DRV_VER_UPD; - req.ver_maj = cpu_to_le16(DRV_VER_MAJ); - req.ver_min = cpu_to_le16(DRV_VER_MIN); - req.ver_upd = cpu_to_le16(DRV_VER_UPD); + req->flags = cpu_to_le32(flags); + req->ver_maj_8b = DRV_VER_MAJ; + req->ver_min_8b = DRV_VER_MIN; + req->ver_upd_8b = DRV_VER_UPD; + req->ver_maj = cpu_to_le16(DRV_VER_MAJ); + req->ver_min = cpu_to_le16(DRV_VER_MIN); + req->ver_upd = cpu_to_le16(DRV_VER_UPD); if (BNXT_PF(bp)) { u32 data[8]; @@ -4791,14 +4576,14 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, } for (i = 0; i < 8; i++) - req.vf_req_fwd[i] = cpu_to_le32(data[i]); + req->vf_req_fwd[i] = cpu_to_le32(data[i]); - req.enables |= + req->enables |= cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); } if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) - req.flags |= cpu_to_le32( + req->flags |= cpu_to_le32( FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); memset(async_events_bmap, 0, sizeof(async_events_bmap)); @@ -4817,57 +4602,63 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, } } for (i = 0; i < 8; i++) - req.async_event_fwd[i] |= cpu_to_le32(events[i]); + 
req->async_event_fwd[i] |= cpu_to_le32(events[i]); if (async_only) - req.enables = + req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); if (resp->flags & cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) { - struct hwrm_func_drv_unrgtr_input req = {0}; + struct hwrm_func_drv_unrgtr_input *req; + int rc; if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); + if (rc) + return rc; + return hwrm_req_send(bp, req); } static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) { - u32 rc = 0; - struct hwrm_tunnel_dst_port_free_input req = {0}; + struct hwrm_tunnel_dst_port_free_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); - req.tunnel_type = tunnel_type; + req->tunnel_type = tunnel_type; switch (tunnel_type) { case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: - req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); + req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; break; case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: - req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); + req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; break; default: break; } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", rc); @@ -4877,17 +4668,19 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, u8 tunnel_type) { - u32 rc = 0; - struct hwrm_tunnel_dst_port_alloc_input req = {0}; - struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_tunnel_dst_port_alloc_output *resp; + struct hwrm_tunnel_dst_port_alloc_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); + if (rc) + return rc; - req.tunnel_type = tunnel_type; - req.tunnel_dst_port_val = port; + req->tunnel_type = tunnel_type; + req->tunnel_dst_port_val = port; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. 
rc:%d\n", rc); @@ -4907,33 +4700,40 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, } err_out: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) { - struct hwrm_cfa_l2_set_rx_mask_input req = {0}; + struct hwrm_cfa_l2_set_rx_mask_input *req; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); - req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); + if (rc) + return rc; - req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); - req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); - req.mask = cpu_to_le32(vnic->rx_mask); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); + req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + req->mask = cpu_to_le32(vnic->rx_mask); + return hwrm_req_send_silent(bp, req); } #ifdef CONFIG_RFS_ACCEL static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) { - struct hwrm_cfa_ntuple_filter_free_input req = {0}; + struct hwrm_cfa_ntuple_filter_free_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); - req.ntuple_filter_id = fltr->filter_id; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); + if (rc) + return rc; + + req->ntuple_filter_id = fltr->filter_id; + return hwrm_req_send(bp, req); } #define BNXT_NTP_FLTR_FLAGS \ @@ -4958,69 +4758,70 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) { - struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; struct hwrm_cfa_ntuple_filter_alloc_output *resp; + struct hwrm_cfa_ntuple_filter_alloc_input *req; struct flow_keys *keys = &fltr->fkeys; struct bnxt_vnic_info *vnic; u32 flags = 0; - int rc = 0; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); - req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; + req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; - req.dst_id = cpu_to_le16(fltr->rxq); + req->dst_id = cpu_to_le16(fltr->rxq); } else { vnic = &bp->vnic_info[fltr->rxq + 1]; - req.dst_id = cpu_to_le16(vnic->fw_vnic_id); + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); } - req.flags = cpu_to_le32(flags); - req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); + req->flags = cpu_to_le32(flags); + req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); - req.ethertype = htons(ETH_P_IP); - memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); - req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; - req.ip_protocol = keys->basic.ip_proto; + req->ethertype = htons(ETH_P_IP); + memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN); + req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; + req->ip_protocol = keys->basic.ip_proto; if (keys->basic.n_proto == htons(ETH_P_IPV6)) { int i; - req.ethertype = htons(ETH_P_IPV6); - req.ip_addr_type = + req->ethertype = htons(ETH_P_IPV6); + req->ip_addr_type = 
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; - *(struct in6_addr *)&req.src_ipaddr[0] = + *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src; - *(struct in6_addr *)&req.dst_ipaddr[0] = + *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst; for (i = 0; i < 4; i++) { - req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); - req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); + req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); + req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); } } else { - req.src_ipaddr[0] = keys->addrs.v4addrs.src; - req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); - req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; - req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + req->src_ipaddr[0] = keys->addrs.v4addrs.src; + req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; + req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); } if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { - req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); - req.tunnel_type = + req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); + req->tunnel_type = CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; } - req.src_port = keys->ports.src; - req.src_port_mask = cpu_to_be16(0xffff); - req.dst_port = keys->ports.dst; - req.dst_port_mask = cpu_to_be16(0xffff); + req->src_port = keys->ports.src; + req->src_port_mask = cpu_to_be16(0xffff); + req->dst_port = keys->ports.dst; + req->dst_port_mask = cpu_to_be16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) fltr->filter_id = resp->ntuple_filter_id; - } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } #endif @@ -5028,62 +4829,62 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, u8 *mac_addr) { - u32 rc = 0; - struct hwrm_cfa_l2_filter_alloc_input req = {0}; - struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_l2_filter_alloc_output *resp; + struct hwrm_cfa_l2_filter_alloc_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); - req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); + if (rc) + return rc; + + req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) - req.flags |= + req->flags |= cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); - req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); - req.enables = + req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); + req->enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); - memcpy(req.l2_addr, mac_addr, ETH_ALEN); - req.l2_addr_mask[0] = 0xff; - req.l2_addr_mask[1] = 0xff; - req.l2_addr_mask[2] = 0xff; - req.l2_addr_mask[3] = 0xff; - req.l2_addr_mask[4] = 0xff; - req.l2_addr_mask[5] = 0xff; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + memcpy(req->l2_addr, mac_addr, ETH_ALEN); + req->l2_addr_mask[0] = 0xff; + req->l2_addr_mask[1] = 0xff; + req->l2_addr_mask[2] = 0xff; + req->l2_addr_mask[3] = 0xff; + req->l2_addr_mask[4] = 0xff; + req->l2_addr_mask[5] = 0xff; + + resp = 
hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = resp->l2_filter_id; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) { + struct hwrm_cfa_l2_filter_free_input *req; u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ - int rc = 0; + int rc; /* Any associated ntuple filters will also be cleared by firmware. */ - mutex_lock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); + if (rc) + return rc; + hwrm_req_hold(bp, req); for (i = 0; i < num_of_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; for (j = 0; j < vnic->uc_filter_count; j++) { - struct hwrm_cfa_l2_filter_free_input req = {0}; - - bnxt_hwrm_cmd_hdr_init(bp, &req, - HWRM_CFA_L2_FILTER_FREE, -1, -1); + req->l2_filter_id = vnic->fw_l2_filter_id[j]; - req.l2_filter_id = vnic->fw_l2_filter_id[j]; - - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); } vnic->uc_filter_count = 0; } - mutex_unlock(&bp->hwrm_cmd_lock); - + hwrm_req_drop(bp, req); return rc; } @@ -5091,12 +4892,15 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; - struct hwrm_vnic_tpa_cfg_input req = {0}; + struct hwrm_vnic_tpa_cfg_input *req; + int rc; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); + if (rc) + return rc; if (tpa_flags) { u16 mss = bp->dev->mtu - 40; @@ -5110,9 +4914,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) if (tpa_flags & BNXT_FLAG_GRO) flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; - req.flags = cpu_to_le32(flags); + req->flags = cpu_to_le32(flags); - req.enables = + req->enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); @@ -5136,14 +4940,14 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) } else { segs = ilog2(nsegs); } - req.max_agg_segs = cpu_to_le16(segs); - req.max_aggs = cpu_to_le16(max_aggs); + req->max_agg_segs = cpu_to_le16(segs); + req->max_aggs = cpu_to_le16(max_aggs); - req.min_agg_len = cpu_to_le32(512); + req->min_agg_len = cpu_to_le32(512); } - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) @@ -5287,86 +5091,102 @@ static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_rss_cfg_input req = {0}; + struct hwrm_vnic_rss_cfg_input *req; + int rc; if ((bp->flags & BNXT_FLAG_CHIP_P5) || vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); + if (rc) + return rc; + if (set_rss) { bnxt_fill_hw_rss_tbl(bp, vnic); - req.hash_type = cpu_to_le32(bp->rss_hash_cfg); - req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); - 
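bnxt_hwrm_clear_vnic_filter() just above shows why hold/drop is not merely a lock replacement: one request is initialized once, held, re-sent with a different filter ID on every loop iteration, and dropped once at the end. A sketch of that shape, assuming a caller-supplied array of firmware filter handles (example_free_filters is a hypothetical name):

static int example_free_filters(struct bnxt *bp, __le64 *ids, int n)
{
	struct hwrm_cfa_l2_filter_free_input *req;
	int i, rc;

	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
	if (rc)
		return rc;

	hwrm_req_hold(bp, req);			/* keep req alive across sends */
	for (i = 0; i < n; i++) {
		req->l2_filter_id = ids[i];	/* rewrite one field per pass */
		rc = hwrm_req_send(bp, req);	/* same buffer, resent */
	}
	hwrm_req_drop(bp, req);
	return rc;
}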
req.hash_key_tbl_addr = + req->hash_type = cpu_to_le32(bp->rss_hash_cfg); + req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; + req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); + req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); } - req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_rss_cfg_input req = {0}; + struct hwrm_vnic_rss_cfg_input *req; dma_addr_t ring_tbl_map; u32 i, nr_ctxs; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); + if (rc) + return rc; + + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); + if (!set_rss) + return hwrm_req_send(bp, req); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); - if (!set_rss) { - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return 0; - } bnxt_fill_hw_rss_tbl(bp, vnic); - req.hash_type = cpu_to_le32(bp->rss_hash_cfg); - req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); + req->hash_type = cpu_to_le32(bp->rss_hash_cfg); + req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; + req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); ring_tbl_map = vnic->rss_table_dma_addr; nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); - for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { - int rc; - req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); - req.ring_table_pair_index = i; - req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_hold(bp, req); + for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { + req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); + req->ring_table_pair_index = i; + req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); + rc = hwrm_req_send(bp, req); if (rc) - return rc; + goto exit; } - return 0; + +exit: + hwrm_req_drop(bp, req); + return rc; } static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_plcmodes_cfg_input req = {0}; + struct hwrm_vnic_plcmodes_cfg_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); - req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | - VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | - VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); - req.enables = + rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); + req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); /* thresholds not implemented in firmware yet */ - req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); - req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); - req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); + req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); + 
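Cleanup helpers converted below, such as bnxt_hwrm_vnic_ctx_free_one() and bnxt_hwrm_vnic_free_one(), return void: if hwrm_req_init() fails there is nothing to roll back, so the allocation error is swallowed and the send result ignored. A sketch of that free-path shape (hypothetical helper name):

static void example_ctx_free(struct bnxt *bp, u16 ctx_id)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;

	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
		return;				/* nothing to undo on teardown */

	req->rss_cos_lb_ctx_id = cpu_to_le16(ctx_id);
	hwrm_req_send(bp, req);			/* best effort; rc unused */
}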
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + return hwrm_req_send(bp, req); } static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) { - struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; + struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; + + if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) + return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); - req.rss_cos_lb_ctx_id = + req->rss_cos_lb_ctx_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; } @@ -5387,20 +5207,20 @@ static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) { + struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; + struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; int rc; - struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; - struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = - bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, - -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = le16_to_cpu(resp->rss_cos_lb_ctx_id); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -5414,47 +5234,50 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) { - unsigned int ring = 0, grp_idx; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_cfg_input req = {0}; + struct hwrm_vnic_cfg_input *req; + unsigned int ring = 0, grp_idx; u16 def_vlan = 0; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); + if (rc) + return rc; if (bp->flags & BNXT_FLAG_CHIP_P5) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; - req.default_rx_ring_id = + req->default_rx_ring_id = cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); - req.default_cmpl_ring_id = + req->default_cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); - req.enables = + req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); goto vnic_mru; } - req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); + req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); /* Only RSS support for now TBD: COS & LB */ if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { - req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); - req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | VNIC_CFG_REQ_ENABLES_MRU); } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { - req.rss_rule = + req->rss_rule = cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); - req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | VNIC_CFG_REQ_ENABLES_MRU); - req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); + req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); } else { - req.rss_rule = cpu_to_le16(0xffff); + req->rss_rule = cpu_to_le16(0xffff); } if 
(BNXT_CHIP_TYPE_NITRO_A0(bp) && (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { - req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); - req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); + req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); } else { - req.cos_rule = cpu_to_le16(0xffff); + req->cos_rule = cpu_to_le16(0xffff); } if (vnic->flags & BNXT_VNIC_RSS_FLAG) @@ -5465,34 +5288,36 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) ring = bp->rx_nr_rings - 1; grp_idx = bp->rx_ring[ring].bnapi->index; - req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); - req.lb_rule = cpu_to_le16(0xffff); + req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); + req->lb_rule = cpu_to_le16(0xffff); vnic_mru: - req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); + req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); #ifdef CONFIG_BNXT_SRIOV if (BNXT_VF(bp)) def_vlan = bp->vf.vlan; #endif if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) - req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); + req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) - req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); + req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) { if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { - struct hwrm_vnic_free_input req = {0}; + struct hwrm_vnic_free_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); - req.vnic_id = + if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) + return; + + req->vnic_id = cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; } } @@ -5509,11 +5334,15 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, unsigned int start_rx_ring_idx, unsigned int nr_rings) { - int rc = 0; unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; - struct hwrm_vnic_alloc_input req = {0}; - struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct hwrm_vnic_alloc_output *resp; + struct hwrm_vnic_alloc_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); + if (rc) + return rc; if (bp->flags & BNXT_FLAG_CHIP_P5) goto vnic_no_ring_grps; @@ -5533,22 +5362,20 @@ vnic_no_ring_grps: for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; if (vnic_id == 0) - req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); + req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) { - struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_vnic_qcaps_input req = {0}; + struct hwrm_vnic_qcaps_output 
*resp; + struct hwrm_vnic_qcaps_input *req; int rc; bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); @@ -5556,9 +5383,12 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) if (bp->hwrm_spec_code < 0x10600) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { u32 flags = le32_to_cpu(resp->flags); @@ -5584,92 +5414,96 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) { + struct hwrm_ring_grp_alloc_output *resp; + struct hwrm_ring_grp_alloc_input *req; + int rc; u16 i; - u32 rc = 0; if (bp->flags & BNXT_FLAG_CHIP_P5) return 0; - mutex_lock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); for (i = 0; i < bp->rx_nr_rings; i++) { - struct hwrm_ring_grp_alloc_input req = {0}; - struct hwrm_ring_grp_alloc_output *resp = - bp->hwrm_cmd_resp_addr; unsigned int grp_idx = bp->rx_ring[i].bnapi->index; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); + req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); + req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); + req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); + req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); - req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); - req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); - req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); - req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); + rc = hwrm_req_send(bp, req); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); if (rc) break; bp->grp_info[grp_idx].fw_grp_id = le32_to_cpu(resp->ring_group_id); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) { + struct hwrm_ring_grp_free_input *req; u16 i; - struct hwrm_ring_grp_free_input req = {0}; if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); + if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) + return; - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = 0; i < bp->cp_nr_rings; i++) { if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) continue; - req.ring_group_id = + req->ring_group_id = cpu_to_le32(bp->grp_info[i].fw_grp_id); - _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); } static int hwrm_ring_alloc_send_msg(struct bnxt *bp, struct bnxt_ring_struct *ring, u32 ring_type, u32 map_index) { - int rc = 0, err = 0; - struct hwrm_ring_alloc_input req = {0}; - struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_ring_alloc_output *resp; + struct hwrm_ring_alloc_input *req; struct bnxt_ring_mem_info *rmem = &ring->ring_mem; struct bnxt_ring_grp_info *grp_info; + int rc, err = 0; u16 ring_id; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); + if (rc) + goto exit; - req.enables = 0; + req->enables = 
0; if (rmem->nr_pages > 1) { - req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); + req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); /* Page size is in log2 units */ - req.page_size = BNXT_PAGE_SHIFT; - req.page_tbl_depth = 1; + req->page_size = BNXT_PAGE_SHIFT; + req->page_tbl_depth = 1; } else { - req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); + req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); } - req.fbo = 0; + req->fbo = 0; /* Association of ring index with doorbell index and MSIX number */ - req.logical_id = cpu_to_le16(map_index); + req->logical_id = cpu_to_le16(map_index); switch (ring_type) { case HWRM_RING_ALLOC_TX: { @@ -5677,67 +5511,67 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, txr = container_of(ring, struct bnxt_tx_ring_info, tx_ring_struct); - req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; + req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; /* Association of transmit ring with completion ring */ grp_info = &bp->grp_info[ring->grp_idx]; - req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); - req.length = cpu_to_le32(bp->tx_ring_mask + 1); - req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req.queue_id = cpu_to_le16(ring->queue_id); + req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); + req->length = cpu_to_le32(bp->tx_ring_mask + 1); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->queue_id = cpu_to_le16(ring->queue_id); break; } case HWRM_RING_ALLOC_RX: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; - req.length = cpu_to_le32(bp->rx_ring_mask + 1); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req->length = cpu_to_le32(bp->rx_ring_mask + 1); if (bp->flags & BNXT_FLAG_CHIP_P5) { u16 flags = 0; /* Association of rx ring with stats context */ grp_info = &bp->grp_info[ring->grp_idx]; - req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); - req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req.enables |= cpu_to_le32( + req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->enables |= cpu_to_le32( RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); if (NET_IP_ALIGN == 2) flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; - req.flags = cpu_to_le16(flags); + req->flags = cpu_to_le16(flags); } break; case HWRM_RING_ALLOC_AGG: if (bp->flags & BNXT_FLAG_CHIP_P5) { - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; /* Association of agg ring with rx ring */ grp_info = &bp->grp_info[ring->grp_idx]; - req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); - req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); - req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req.enables |= cpu_to_le32( + req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); + req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->enables |= cpu_to_le32( RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); } else { - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; } - req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); + req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1); break; case HWRM_RING_ALLOC_CMPL: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; - req.length = cpu_to_le32(bp->cp_ring_mask + 1); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; + req->length = cpu_to_le32(bp->cp_ring_mask + 1); if (bp->flags & BNXT_FLAG_CHIP_P5) { /* Association of cp ring with nq */ grp_info = 
&bp->grp_info[map_index]; - req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); - req.cq_handle = cpu_to_le64(ring->handle); - req.enables |= cpu_to_le32( + req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); + req->cq_handle = cpu_to_le64(ring->handle); + req->enables |= cpu_to_le32( RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); } else if (bp->flags & BNXT_FLAG_USING_MSIX) { - req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; } break; case HWRM_RING_ALLOC_NQ: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; - req.length = cpu_to_le32(bp->cp_ring_mask + 1); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; + req->length = cpu_to_le32(bp->cp_ring_mask + 1); if (bp->flags & BNXT_FLAG_USING_MSIX) - req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; break; default: netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", @@ -5745,12 +5579,13 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, return -1; } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); err = le16_to_cpu(resp->error_code); ring_id = le16_to_cpu(resp->ring_id); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); +exit: if (rc || err) { netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", ring_type, rc, err); @@ -5765,23 +5600,28 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) int rc; if (BNXT_PF(bp)) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); - req.async_event_cr = cpu_to_le16(idx); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req->async_event_cr = cpu_to_le16(idx); + return hwrm_req_send(bp, req); } else { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); - req.enables = + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); + if (rc) + return rc; + + req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); - req.async_event_cr = cpu_to_le16(idx); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->async_event_cr = cpu_to_le16(idx); + return hwrm_req_send(bp, req); } - return rc; } static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, @@ -5952,23 +5792,27 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, struct bnxt_ring_struct *ring, u32 ring_type, int cmpl_ring_id) { + struct hwrm_ring_free_output *resp; + struct hwrm_ring_free_input *req; + u16 error_code = 0; int rc; - struct hwrm_ring_free_input req = {0}; - struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; - u16 error_code; if (BNXT_NO_FW_ACCESS(bp)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); - req.ring_type = ring_type; - req.ring_id = cpu_to_le16(ring->fw_ring_id); + rc = hwrm_req_init(bp, req, HWRM_RING_FREE); + if (rc) + goto exit; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - error_code = le16_to_cpu(resp->error_code); - mutex_unlock(&bp->hwrm_cmd_lock); + req->cmpl_ring = 
cpu_to_le16(cmpl_ring_id); + req->ring_type = ring_type; + req->ring_id = cpu_to_le16(ring->fw_ring_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + error_code = le16_to_cpu(resp->error_code); + hwrm_req_drop(bp, req); +exit: if (rc || error_code) { netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", ring_type, rc, error_code); @@ -6083,20 +5927,23 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, static int bnxt_hwrm_get_rings(struct bnxt *bp) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; int rc; if (bp->hwrm_spec_code < 0x10601) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -6130,39 +5977,45 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) hw_resc->resv_cp_rings = cp; hw_resc->resv_stat_ctxs = stats; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } -/* Caller must hold bp->hwrm_cmd_lock */ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; int rc; if (bp->hwrm_spec_code < 0x10601) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(fid); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(fid); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) *tx_rings = le16_to_cpu(resp->alloc_tx_rings); + hwrm_req_drop(bp, req); return rc; } static bool bnxt_rfs_supported(struct bnxt *bp); -static void -__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, - int tx_rings, int rx_rings, int ring_grps, - int cp_rings, int stats, int vnics) +static struct hwrm_func_cfg_input * +__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings, int stats, int vnics) { + struct hwrm_func_cfg_input *req; u32 enables = 0; - bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); + if (hwrm_req_init(bp, req, HWRM_FUNC_CFG)) + return NULL; + req->fid = cpu_to_le16(0xffff); enables |= tx_rings ? 
FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; req->num_tx_rings = cpu_to_le16(tx_rings); @@ -6203,17 +6056,19 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, req->num_vnics = cpu_to_le16(vnics); } req->enables = cpu_to_le32(enables); + return req; } -static void -__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, - struct hwrm_func_vf_cfg_input *req, int tx_rings, - int rx_rings, int ring_grps, int cp_rings, - int stats, int vnics) +static struct hwrm_func_vf_cfg_input * +__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings, int stats, int vnics) { + struct hwrm_func_vf_cfg_input *req; u32 enables = 0; - bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); + if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) + return NULL; + enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; @@ -6245,21 +6100,27 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, req->num_vnics = cpu_to_le16(vnics); req->enables = cpu_to_le32(enables); + return req; } static int bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; int rc; - __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); - if (!req.enables) + req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); + if (!req) + return -ENOMEM; + + if (!req->enables) { + hwrm_req_drop(bp, req); return 0; + } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) return rc; @@ -6273,7 +6134,7 @@ static int bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; int rc; if (!BNXT_NEW_RM(bp)) { @@ -6281,9 +6142,12 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return 0; } - __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); + if (!req) + return -ENOMEM; + + rc = hwrm_req_send(bp, req); if (rc) return rc; @@ -6484,14 +6348,14 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; u32 flags; if (!BNXT_NEW_RM(bp)) return 0; - __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | @@ -6501,20 +6365,19 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, if (!(bp->flags & BNXT_FLAG_CHIP_P5)) flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; - req.flags = cpu_to_le32(flags); - return hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(flags); + return hwrm_req_send_silent(bp, req); } static int bnxt_hwrm_check_pf_rings(struct bnxt 
*bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; u32 flags; - __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; if (BNXT_NEW_RM(bp)) { flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | @@ -6528,9 +6391,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; } - req.flags = cpu_to_le32(flags); - return hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(flags); + return hwrm_req_send_silent(bp, req); } static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, @@ -6551,9 +6413,9 @@ static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) { - struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_coal_cap *coal_cap = &bp->coal_cap; - struct hwrm_ring_aggint_qcaps_input req = {0}; + struct hwrm_ring_aggint_qcaps_output *resp; + struct hwrm_ring_aggint_qcaps_input *req; int rc; coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; @@ -6569,9 +6431,11 @@ static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) if (bp->hwrm_spec_code < 0x10902) return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) + return; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); coal_cap->nq_params = le32_to_cpu(resp->nq_params); @@ -6591,7 +6455,7 @@ static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) le16_to_cpu(resp->num_cmpl_aggr_int_max); coal_cap->timer_units = le16_to_cpu(resp->timer_units); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); } static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) @@ -6659,37 +6523,40 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp, req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); } -/* Caller holds bp->hwrm_cmd_lock */ static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, struct bnxt_coal *hw_coal) { - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_coal_cap *coal_cap = &bp->coal_cap; u32 nq_params = coal_cap->nq_params; u16 tmr; + int rc; if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, - -1, -1); - req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); - req.flags = + rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; + + req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); + req->flags = cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); - req.int_lat_tmr_min = cpu_to_le16(tmr); - req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); - return _hwrm_send_message(bp, &req, sizeof(req), 
HWRM_CMD_TIMEOUT); + req->int_lat_tmr_min = cpu_to_le16(tmr); + req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); + return hwrm_req_send(bp, req); } int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) { - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_coal coal; + int rc; /* Tick values in micro seconds. * 1 coal_buf x bufs_per_record = 1 completion record. @@ -6702,48 +6569,53 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) if (!bnapi->rx_ring) return -ENODEV; - bnxt_hwrm_cmd_hdr_init(bp, &req_rx, - HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); + rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; - bnxt_hwrm_set_coal_params(bp, &coal, &req_rx); + bnxt_hwrm_set_coal_params(bp, &coal, req_rx); - req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); + req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); - return hwrm_send_message(bp, &req_rx, sizeof(req_rx), - HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req_rx); } int bnxt_hwrm_set_coal(struct bnxt *bp) { - int i, rc = 0; - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, - req_tx = {0}, *req; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx, + *req; + int i, rc; - bnxt_hwrm_cmd_hdr_init(bp, &req_rx, - HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); - bnxt_hwrm_cmd_hdr_init(bp, &req_tx, - HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); + rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; + + rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) { + hwrm_req_drop(bp, req_rx); + return rc; + } - bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx); - bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx); + bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx); + bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req_rx); + hwrm_req_hold(bp, req_tx); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_coal *hw_coal; u16 ring_id; - req = &req_rx; + req = req_rx; if (!bnapi->rx_ring) { ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); - req = &req_tx; + req = req_tx; } else { ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); } req->ring_id = cpu_to_le16(ring_id); - rc = _hwrm_send_message(bp, req, sizeof(*req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) break; @@ -6751,11 +6623,10 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) continue; if (bnapi->rx_ring && bnapi->tx_ring) { - req = &req_tx; + req = req_tx; ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); req->ring_id = cpu_to_le16(ring_id); - rc = _hwrm_send_message(bp, req, sizeof(*req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) break; } @@ -6765,14 +6636,15 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) hw_coal = &bp->tx_coal; __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req_rx); + hwrm_req_drop(bp, req_tx); return rc; } static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) { - struct hwrm_stat_ctx_clr_stats_input req0 = {0}; - struct hwrm_stat_ctx_free_input req = {0}; + struct hwrm_stat_ctx_clr_stats_input *req0 = NULL; + struct hwrm_stat_ctx_free_input *req; int i; if (!bp->bnapi) @@ -6781,53 +6653,60 @@ static void 
bnxt_hwrm_stat_ctx_free(struct bnxt *bp) if (BNXT_CHIP_TYPE_NITRO_A0(bp)) return; - bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); - - mutex_lock(&bp->hwrm_cmd_lock); + if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) + return; + if (BNXT_FW_MAJ(bp) <= 20) { + if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { + hwrm_req_drop(bp, req); + return; + } + hwrm_req_hold(bp, req0); + } + hwrm_req_hold(bp, req); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { - req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); - if (BNXT_FW_MAJ(bp) <= 20) { - req0.stat_ctx_id = req.stat_ctx_id; - _hwrm_send_message(bp, &req0, sizeof(req0), - HWRM_CMD_TIMEOUT); + req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); + if (req0) { + req0->stat_ctx_id = req->stat_ctx_id; + hwrm_req_send(bp, req0); } - _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); + if (req0) + hwrm_req_drop(bp, req0); } static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) { - int rc = 0, i; - struct hwrm_stat_ctx_alloc_input req = {0}; - struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_stat_ctx_alloc_output *resp; + struct hwrm_stat_ctx_alloc_input *req; + int rc, i; if (BNXT_CHIP_TYPE_NITRO_A0(bp)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); + if (rc) + return rc; - req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); - req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); + req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); + req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); - mutex_lock(&bp->hwrm_cmd_lock); + resp = hwrm_req_hold(bp, req); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); + req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) break; @@ -6835,22 +6714,25 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_func_qcfg(struct bnxt *bp) { - struct hwrm_func_qcfg_input req = {0}; - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; u32 min_db_offset = 0; u16 flags; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto func_qcfg_exit; @@ -6910,7 +6792,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) bp->db_size = pci_resource_len(bp->pdev, 2); func_qcfg_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -6949,17 +6831,19 @@ static void 
bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx, static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) { - struct hwrm_func_backing_store_qcaps_input req = {0}; - struct hwrm_func_backing_store_qcaps_output *resp = - bp->hwrm_cmd_resp_addr; + struct hwrm_func_backing_store_qcaps_output *resp; + struct hwrm_func_backing_store_qcaps_input *req; int rc; if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_mem_info *ctx; @@ -7024,7 +6908,7 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) rc = 0; } ctx_err: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -7055,15 +6939,17 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) { - struct hwrm_func_backing_store_cfg_input req = {0}; + struct hwrm_func_backing_store_cfg_input *req; struct bnxt_ctx_mem_info *ctx = bp->ctx; struct bnxt_ctx_pg_info *ctx_pg; - u32 req_len = sizeof(req); + void **__req = (void **)&req; + u32 req_len = sizeof(*req); __le32 *num_entries; __le64 *pg_dir; u32 flags = 0; u8 *pg_attr; u32 ena; + int rc; int i; if (!ctx) @@ -7071,90 +6957,93 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) if (req_len > bp->hwrm_max_ext_req_len) req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); - req.enables = cpu_to_le32(enables); + rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len); + if (rc) + return rc; + req->enables = cpu_to_le32(enables); if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { ctx_pg = &ctx->qp_mem; - req.qp_num_entries = cpu_to_le32(ctx_pg->entries); - req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); - req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); - req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); + req->qp_num_entries = cpu_to_le32(ctx_pg->entries); + req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); + req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); + req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.qpc_pg_size_qpc_lvl, - &req.qpc_page_dir); + &req->qpc_pg_size_qpc_lvl, + &req->qpc_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { ctx_pg = &ctx->srq_mem; - req.srq_num_entries = cpu_to_le32(ctx_pg->entries); - req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); - req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); + req->srq_num_entries = cpu_to_le32(ctx_pg->entries); + req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); + req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.srq_pg_size_srq_lvl, - &req.srq_page_dir); + &req->srq_pg_size_srq_lvl, + &req->srq_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { ctx_pg = &ctx->cq_mem; - req.cq_num_entries = cpu_to_le32(ctx_pg->entries); - req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); - req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); - 
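bnxt_hwrm_func_backing_store_cfg() above is the one variable-length request in this file: older firmware only accepts the shorter legacy layout (BNXT_BACKING_STORE_CFG_LEGACY_LEN), so the request is created through __hwrm_req_init() with an explicit size, passed via a void ** alias of the typed pointer. The call shape, reduced to a sketch:

static int example_var_len_cfg(struct bnxt *bp, u32 req_len)
{
	struct hwrm_func_backing_store_cfg_input *req;
	void **__req = (void **)&req;
	int rc;

	rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
	if (rc)
		return rc;

	/* ... fill only the fields that lie within req_len ... */
	return hwrm_req_send(bp, req);		/* sends the recorded length */
}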
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, - &req.cq_page_dir); + req->cq_num_entries = cpu_to_le32(ctx_pg->entries); + req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); + req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->cq_pg_size_cq_lvl, + &req->cq_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { ctx_pg = &ctx->vnic_mem; - req.vnic_num_vnic_entries = + req->vnic_num_vnic_entries = cpu_to_le16(ctx->vnic_max_vnic_entries); - req.vnic_num_ring_table_entries = + req->vnic_num_ring_table_entries = cpu_to_le16(ctx->vnic_max_ring_table_entries); - req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); + req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.vnic_pg_size_vnic_lvl, - &req.vnic_page_dir); + &req->vnic_pg_size_vnic_lvl, + &req->vnic_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { ctx_pg = &ctx->stat_mem; - req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); - req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); + req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries); + req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.stat_pg_size_stat_lvl, - &req.stat_page_dir); + &req->stat_pg_size_stat_lvl, + &req->stat_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { ctx_pg = &ctx->mrav_mem; - req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); + req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); if (ctx->mrav_num_entries_units) flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; - req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); + req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.mrav_pg_size_mrav_lvl, - &req.mrav_page_dir); + &req->mrav_pg_size_mrav_lvl, + &req->mrav_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { ctx_pg = &ctx->tim_mem; - req.tim_num_entries = cpu_to_le32(ctx_pg->entries); - req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); + req->tim_num_entries = cpu_to_le32(ctx_pg->entries); + req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.tim_pg_size_tim_lvl, - &req.tim_page_dir); + &req->tim_pg_size_tim_lvl, + &req->tim_page_dir); } - for (i = 0, num_entries = &req.tqm_sp_num_entries, - pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, - pg_dir = &req.tqm_sp_page_dir, + for (i = 0, num_entries = &req->tqm_sp_num_entries, + pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, + pg_dir = &req->tqm_sp_page_dir, ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; i < BNXT_MAX_TQM_RINGS; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { if (!(enables & ena)) continue; - req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); + req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); ctx_pg = ctx->tqm_mem[i]; *num_entries = cpu_to_le32(ctx_pg->entries); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); } - req.flags = cpu_to_le32(flags); - return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(flags); + return hwrm_req_send(bp, req); } static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, @@ -7434,17 +7323,18 @@ skip_rdma: int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) { - struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_resource_qcaps_input req = {0}; + struct 
hwrm_func_resource_qcaps_output *resp; + struct hwrm_func_resource_qcaps_input *req; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1); - req.fid = cpu_to_le16(0xffff); + rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (rc) goto hwrm_func_resc_qcaps_exit; @@ -7485,15 +7375,14 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; } hwrm_func_resc_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } -/* bp->hwrm_cmd_lock already held. */ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) { - struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_mac_ptp_qcfg_input req = {0}; + struct hwrm_port_mac_ptp_qcfg_output *resp; + struct hwrm_port_mac_ptp_qcfg_input *req; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; u8 flags; int rc; @@ -7503,21 +7392,27 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) goto no_ptp; } - req.port_id = cpu_to_le16(bp->pf.port_id); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_PTP_QCFG, -1, -1); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); if (rc) goto no_ptp; + req->port_id = cpu_to_le16(bp->pf.port_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto exit; + flags = resp->flags; if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { rc = -ENODEV; - goto no_ptp; + goto exit; } if (!ptp) { ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); - if (!ptp) - return -ENOMEM; + if (!ptp) { + rc = -ENOMEM; + goto exit; + } ptp->bp = bp; bp->ptp_cfg = ptp; } @@ -7529,11 +7424,18 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; } else { rc = -ENODEV; - goto no_ptp; + goto exit; } - return 0; + rc = bnxt_ptp_init(bp); + if (rc) + netdev_warn(bp->dev, "PTP initialization failed.\n"); +exit: + hwrm_req_drop(bp, req); + if (!rc) + return 0; no_ptp: + bnxt_ptp_clear(bp); kfree(ptp); bp->ptp_cfg = NULL; return rc; @@ -7541,17 +7443,19 @@ no_ptp: static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) { - int rc = 0; - struct hwrm_func_qcaps_input req = {0}; - struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcaps_output *resp; + struct hwrm_func_qcaps_input *req; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; u32 flags, flags_ext; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); - req.fid = cpu_to_le16(0xffff); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_func_qcaps_exit; @@ -7576,6 +7480,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) flags_ext = le32_to_cpu(resp->flags_ext); if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; + if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; bp->tx_push_thresh = 0; if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && @@ 
-7613,6 +7519,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { __bnxt_hwrm_ptp_qcfg(bp); } else { + bnxt_ptp_clear(bp); kfree(bp->ptp_cfg); bp->ptp_cfg = NULL; } @@ -7626,7 +7533,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) } hwrm_func_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -7657,19 +7564,20 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) { - struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; - int rc = 0; + struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req; u32 flags; + int rc; if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) return 0; - resp = bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_cfa_adv_qcaps_exit; @@ -7679,7 +7587,7 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; hwrm_cfa_adv_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -7822,17 +7730,20 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp) static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) { - struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_fw_health *fw_health = bp->fw_health; - struct hwrm_error_recovery_qcfg_input req = {0}; + struct hwrm_error_recovery_qcfg_output *resp; + struct hwrm_error_recovery_qcfg_input *req; int rc, i; if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto err_recovery_out; fw_health->flags = le32_to_cpu(resp->flags); @@ -7874,7 +7785,7 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) resp->delay_after_reset[i]; } err_recovery_out: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (!rc) rc = bnxt_map_fw_health_regs(bp); if (rc) @@ -7884,12 +7795,16 @@ err_recovery_out: static int bnxt_hwrm_func_reset(struct bnxt *bp) { - struct hwrm_func_reset_input req = {0}; + struct hwrm_func_reset_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); - req.enables = 0; + rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); + if (rc) + return rc; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); + req->enables = 0; + hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); + return hwrm_req_send(bp, req); } static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) @@ -7904,16 +7819,18 @@ static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) { - int rc = 0; - struct hwrm_queue_qportcfg_input req = {0}; - struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_queue_qportcfg_output *resp; + struct hwrm_queue_qportcfg_input *req; u8 i, j, *qptr; bool no_rdma; + int rc = 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); + rc = hwrm_req_init(bp, req, 
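
The bnxt_hwrm_func_reset() hunk above also shows how timeouts moved: instead of passing HWRM_RESET_TIMEOUT into every send call, the longer timeout is now attached to the one request that needs it. A condensed sketch of that shape:

    struct hwrm_func_reset_input *req;
    int rc;

    rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
    if (rc)
            return rc;
    hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);  /* per-request override */
    return hwrm_req_send(bp, req);                  /* others keep the default */
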
HWRM_QUEUE_QPORTCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto qportcfg_exit; @@ -7947,35 +7864,48 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) bp->max_lltc = bp->max_tc; qportcfg_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } -static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) +static int bnxt_hwrm_poll(struct bnxt *bp) { - struct hwrm_ver_get_input req = {0}; + struct hwrm_ver_get_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); - req.hwrm_intf_maj = HWRM_VERSION_MAJOR; - req.hwrm_intf_min = HWRM_VERSION_MINOR; - req.hwrm_intf_upd = HWRM_VERSION_UPDATE; + rc = hwrm_req_init(bp, req, HWRM_VER_GET); + if (rc) + return rc; + + req->hwrm_intf_maj = HWRM_VERSION_MAJOR; + req->hwrm_intf_min = HWRM_VERSION_MINOR; + req->hwrm_intf_upd = HWRM_VERSION_UPDATE; - rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT, - silent); + hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT); + rc = hwrm_req_send(bp, req); return rc; } static int bnxt_hwrm_ver_get(struct bnxt *bp) { - struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_ver_get_output *resp; + struct hwrm_ver_get_input *req; u16 fw_maj, fw_min, fw_bld, fw_rsv; u32 dev_caps_cfg, hwrm_ver; int rc, len; + rc = hwrm_req_init(bp, req, HWRM_VER_GET); + if (rc) + return rc; + + hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; - mutex_lock(&bp->hwrm_cmd_lock); - rc = __bnxt_hwrm_ver_get(bp, false); + req->hwrm_intf_maj = HWRM_VERSION_MAJOR; + req->hwrm_intf_min = HWRM_VERSION_MINOR; + req->hwrm_intf_upd = HWRM_VERSION_UPDATE; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_ver_get_exit; @@ -8067,29 +7997,33 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; hwrm_ver_get_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } int bnxt_hwrm_fw_set_time(struct bnxt *bp) { - struct hwrm_fw_set_time_input req = {0}; + struct hwrm_fw_set_time_input *req; struct tm tm; time64_t now = ktime_get_real_seconds(); + int rc; if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || bp->hwrm_spec_code < 0x10400) return -EOPNOTSUPP; time64_to_tm(now, 0, &tm); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); - req.year = cpu_to_le16(1900 + tm.tm_year); - req.month = 1 + tm.tm_mon; - req.day = tm.tm_mday; - req.hour = tm.tm_hour; - req.minute = tm.tm_min; - req.second = tm.tm_sec; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); + if (rc) + return rc; + + req->year = cpu_to_le16(1900 + tm.tm_year); + req->month = 1 + tm.tm_mon; + req->day = tm.tm_mday; + req->hour = tm.tm_hour; + req->minute = tm.tm_min; + req->second = tm.tm_sec; + return hwrm_req_send(bp, req); } static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) @@ -8177,8 +8111,9 @@ static void bnxt_accumulate_all_stats(struct bnxt *bp) static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) { + struct hwrm_port_qstats_input *req; struct bnxt_pf_info *pf = &bp->pf; - struct hwrm_port_qstats_input req = {0}; + int rc; if (!(bp->flags & BNXT_FLAG_PORT_STATS)) return 0; @@ -8186,20 +8121,24 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) if (flags && !(bp->fw_cap & 
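
bnxt_hwrm_poll() above replaces the old __bnxt_hwrm_ver_get(bp, silent) helper; silence is now a per-request flag rather than a parallel family of send functions. As I read the series, BNXT_HWRM_CTX_SILENT suppresses error logging (failures are expected while polling a booting firmware) and BNXT_HWRM_FULL_WAIT makes the send wait out the full timeout. Sketch under that assumption:

    struct hwrm_ver_get_input *req;
    int rc;

    rc = hwrm_req_init(bp, req, HWRM_VER_GET);
    if (rc)
            return rc;
    req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
    req->hwrm_intf_min = HWRM_VERSION_MINOR;
    req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
    hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
    return hwrm_req_send(bp, req);  /* flags apply only to this request */
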
BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) return -EOPNOTSUPP; - req.flags = flags; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); - req.port_id = cpu_to_le16(pf->port_id); - req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + + rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); + if (rc) + return rc; + + req->flags = flags; + req->port_id = cpu_to_le16(pf->port_id); + req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + BNXT_TX_PORT_STATS_BYTE_OFFSET); - req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) { - struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; - struct hwrm_port_qstats_ext_input req = {0}; + struct hwrm_queue_pri2cos_qcfg_output *resp_qc; + struct hwrm_queue_pri2cos_qcfg_input *req_qc; + struct hwrm_port_qstats_ext_output *resp_qs; + struct hwrm_port_qstats_ext_input *req_qs; struct bnxt_pf_info *pf = &bp->pf; u32 tx_stat_size; int rc; @@ -8210,46 +8149,53 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); - req.flags = flags; - req.port_id = cpu_to_le16(pf->port_id); - req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); - req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); + rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); + if (rc) + return rc; + + req_qs->flags = flags; + req_qs->port_id = cpu_to_le16(pf->port_id); + req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); + req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); tx_stat_size = bp->tx_port_stats_ext.hw_stats ? sizeof(struct tx_port_stats_ext) : 0; - req.tx_stat_size = cpu_to_le16(tx_stat_size); - req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); + req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); + resp_qs = hwrm_req_hold(bp, req_qs); + rc = hwrm_req_send(bp, req_qs); if (!rc) { - bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; + bp->fw_rx_stats_ext_size = + le16_to_cpu(resp_qs->rx_stat_size) / 8; bp->fw_tx_stats_ext_size = tx_stat_size ? 
- le16_to_cpu(resp->tx_stat_size) / 8 : 0; + le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; } else { bp->fw_rx_stats_ext_size = 0; bp->fw_tx_stats_ext_size = 0; } + hwrm_req_drop(bp, req_qs); + if (flags) - goto qstats_done; + return rc; if (bp->fw_tx_stats_ext_size <= offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { - mutex_unlock(&bp->hwrm_cmd_lock); bp->pri2cos_valid = 0; return rc; } - bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); - req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); + if (rc) + return rc; + + req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); - rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); + resp_qc = hwrm_req_hold(bp, req_qc); + rc = hwrm_req_send(bp, req_qc); if (!rc) { - struct hwrm_queue_pri2cos_qcfg_output *resp2; u8 *pri2cos; int i, j; - resp2 = bp->hwrm_cmd_resp_addr; - pri2cos = &resp2->pri0_cos_queue_id; + pri2cos = &resp_qc->pri0_cos_queue_id; for (i = 0; i < 8; i++) { u8 queue_id = pri2cos[i]; u8 queue_idx; @@ -8258,17 +8204,18 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) queue_idx = queue_id % 10; if (queue_idx > BNXT_MAX_QUEUE) { bp->pri2cos_valid = false; - goto qstats_done; + hwrm_req_drop(bp, req_qc); + return rc; } for (j = 0; j < bp->max_q; j++) { if (bp->q_ids[j] == queue_id) bp->pri2cos_idx[i] = queue_idx; } } - bp->pri2cos_valid = 1; + bp->pri2cos_valid = true; } -qstats_done: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req_qc); + return rc; } @@ -8343,35 +8290,46 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; + u8 evb_mode; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); if (br_mode == BRIDGE_MODE_VEB) - req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; + evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; else if (br_mode == BRIDGE_MODE_VEPA) - req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; + evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; else return -EINVAL; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); + req->evb_mode = evb_mode; + return hwrm_req_send(bp, req); } static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; + int rc; if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); - req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); + req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; if (size == 128) - req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; + req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) @@ -9319,18 +9277,20 @@ 
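
The qstats_ext conversion above is worth pausing on. The old code had to hold hwrm_cmd_lock across two different commands because both shared the single bp->hwrm_cmd_resp_addr buffer; with per-request response buffers the commands decouple, each bracketed by its own hold/drop, and the response pointer dies with its drop. Roughly, with names from the hunk:

    struct hwrm_port_qstats_ext_output *resp_qs;
    struct hwrm_port_qstats_ext_input *req_qs;
    struct hwrm_queue_pri2cos_qcfg_input *req_qc;
    int rc;

    rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
    if (rc)
            return rc;
    resp_qs = hwrm_req_hold(bp, req_qs);
    rc = hwrm_req_send(bp, req_qs);
    if (!rc)
            bp->fw_rx_stats_ext_size = le16_to_cpu(resp_qs->rx_stat_size) / 8;
    hwrm_req_drop(bp, req_qs);      /* resp_qs is invalid past this point */

    rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);  /* independent request */
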
static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) { - int rc = 0; - struct hwrm_port_phy_qcaps_input req = {0}; - struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_link_info *link_info = &bp->link_info; + struct hwrm_port_phy_qcaps_output *resp; + struct hwrm_port_phy_qcaps_input *req; + int rc = 0; if (bp->hwrm_spec_code < 0x10201) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_phy_qcaps_exit; @@ -9368,7 +9328,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) bp->port_count = resp->port_cnt; hwrm_phy_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -9381,19 +9341,21 @@ static bool bnxt_support_dropped(u16 advertising, u16 supported) int bnxt_update_link(struct bnxt *bp, bool chng_link_state) { - int rc = 0; struct bnxt_link_info *link_info = &bp->link_info; - struct hwrm_port_phy_qcfg_input req = {0}; - struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_qcfg_output *resp; + struct hwrm_port_phy_qcfg_input *req; u8 link_up = link_info->link_up; bool support_changed = false; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -9488,7 +9450,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state) /* always link down if not required to update link state */ link_info->link_up = 0; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (!BNXT_PHY_CFG_ABLE(bp)) return 0; @@ -9598,18 +9560,20 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_ int bnxt_hwrm_set_pause(struct bnxt *bp) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); - bnxt_hwrm_set_pause_common(bp, &req); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + + bnxt_hwrm_set_pause_common(bp, req); if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || bp->link_info.force_link_chng) - bnxt_hwrm_set_link_common(bp, &req); + bnxt_hwrm_set_link_common(bp, req); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { /* since changing of pause setting doesn't trigger any link * change event, the driver needs to update the current pause @@ -9622,7 +9586,6 @@ int bnxt_hwrm_set_pause(struct bnxt *bp) bnxt_report_link(bp); } bp->link_info.force_link_chng = false; - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -9651,22 +9614,27 @@ static void bnxt_hwrm_set_eee(struct bnxt *bp, int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return
rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); if (set_pause) - bnxt_hwrm_set_pause_common(bp, &req); + bnxt_hwrm_set_pause_common(bp, req); - bnxt_hwrm_set_link_common(bp, &req); + bnxt_hwrm_set_link_common(bp, req); if (set_eee) - bnxt_hwrm_set_eee(bp, &req); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + bnxt_hwrm_set_eee(bp, req); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_shutdown_link(struct bnxt *bp) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; + int rc; if (!BNXT_SINGLE_PF(bp)) return 0; @@ -9675,9 +9643,12 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); - req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); + return hwrm_req_send(bp, req); } static int bnxt_fw_init_one(struct bnxt *bp); @@ -9703,16 +9674,14 @@ static int bnxt_try_recover_fw(struct bnxt *bp) int retry = 0, rc; u32 sts; - mutex_lock(&bp->hwrm_cmd_lock); do { sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); - rc = __bnxt_hwrm_ver_get(bp, true); + rc = bnxt_hwrm_poll(bp); if (!BNXT_FW_IS_BOOTING(sts) && !BNXT_FW_IS_RECOVERING(sts)) break; retry++; } while (rc == -EBUSY && retry < BNXT_FW_RETRY); - mutex_unlock(&bp->hwrm_cmd_lock); if (!BNXT_FW_IS_HEALTHY(sts)) { netdev_err(bp->dev, @@ -9732,8 +9701,8 @@ static int bnxt_try_recover_fw(struct bnxt *bp) static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) { - struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_drv_if_change_input req = {0}; + struct hwrm_func_drv_if_change_output *resp; + struct hwrm_func_drv_if_change_input *req; bool fw_reset = !bp->irq_tbl; bool resc_reinit = false; int rc, retry = 0; @@ -9742,29 +9711,34 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); + if (rc) + return rc; + if (up) - req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); - mutex_lock(&bp->hwrm_cmd_lock); + req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); + resp = hwrm_req_hold(bp, req); + + hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); while (retry < BNXT_FW_IF_RETRY) { - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc != -EAGAIN) break; msleep(50); retry++; } - if (!rc) - flags = le32_to_cpu(resp->flags); - mutex_unlock(&bp->hwrm_cmd_lock); - if (rc == -EAGAIN) + if (rc == -EAGAIN) { + hwrm_req_drop(bp, req); return rc; - if (rc && up) { + } else if (!rc) { + flags = le32_to_cpu(resp->flags); + } else if (up) { rc = bnxt_try_recover_fw(bp); fw_reset = true; } + hwrm_req_drop(bp, req); if (rc) return rc; @@ -9833,8 +9807,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) { - struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_led_qcaps_input req = {0}; + struct hwrm_port_led_qcaps_output *resp; + struct hwrm_port_led_qcaps_input *req; struct bnxt_pf_info *pf = &bp->pf; int rc; @@ -9842,12 +9816,15 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt 
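
The FUNC_DRV_IF_CHANGE retry loop above relies on another property of the new API: a held request is not consumed by hwrm_req_send(), so the same request can be retried without re-initializing it. A sketch of the shape, with BNXT_FW_IF_RETRY and the -EAGAIN convention taken from the hunk:

    struct hwrm_func_drv_if_change_output *resp;
    struct hwrm_func_drv_if_change_input *req;
    int rc, retry = 0;
    u32 flags = 0;

    rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
    if (rc)
            return rc;
    resp = hwrm_req_hold(bp, req);          /* keeps req alive across sends */
    while (retry < BNXT_FW_IF_RETRY) {
            rc = hwrm_req_send(bp, req);    /* a held request may be resent */
            if (rc != -EAGAIN)
                    break;
            msleep(50);
            retry++;
    }
    if (!rc)
            flags = le32_to_cpu(resp->flags);
    hwrm_req_drop(bp, req);                 /* one drop pairs with the one hold */
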
*bp) if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); - req.port_id = cpu_to_le16(pf->port_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); + if (rc) + return rc; + + req->port_id = cpu_to_le16(pf->port_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { @@ -9867,52 +9844,64 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) } } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) { - struct hwrm_wol_filter_alloc_input req = {0}; - struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_wol_filter_alloc_output *resp; + struct hwrm_wol_filter_alloc_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; - req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); - memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; + req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); + memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) bp->wol_filter_id = resp->wol_filter_id; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) { - struct hwrm_wol_filter_free_input req = {0}; + struct hwrm_wol_filter_free_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); + req->wol_filter_id = bp->wol_filter_id; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); - req.wol_filter_id = bp->wol_filter_id; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) { - struct hwrm_wol_filter_qcfg_input req = {0}; - struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_wol_filter_qcfg_output *resp; + struct hwrm_wol_filter_qcfg_input *req; u16 next_handle = 0; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.handle = cpu_to_le16(handle); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->handle = cpu_to_le16(handle); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { next_handle = le16_to_cpu(resp->next_handle); if (next_handle != 0) { @@ -9923,7 +9912,7 @@ static u16 bnxt_hwrm_get_wol_fltrs(struct 
bnxt *bp, u16 handle) } } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return next_handle; } @@ -9944,19 +9933,20 @@ static void bnxt_get_wol_settings(struct bnxt *bp) static ssize_t bnxt_show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { - struct hwrm_temp_monitor_query_input req = {0}; struct hwrm_temp_monitor_query_output *resp; + struct hwrm_temp_monitor_query_input *req; struct bnxt *bp = dev_get_drvdata(dev); u32 len = 0; int rc; - resp = bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY); + if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (rc) return rc; return len; @@ -9979,12 +9969,13 @@ static void bnxt_hwmon_close(struct bnxt *bp) static void bnxt_hwmon_open(struct bnxt *bp) { - struct hwrm_temp_monitor_query_input req = {0}; + struct hwrm_temp_monitor_query_input *req; struct pci_dev *pdev = bp->pdev; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY); + if (!rc) + rc = hwrm_req_send_silent(bp, req); if (rc == -EACCES || rc == -EOPNOTSUPP) { bnxt_hwmon_close(bp); return; @@ -10209,7 +10200,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_tx_enable(bp); mod_timer(&bp->timer, jiffies + bp->current_interval); /* Poll link status and check for SFP+ module status */ + mutex_lock(&bp->link_lock); bnxt_get_port_module_status(bp); + mutex_unlock(&bp->link_lock); /* VF-reps may need to be re-opened after the PF is re-opened */ if (BNXT_PF(bp)) @@ -10316,15 +10309,9 @@ static int bnxt_open(struct net_device *dev) if (rc) return rc; - if (bnxt_ptp_init(bp)) { - netdev_warn(dev, "PTP initialization failed.\n"); - kfree(bp->ptp_cfg); - bp->ptp_cfg = NULL; - } rc = __bnxt_open_nic(bp, true, true); if (rc) { bnxt_hwrm_if_change(bp, false); - bnxt_ptp_clear(bp); } else { if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { @@ -10415,7 +10402,6 @@ static int bnxt_close(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); - bnxt_ptp_clear(bp); bnxt_hwmon_close(bp); bnxt_close_nic(bp, true, true); bnxt_hwrm_shutdown_link(bp); @@ -10426,53 +10412,60 @@ static int bnxt_close(struct net_device *dev) static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, u16 *val) { - struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_phy_mdio_read_input req = {0}; + struct hwrm_port_phy_mdio_read_output *resp; + struct hwrm_port_phy_mdio_read_input *req; int rc; if (bp->hwrm_spec_code < 0x10a00) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.phy_addr = phy_addr; - req.reg_addr = cpu_to_le16(reg & 0x1f); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->phy_addr = phy_addr; + req->reg_addr = cpu_to_le16(reg & 0x1f); if (mdio_phy_id_is_c45(phy_addr)) { - req.cl45_mdio = 1; - req.phy_addr = 
mdio_phy_id_prtad(phy_addr); - req.dev_addr = mdio_phy_id_devad(phy_addr); - req.reg_addr = cpu_to_le16(reg); + req->cl45_mdio = 1; + req->phy_addr = mdio_phy_id_prtad(phy_addr); + req->dev_addr = mdio_phy_id_devad(phy_addr); + req->reg_addr = cpu_to_le16(reg); } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) *val = le16_to_cpu(resp->reg_data); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, u16 val) { - struct hwrm_port_phy_mdio_write_input req = {0}; + struct hwrm_port_phy_mdio_write_input *req; + int rc; if (bp->hwrm_spec_code < 0x10a00) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.phy_addr = phy_addr; - req.reg_addr = cpu_to_le16(reg & 0x1f); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->phy_addr = phy_addr; + req->reg_addr = cpu_to_le16(reg & 0x1f); if (mdio_phy_id_is_c45(phy_addr)) { - req.cl45_mdio = 1; - req.phy_addr = mdio_phy_id_prtad(phy_addr); - req.dev_addr = mdio_phy_id_devad(phy_addr); - req.reg_addr = cpu_to_le16(reg); + req->cl45_mdio = 1; + req->phy_addr = mdio_phy_id_prtad(phy_addr); + req->dev_addr = mdio_phy_id_devad(phy_addr); + req->reg_addr = cpu_to_le16(reg); } - req.reg_data = cpu_to_le16(val); + req->reg_data = cpu_to_le16(val); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } /* rtnl_lock held */ @@ -10551,6 +10544,10 @@ static void bnxt_get_ring_stats(struct bnxt *bp, stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); + + stats->rx_dropped += + cpr->sw_stats.rx.rx_netpoll_discards + + cpr->sw_stats.rx.rx_oom_discards; } } @@ -10565,6 +10562,7 @@ static void bnxt_add_prev_stats(struct bnxt *bp, stats->tx_bytes += prev_stats->tx_bytes; stats->rx_missed_errors += prev_stats->rx_missed_errors; stats->multicast += prev_stats->multicast; + stats->rx_dropped += prev_stats->rx_dropped; stats->tx_dropped += prev_stats->tx_dropped; } @@ -10709,6 +10707,7 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp) { struct net_device *dev = bp->dev; struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct hwrm_cfa_l2_filter_free_input *req; struct netdev_hw_addr *ha; int i, off = 0, rc; bool uc_update; @@ -10720,19 +10719,16 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp) if (!uc_update) goto skip_uc; - mutex_lock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); + if (rc) + return rc; + hwrm_req_hold(bp, req); for (i = 1; i < vnic->uc_filter_count; i++) { - struct hwrm_cfa_l2_filter_free_input req = {0}; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, - -1); + req->l2_filter_id = vnic->fw_l2_filter_id[i]; - req.l2_filter_id = vnic->fw_l2_filter_id[i]; - - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); vnic->uc_filter_count = 1; @@ -11084,22 +11080,30 @@ static netdev_features_t bnxt_features_check(struct sk_buff *skb, int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, u32 *reg_buf) { - struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr; - struct 
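
bnxt_cfg_rx_mode() above takes the same idea one step further: one CFA_L2_FILTER_FREE request is initialized and held once, then mutated and resent for every filter, replacing a loop that rebuilt a stack request per iteration. Condensed:

    struct hwrm_cfa_l2_filter_free_input *req;
    int i, rc;

    rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
    if (rc)
            return rc;
    hwrm_req_hold(bp, req);         /* stop each send from consuming req */
    for (i = 1; i < vnic->uc_filter_count; i++) {
            req->l2_filter_id = vnic->fw_l2_filter_id[i];   /* mutate, resend */
            rc = hwrm_req_send(bp, req);
    }
    hwrm_req_drop(bp, req);
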
hwrm_dbg_read_direct_input req = {0}; + struct hwrm_dbg_read_direct_output *resp; + struct hwrm_dbg_read_direct_input *req; __le32 *dbg_reg_buf; dma_addr_t mapping; int rc, i; - dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4, - &mapping, GFP_KERNEL); - if (!dbg_reg_buf) - return -ENOMEM; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1); - req.host_dest_addr = cpu_to_le64(mapping); - req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); - req.read_len32 = cpu_to_le32(num_words); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); + if (rc) + return rc; + + dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4, + &mapping); + if (!dbg_reg_buf) { + rc = -ENOMEM; + goto dbg_rd_reg_exit; + } + + req->host_dest_addr = cpu_to_le64(mapping); + + resp = hwrm_req_hold(bp, req); + req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); + req->read_len32 = cpu_to_le32(num_words); + + rc = hwrm_req_send(bp, req); if (rc || resp->error_code) { rc = -EIO; goto dbg_rd_reg_exit; @@ -11108,28 +11112,30 @@ int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); dbg_rd_reg_exit: - mutex_unlock(&bp->hwrm_cmd_lock); - dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping); + hwrm_req_drop(bp, req); return rc; } static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, u32 ring_id, u32 *prod, u32 *cons) { - struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_dbg_ring_info_get_input req = {0}; + struct hwrm_dbg_ring_info_get_output *resp; + struct hwrm_dbg_ring_info_get_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1); - req.ring_type = ring_type; - req.fw_ring_id = cpu_to_le32(ring_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); + if (rc) + return rc; + + req->ring_type = ring_type; + req->fw_ring_id = cpu_to_le32(ring_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { *prod = le32_to_cpu(resp->producer_index); *cons = le32_to_cpu(resp->consumer_index); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -11187,18 +11193,22 @@ static void bnxt_dbg_dump_states(struct bnxt *bp) static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; - struct hwrm_ring_reset_input req = {0}; + struct hwrm_ring_reset_input *req; struct bnxt_napi *bnapi = rxr->bnapi; struct bnxt_cp_ring_info *cpr; u16 cp_ring_id; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_RING_RESET); + if (rc) + return rc; cpr = &bnapi->cp_ring; cp_ring_id = cpr->cp_ring_struct.fw_ring_id; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1); - req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; - req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); - return hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->cmpl_ring = cpu_to_le16(cp_ring_id); + req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; + req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); + return hwrm_req_send_silent(bp, req); } static void bnxt_reset_task(struct bnxt *bp, bool silent) @@ -11405,7 +11415,6 @@ static void bnxt_fw_reset_close(struct bnxt *bp) bnxt_clear_int_mode(bp); 
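
The bnxt_dbg_hwrm_rd_reg() conversion above introduces hwrm_req_dma_slice(): a DMA-coherent buffer carved out for the request, with its lifetime tied to the request itself, so the explicit dma_free_coherent() on every exit path disappears. A sketch under that reading; num_words and reg_buf are the caller's, as in the hunk:

    struct hwrm_dbg_read_direct_input *req;
    dma_addr_t mapping;
    __le32 *buf;
    int rc;

    rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
    if (rc)
            return rc;
    buf = hwrm_req_dma_slice(bp, req, num_words * 4, &mapping);
    if (!buf) {
            hwrm_req_drop(bp, req);         /* slice failed, request did not */
            return -ENOMEM;
    }
    req->host_dest_addr = cpu_to_le64(mapping);     /* firmware DMAs into the slice */
    rc = hwrm_req_send(bp, req);
    if (!rc)
            reg_buf[0] = le32_to_cpu(buf[0]);       /* copy out before dropping */
    hwrm_req_drop(bp, req);         /* frees the slice together with the request */
    return rc;
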
pci_disable_device(bp->pdev); } - bnxt_ptp_clear(bp); __bnxt_close_nic(bp, true, false); bnxt_vf_reps_free(bp); bnxt_clear_int_mode(bp); @@ -11441,13 +11450,20 @@ static bool is_bnxt_fw_ok(struct bnxt *bp) static void bnxt_force_fw_reset(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; u32 wait_dsecs; if (!test_bit(BNXT_STATE_OPEN, &bp->state) || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) return; - set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (ptp) { + spin_lock_bh(&ptp->ptp_lock); + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + spin_unlock_bh(&ptp->ptp_lock); + } else { + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + } bnxt_fw_reset_close(bp); wait_dsecs = fw_health->master_func_wait_dsecs; if (fw_health->master) { @@ -11503,9 +11519,16 @@ void bnxt_fw_reset(struct bnxt *bp) bnxt_rtnl_lock_sp(bp); if (test_bit(BNXT_STATE_OPEN, &bp->state) && !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; int n = 0, tmo; - set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (ptp) { + spin_lock_bh(&ptp->ptp_lock); + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + spin_unlock_bh(&ptp->ptp_lock); + } else { + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + } if (bp->pf.active_vfs && !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) n = bnxt_get_registered_vfs(bp); @@ -11614,12 +11637,15 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp) static void bnxt_fw_echo_reply(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; - struct hwrm_func_echo_response_input req = {0}; + struct hwrm_func_echo_response_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1); - req.event_data1 = cpu_to_le32(fw_health->echo_req_data1); - req.event_data2 = cpu_to_le32(fw_health->echo_req_data2); - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); + if (rc) + return; + req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); + req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); + hwrm_req_send(bp, req); } static void bnxt_sp_task(struct work_struct *work) @@ -11824,18 +11850,6 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp) return rc; } - if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { - rc = bnxt_alloc_kong_hwrm_resources(bp); - if (rc) - bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; - } - - if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || - bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { - rc = bnxt_alloc_hwrm_short_cmd_req(bp); - if (rc) - return rc; - } bnxt_nvm_cfg_ver_get(bp); rc = bnxt_hwrm_func_reset(bp); @@ -12010,14 +12024,16 @@ static void bnxt_reset_all(struct bnxt *bp) for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) bnxt_fw_reset_writel(bp, i); } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { - struct hwrm_fw_reset_input req = {0}; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); - req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; - req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; - req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + struct hwrm_fw_reset_input *req; + + rc = hwrm_req_init(bp, req, HWRM_FW_RESET); + if (!rc) { + req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); + req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; + req->selfrst_status = 
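
In bnxt_reset_all() below, routing the FW_RESET command to the auxiliary ("Kong") management channel shrinks to a single field: the old code pointed resp_addr at a dedicated Kong response buffer, while the new code just tags the request. A sketch, assuming HWRM_TARGET_ID_KONG as used in the hunk:

    struct hwrm_fw_reset_input *req;
    int rc;

    rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
    if (!rc) {
            req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);  /* route to Kong */
            req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
            rc = hwrm_req_send(bp, req);    /* response routing handled internally */
    }
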
FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; + req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; + rc = hwrm_req_send(bp, req); + } if (rc != -ENODEV) netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); } @@ -12144,7 +12160,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) fallthrough; case BNXT_FW_RESET_STATE_POLL_FW: bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; - rc = __bnxt_hwrm_ver_get(bp, true); + rc = bnxt_hwrm_poll(bp); if (rc) { if (bnxt_fw_reset_timeout(bp)) { netdev_err(bp->dev, "Firmware reset aborted\n"); @@ -12177,6 +12193,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_reenable_sriov(bp); bnxt_vf_reps_alloc(bp); bnxt_vf_reps_open(bp); + bnxt_ptp_reapply_pps(bp); bnxt_dl_health_recovery_done(bp); bnxt_dl_health_status_update(bp, true); rtnl_unlock(); @@ -12708,7 +12725,7 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_stop = bnxt_close, .ndo_get_stats64 = bnxt_get_stats64, .ndo_set_rx_mode = bnxt_set_rx_mode, - .ndo_do_ioctl = bnxt_ioctl, + .ndo_eth_ioctl = bnxt_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = bnxt_change_mac_addr, .ndo_change_mtu = bnxt_change_mtu, @@ -12747,6 +12764,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) if (BNXT_PF(bp)) devlink_port_type_clear(&bp->dl_port); + bnxt_ptp_clear(bp); pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); @@ -12762,7 +12780,6 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_clear_int_mode(bp); bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); - bnxt_free_hwrm_short_cmd_req(bp); bnxt_ethtool_free(bp); bnxt_dcb_free(bp); kfree(bp->edev); @@ -12800,8 +12817,10 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) if (!fw_dflt) return 0; + mutex_lock(&bp->link_lock); rc = bnxt_update_link(bp, false); if (rc) { + mutex_unlock(&bp->link_lock); netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", rc); return rc; @@ -12814,6 +12833,7 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) link_info->support_auto_speeds = link_info->support_speeds; bnxt_init_ethtool_link_settings(bp); + mutex_unlock(&bp->link_lock); return 0; } @@ -13085,6 +13105,12 @@ static void bnxt_vpd_read_info(struct bnxt *bp) goto exit; } + i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); + if (i < 0) { + netdev_err(bp->dev, "VPD READ-Only not found\n"); + goto exit; + } + ro_size = pci_vpd_lrdt_size(&vpd_data[i]); i += PCI_VPD_LRDT_TAG_SIZE; if (i + ro_size > vpd_size) @@ -13356,9 +13382,9 @@ init_err_cleanup: init_err_pci_clean: bnxt_hwrm_func_drv_unrgtr(bp); - bnxt_free_hwrm_short_cmd_req(bp); bnxt_free_hwrm_resources(bp); bnxt_ethtool_free(bp); + bnxt_ptp_clear(bp); kfree(bp->ptp_cfg); bp->ptp_cfg = NULL; kfree(bp->fw_health); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index ba4e0fc38520..a8212dcdad5f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -496,6 +496,16 @@ struct rx_tpa_end_cmp_ext { !!((data1) & \ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED) +#define BNXT_EVENT_ERROR_REPORT_TYPE(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>\ + ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT) + +#define BNXT_EVENT_INVALID_SIGNAL_DATA(data2) \ + (((data2) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >>\ + 
ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT) + struct nqe_cn { __le16 type; #define NQ_CN_TYPE_MASK 0x3fUL @@ -586,15 +596,17 @@ struct nqe_cn { #define MAX_TPA_SEGS_P5 0x3f #if (BNXT_PAGE_SHIFT == 16) -#define MAX_RX_PAGES 1 +#define MAX_RX_PAGES_AGG_ENA 1 +#define MAX_RX_PAGES 4 #define MAX_RX_AGG_PAGES 4 #define MAX_TX_PAGES 1 -#define MAX_CP_PAGES 8 +#define MAX_CP_PAGES 16 #else -#define MAX_RX_PAGES 8 +#define MAX_RX_PAGES_AGG_ENA 8 +#define MAX_RX_PAGES 32 #define MAX_RX_AGG_PAGES 32 #define MAX_TX_PAGES 8 -#define MAX_CP_PAGES 64 +#define MAX_CP_PAGES 128 #endif #define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd)) @@ -612,6 +624,7 @@ struct nqe_cn { #define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT) #define BNXT_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1) +#define BNXT_MAX_RX_DESC_CNT_JUM_ENA (RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1) #define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1) #define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1) @@ -656,37 +669,7 @@ struct nqe_cn { #define RING_CMP(idx) ((idx) & bp->cp_ring_mask) #define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) -#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) -#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) #define DFLT_HWRM_CMD_TIMEOUT 500 -#define HWRM_CMD_MAX_TIMEOUT 40000 -#define SHORT_HWRM_CMD_TIMEOUT 20 -#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) -#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) -#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) -#define BNXT_HWRM_REQ_MAX_SIZE 128 -#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ - BNXT_HWRM_REQ_MAX_SIZE) -#define HWRM_SHORT_MIN_TIMEOUT 3 -#define HWRM_SHORT_MAX_TIMEOUT 10 -#define HWRM_SHORT_TIMEOUT_COUNTER 5 - -#define HWRM_MIN_TIMEOUT 25 -#define HWRM_MAX_TIMEOUT 40 - -#define HWRM_WAIT_MUST_ABORT(bp, req) \ - (le16_to_cpu((req)->req_type) != HWRM_VER_GET && \ - !bnxt_is_fw_healthy(bp)) - -#define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? 
\ - ((n) * HWRM_SHORT_MIN_TIMEOUT) : \ - (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ - ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) - -#define HWRM_VALID_BIT_DELAY_USEC 150 - -#define BNXT_HWRM_CHNL_CHIMP 0 -#define BNXT_HWRM_CHNL_KONG 1 #define BNXT_RX_EVENT 1 #define BNXT_AGG_EVENT 2 @@ -926,6 +909,8 @@ struct bnxt_rx_sw_stats { u64 rx_l4_csum_errors; u64 rx_resets; u64 rx_buf_errors; + u64 rx_oom_discards; + u64 rx_netpoll_discards; }; struct bnxt_cmn_sw_stats { @@ -963,11 +948,11 @@ struct bnxt_cp_ring_info { struct dim dim; union { - struct tx_cmp *cp_desc_ring[MAX_CP_PAGES]; - struct nqe_cn *nq_desc_ring[MAX_CP_PAGES]; + struct tx_cmp **cp_desc_ring; + struct nqe_cn **nq_desc_ring; }; - dma_addr_t cp_desc_mapping[MAX_CP_PAGES]; + dma_addr_t *cp_desc_mapping; struct bnxt_stats_mem stats; u32 hw_stats_ctx_id; @@ -1888,19 +1873,15 @@ struct bnxt { #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000 #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000 #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000 + #define BNXT_FW_CAP_PTP_PPS 0x10000000 #define BNXT_FW_CAP_RING_MONITOR 0x40000000 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; u16 hwrm_cmd_seq; u16 hwrm_cmd_kong_seq; - u16 hwrm_intr_seq_id; - void *hwrm_short_cmd_req_addr; - dma_addr_t hwrm_short_cmd_req_dma_addr; - void *hwrm_cmd_resp_addr; - dma_addr_t hwrm_cmd_resp_dma_addr; - void *hwrm_cmd_kong_resp_addr; - dma_addr_t hwrm_cmd_kong_resp_dma_addr; + struct dma_pool *hwrm_dma_pool; + struct hlist_head hwrm_pending_list; struct rtnl_link_stats64 net_stats_prev; struct bnxt_stats_mem port_stats; @@ -2000,7 +1981,7 @@ struct bnxt { struct mutex sriov_lock; #endif -#if BITS_PER_LONG == 32 +#ifndef writeq /* ensure atomic 64-bit doorbell writes on 32-bit systems. 
*/ spinlock_t db_lock; #endif @@ -2129,7 +2110,7 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); } -#if BITS_PER_LONG == 32 +#ifndef writeq #define writeq(val64, db) \ do { \ spin_lock(&bp->db_lock); \ @@ -2171,63 +2152,6 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db, } } -static inline bool bnxt_cfa_hwrm_message(u16 req_type) -{ - switch (req_type) { - case HWRM_CFA_ENCAP_RECORD_ALLOC: - case HWRM_CFA_ENCAP_RECORD_FREE: - case HWRM_CFA_DECAP_FILTER_ALLOC: - case HWRM_CFA_DECAP_FILTER_FREE: - case HWRM_CFA_EM_FLOW_ALLOC: - case HWRM_CFA_EM_FLOW_FREE: - case HWRM_CFA_EM_FLOW_CFG: - case HWRM_CFA_FLOW_ALLOC: - case HWRM_CFA_FLOW_FREE: - case HWRM_CFA_FLOW_INFO: - case HWRM_CFA_FLOW_FLUSH: - case HWRM_CFA_FLOW_STATS: - case HWRM_CFA_METER_PROFILE_ALLOC: - case HWRM_CFA_METER_PROFILE_FREE: - case HWRM_CFA_METER_PROFILE_CFG: - case HWRM_CFA_METER_INSTANCE_ALLOC: - case HWRM_CFA_METER_INSTANCE_FREE: - return true; - default: - return false; - } -} - -static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req) -{ - return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && - bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type))); -} - -static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req) -{ - return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && - req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr)); -} - -static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req) -{ - if (bnxt_hwrm_kong_chnl(bp, (struct input *)req)) - return bp->hwrm_cmd_kong_resp_addr; - else - return bp->hwrm_cmd_resp_addr; -} - -static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst) -{ - u16 seq_id; - - if (dst == BNXT_HWRM_CHNL_CHIMP) - seq_id = bp->hwrm_cmd_seq++; - else - seq_id = bp->hwrm_cmd_kong_seq++; - return seq_id; -} - extern const u16 bnxt_lhint_arr[]; int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, @@ -2237,11 +2161,6 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx); void bnxt_set_tpa_flags(struct bnxt *bp); void bnxt_set_ring_params(struct bnxt *); int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); -void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); -int _hwrm_send_message(struct bnxt *, void *, u32, int); -int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout); -int hwrm_send_message(struct bnxt *, void *, u32, int); -int hwrm_send_message_silent(struct bnxt *, void *, u32, int); int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, bool async_only); int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 8a68df4d9e59..228a5db7e143 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -18,6 +18,7 @@ #include <rdma/ib_verbs.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_dcb.h" #ifdef CONFIG_BNXT_DCB @@ -38,38 +39,43 @@ static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id) static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets) { - struct hwrm_queue_pri2cos_cfg_input req = {0}; + struct hwrm_queue_pri2cos_cfg_input *req; u8 *pri2cos; - int i; + int rc, i; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1); - 
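
The bnxt.h hunk above also changes when the spinlocked 64-bit doorbell fallback is built: keying on #ifndef writeq instead of BITS_PER_LONG == 32 means any platform that defines writeq (natively, or presumably via the io-64-nonatomic headers) uses it directly, and the db_lock emulation compiles only where no writeq exists at all. An illustrative caller; the db_key64 and doorbell field names are assumed from struct bnxt_db_info, which is not shown in this excerpt:

    static inline void bnxt_db_write_sketch(struct bnxt *bp,
                                            struct bnxt_db_info *db, u32 idx)
    {
            /* takes bp->db_lock around two 32-bit writes only where emulated */
            writeq(db->db_key64 | idx, db->doorbell);
    }
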
req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR | - QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN); + req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR | + QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN); - pri2cos = &req.pri0_cos_queue_id; + pri2cos = &req->pri0_cos_queue_id; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { u8 qidx; - req.enables |= cpu_to_le32( + req->enables |= cpu_to_le32( QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i); qidx = bp->tc_to_qidx[ets->prio_tc[i]]; pri2cos[i] = bp->q_info[qidx].queue_id; } - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) { - struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_pri2cos_qcfg_input req = {0}; - int rc = 0; + struct hwrm_queue_pri2cos_qcfg_output *resp; + struct hwrm_queue_pri2cos_qcfg_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); - req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { u8 *pri2cos = &resp->pri0_cos_queue_id; int i; @@ -83,23 +89,26 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) ets->prio_tc[i] = tc; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, u8 max_tc) { - struct hwrm_queue_cos2bw_cfg_input req = {0}; + struct hwrm_queue_cos2bw_cfg_input *req; struct bnxt_cos2bw_cfg cos2bw; void *data; - int i; + int rc, i; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1); for (i = 0; i < max_tc; i++) { u8 qidx = bp->tc_to_qidx[i]; - req.enables |= cpu_to_le32( + req->enables |= cpu_to_le32( QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << qidx); @@ -120,30 +129,32 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, cpu_to_le32((ets->tc_tx_bw[i] * 100) | BW_VALUE_UNIT_PERCENT1_100); } - data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4); + data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4); memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); if (qidx == 0) { - req.queue_id0 = cos2bw.queue_id; - req.unused_0 = 0; + req->queue_id0 = cos2bw.queue_id; + req->unused_0 = 0; } } - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) { - struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_cos2bw_qcfg_input req = {0}; + struct hwrm_queue_cos2bw_qcfg_output *resp; + struct hwrm_queue_cos2bw_qcfg_input *req; struct bnxt_cos2bw_cfg cos2bw; void *data; int rc, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -167,7 +178,7 
@@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) ets->tc_tx_bw[tc] = cos2bw.bw_weight; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } @@ -229,11 +240,12 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask) static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) { - struct hwrm_queue_pfcenable_cfg_input req = {0}; + struct hwrm_queue_pfcenable_cfg_input *req; struct ieee_ets *my_ets = bp->ieee_ets; unsigned int tc_mask = 0, pri_mask = 0; u8 i, pri, lltc_count = 0; bool need_q_remap = false; + int rc; if (!my_ets) return -EINVAL; @@ -266,38 +278,43 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) if (need_q_remap) bnxt_queue_remap(bp, tc_mask); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1); - req.flags = cpu_to_le32(pri_mask); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(pri_mask); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc) { - struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_pfcenable_qcfg_input req = {0}; + struct hwrm_queue_pfcenable_qcfg_output *resp; + struct hwrm_queue_pfcenable_qcfg_input *req; u8 pri_mask; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } pri_mask = le32_to_cpu(resp->flags); pfc->pfc_en = pri_mask; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, bool add) { - struct hwrm_fw_set_structured_data_input set = {0}; - struct hwrm_fw_get_structured_data_input get = {0}; + struct hwrm_fw_set_structured_data_input *set; + struct hwrm_fw_get_structured_data_input *get; struct hwrm_struct_data_dcbx_app *fw_app; struct hwrm_struct_hdr *data; dma_addr_t mapping; @@ -307,19 +324,26 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, if (bp->hwrm_spec_code < 0x10601) return 0; + rc = hwrm_req_init(bp, get, HWRM_FW_GET_STRUCTURED_DATA); + if (rc) + return rc; + + hwrm_req_hold(bp, get); + hwrm_req_alloc_flags(bp, get, GFP_KERNEL | __GFP_ZERO); + n = IEEE_8021QAZ_MAX_TCS; data_len = sizeof(*data) + sizeof(*fw_app) * n; - data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping, - GFP_KERNEL); - if (!data) - return -ENOMEM; + data = hwrm_req_dma_slice(bp, get, data_len, &mapping); + if (!data) { + rc = -ENOMEM; + goto set_app_exit; + } - bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1); - get.dest_data_addr = cpu_to_le64(mapping); - get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP); - get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); - get.count = 0; - rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT); + get->dest_data_addr = cpu_to_le64(mapping); + get->structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP); + get->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); + get->count = 0; + rc = hwrm_req_send(bp, get); if (rc) goto 
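
bnxt_hwrm_set_dcbx_app() below exercises two more corners of the API: hwrm_req_alloc_flags(), which as I read the series sets the GFP flags used for the request's later allocations (here GFP_KERNEL | __GFP_ZERO, giving a zeroed buffer), and a DMA slice owned by one request (get) but handed to a second request (set) as its source address. That sharing is why a single hwrm_req_drop(bp, get) at the end frees everything. The shape, condensed:

    struct hwrm_fw_set_structured_data_input *set;
    struct hwrm_fw_get_structured_data_input *get;
    struct hwrm_struct_hdr *data;
    dma_addr_t mapping;
    int rc;

    rc = hwrm_req_init(bp, get, HWRM_FW_GET_STRUCTURED_DATA);
    if (rc)
            return rc;
    hwrm_req_hold(bp, get);                         /* pin get and its slice */
    hwrm_req_alloc_flags(bp, get, GFP_KERNEL | __GFP_ZERO);
    data = hwrm_req_dma_slice(bp, get, data_len, &mapping);

    /* get is sent and data parsed, then set is built via hwrm_req_init() */
    set->src_data_addr = cpu_to_le64(mapping);      /* second command reuses the slice */
    rc = hwrm_req_send(bp, set);

    hwrm_req_drop(bp, get);         /* one drop frees get and the shared buffer */
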
set_app_exit; @@ -365,44 +389,49 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, data->len = cpu_to_le16(sizeof(*fw_app) * n); data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); - bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1); - set.src_data_addr = cpu_to_le64(mapping); - set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n); - set.hdr_cnt = 1; - rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, set, HWRM_FW_SET_STRUCTURED_DATA); + if (rc) + goto set_app_exit; + + set->src_data_addr = cpu_to_le64(mapping); + set->data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n); + set->hdr_cnt = 1; + rc = hwrm_req_send(bp, set); set_app_exit: - dma_free_coherent(&bp->pdev->dev, data_len, data, mapping); + hwrm_req_drop(bp, get); /* dropping get request and associated slice */ return rc; } static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp) { - struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_dscp_qcaps_input req = {0}; + struct hwrm_queue_dscp_qcaps_output *resp; + struct hwrm_queue_dscp_qcaps_input *req; int rc; bp->max_dscp_value = 0; if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1; if (bp->max_dscp_value < 0x3f) bp->max_dscp_value = 0; } - - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, bool add) { - struct hwrm_queue_dscp2pri_cfg_input req = {0}; + struct hwrm_queue_dscp2pri_cfg_input *req; struct bnxt_dscp2pri_entry *dscp2pri; dma_addr_t mapping; int rc; @@ -410,23 +439,25 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, if (bp->hwrm_spec_code < 0x10800) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1); - dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri), - &mapping, GFP_KERNEL); - if (!dscp2pri) + rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP2PRI_CFG); + if (rc) + return rc; + + dscp2pri = hwrm_req_dma_slice(bp, req, sizeof(*dscp2pri), &mapping); + if (!dscp2pri) { + hwrm_req_drop(bp, req); return -ENOMEM; + } - req.src_data_addr = cpu_to_le64(mapping); + req->src_data_addr = cpu_to_le64(mapping); dscp2pri->dscp = app->protocol; if (add) dscp2pri->mask = 0x3f; else dscp2pri->mask = 0; dscp2pri->pri = app->priority; - req.entry_cnt = cpu_to_le16(1); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri, - mapping); + req->entry_cnt = cpu_to_le16(1); + rc = hwrm_req_send(bp, req); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 64381be935a8..1423cc617d93 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -12,6 +12,7 @@ #include <net/devlink.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_vfr.h" #include "bnxt_devlink.h" #include "bnxt_ethtool.h" @@ -354,28 +355,34 @@ static void 
bnxt_copy_from_nvm_data(union devlink_param_value *dst, static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, union devlink_param_value *nvm_cfg_ver) { - struct hwrm_nvm_get_variable_input req = {0}; + struct hwrm_nvm_get_variable_input *req; union bnxt_nvm_data *data; dma_addr_t data_dma_addr; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); - data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data), - &data_dma_addr, GFP_KERNEL); - if (!data) - return -ENOMEM; + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE); + if (rc) + return rc; + + data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr); + if (!data) { + rc = -ENOMEM; + goto exit; + } - req.dest_data_addr = cpu_to_le64(data_dma_addr); - req.data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS); - req.option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER); + hwrm_req_hold(bp, req); + req->dest_data_addr = cpu_to_le64(data_dma_addr); + req->data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS); + req->option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send_silent(bp, req); if (!rc) bnxt_copy_from_nvm_data(nvm_cfg_ver, data, BNXT_NVM_CFG_VER_BITS, BNXT_NVM_CFG_VER_BYTES); - dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); +exit: + hwrm_req_drop(bp, req); return rc; } @@ -562,17 +569,20 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, } static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, - int msg_len, union devlink_param_value *val) + union devlink_param_value *val) { struct hwrm_nvm_get_variable_input *req = msg; struct bnxt_dl_nvm_param nvm_param; + struct hwrm_err_output *resp; union bnxt_nvm_data *data; dma_addr_t data_dma_addr; int idx = 0, rc, i; /* Get/Set NVM CFG parameter is supported only on PFs */ - if (BNXT_VF(bp)) + if (BNXT_VF(bp)) { + hwrm_req_drop(bp, req); return -EPERM; + } for (i = 0; i < ARRAY_SIZE(nvm_params); i++) { if (nvm_params[i].id == param_id) { @@ -581,18 +591,22 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, } } - if (i == ARRAY_SIZE(nvm_params)) + if (i == ARRAY_SIZE(nvm_params)) { + hwrm_req_drop(bp, req); return -EOPNOTSUPP; + } if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) idx = bp->pf.port_id; else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID; - data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data), - &data_dma_addr, GFP_KERNEL); - if (!data) + data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr); + + if (!data) { + hwrm_req_drop(bp, req); return -ENOMEM; + } req->dest_data_addr = cpu_to_le64(data_dma_addr); req->data_len = cpu_to_le16(nvm_param.nvm_num_bits); @@ -601,26 +615,24 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, if (idx) req->dimensions = cpu_to_le16(1); + resp = hwrm_req_hold(bp, req); if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) { bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits, nvm_param.dl_num_bytes); - rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, msg); } else { - rc = hwrm_send_message_silent(bp, msg, msg_len, - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send_silent(bp, msg); if (!rc) { bnxt_copy_from_nvm_data(val, data, nvm_param.nvm_num_bits, nvm_param.dl_num_bytes); } else { - struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; - if (resp->cmd_err == NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST) rc = -EOPNOTSUPP; } } - dma_free_coherent(&bp->pdev->dev, 
sizeof(*data), data, data_dma_addr); + hwrm_req_drop(bp, req); if (rc == -EACCES) netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n"); return rc; @@ -629,15 +641,17 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx) { - struct hwrm_nvm_get_variable_input req = {0}; struct bnxt *bp = bnxt_get_bp_from_dl(dl); + struct hwrm_nvm_get_variable_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); - rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); - if (!rc) - if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) - ctx->val.vbool = !ctx->val.vbool; + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE); + if (rc) + return rc; + + rc = bnxt_hwrm_nvm_req(bp, id, req, &ctx->val); + if (!rc && id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) + ctx->val.vbool = !ctx->val.vbool; return rc; } @@ -645,15 +659,18 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx) { - struct hwrm_nvm_set_variable_input req = {0}; struct bnxt *bp = bnxt_get_bp_from_dl(dl); + struct hwrm_nvm_set_variable_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_NVM_SET_VARIABLE); + if (rc) + return rc; if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) ctx->val.vbool = !ctx->val.vbool; - return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); + return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val); } static int bnxt_dl_msix_validate(struct devlink *dl, u32 id, @@ -743,14 +760,17 @@ static void bnxt_dl_params_unregister(struct bnxt *bp) int bnxt_dl_register(struct bnxt *bp) { + const struct devlink_ops *devlink_ops; struct devlink_port_attrs attrs = {}; struct devlink *dl; int rc; if (BNXT_PF(bp)) - dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + devlink_ops = &bnxt_dl_ops; else - dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl)); + devlink_ops = &bnxt_vf_dl_ops; + + dl = devlink_alloc(devlink_ops, sizeof(struct bnxt_dl), &bp->pdev->dev); if (!dl) { netdev_warn(bp->dev, "devlink_alloc failed\n"); return -ENOMEM; @@ -763,7 +783,7 @@ int bnxt_dl_register(struct bnxt *bp) bp->hwrm_spec_code > 0x10803) bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; - rc = devlink_register(dl, &bp->pdev->dev); + rc = devlink_register(dl); if (rc) { netdev_warn(bp->dev, "devlink_register failed. 
rc=%d\n", rc); goto err_dl_free; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 786ca51e669b..b056e3c29bbd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -24,6 +24,7 @@ #include <linux/timecounter.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_xdp.h" #include "bnxt_ptp.h" #include "bnxt_ethtool.h" @@ -49,7 +50,9 @@ static void bnxt_set_msglevel(struct net_device *dev, u32 value) } static int bnxt_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnxt *bp = netdev_priv(dev); struct bnxt_coal *hw_coal; @@ -79,7 +82,9 @@ static int bnxt_get_coalesce(struct net_device *dev, } static int bnxt_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnxt *bp = netdev_priv(dev); bool update_stats = false; @@ -303,6 +308,7 @@ static const char * const bnxt_cmn_sw_stats_str[] = { enum { RX_TOTAL_DISCARDS, TX_TOTAL_DISCARDS, + RX_NETPOLL_DISCARDS, }; static struct { @@ -311,6 +317,7 @@ static struct { } bnxt_sw_func_stats[] = { {0, "rx_total_discard_pkts"}, {0, "tx_total_discard_pkts"}, + {0, "rx_total_netpoll_discards"}, }; #define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str) @@ -599,6 +606,8 @@ skip_tpa_ring_stats: BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts); bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter += BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts); + bnxt_sw_func_stats[RX_NETPOLL_DISCARDS].counter += + cpr->sw_stats.rx.rx_netpoll_discards; } for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) @@ -768,8 +777,13 @@ static void bnxt_get_ringparam(struct net_device *dev, { struct bnxt *bp = netdev_priv(dev); - ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT; - ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT; + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA; + ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT; + } else { + ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT; + ering->rx_jumbo_max_pending = 0; + } ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT; ering->rx_pending = bp->rx_ring_size; @@ -1352,7 +1366,7 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) { struct pcie_ctx_hw_stats *hw_pcie_stats; - struct hwrm_pcie_qstats_input req = {0}; + struct hwrm_pcie_qstats_input *req; struct bnxt *bp = netdev_priv(dev); dma_addr_t hw_pcie_stats_addr; int rc; @@ -1363,18 +1377,21 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) return; - hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev, - sizeof(*hw_pcie_stats), - &hw_pcie_stats_addr, GFP_KERNEL); - if (!hw_pcie_stats) + if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS)) return; + hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats), + &hw_pcie_stats_addr); + if (!hw_pcie_stats) { + hwrm_req_drop(bp, req); + return; + } + regs->version = 1; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); - req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); - req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, 
sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_hold(bp, req); /* hold on to slice */ + req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); + req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); + rc = hwrm_req_send(bp, req); if (!rc) { __le64 *src = (__le64 *)hw_pcie_stats; u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); @@ -1383,9 +1400,7 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) dst[i] = le64_to_cpu(src[i]); } - mutex_unlock(&bp->hwrm_cmd_lock); - dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats, - hw_pcie_stats_addr); + hwrm_req_drop(bp, req); } static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -1965,7 +1980,7 @@ static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info, static int bnxt_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info; u32 new_cfg, fec = fecparam->fec; @@ -1997,9 +2012,11 @@ static int bnxt_set_fecparam(struct net_device *dev, } apply_fec: - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); - req.flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); + rc = hwrm_req_send(bp, req); /* update current settings */ if (!rc) { mutex_lock(&bp->link_lock); @@ -2093,19 +2110,22 @@ static u32 bnxt_get_link(struct net_device *dev) int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, struct hwrm_nvm_get_dev_info_output *nvm_dev_info) { - struct hwrm_nvm_get_dev_info_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_nvm_get_dev_info_input req = {0}; + struct hwrm_nvm_get_dev_info_output *resp; + struct hwrm_nvm_get_dev_info_input *req; int rc; if (BNXT_VF(bp)) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) memcpy(nvm_dev_info, resp, sizeof(*resp)); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -2118,77 +2138,67 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, u16 ext, u16 *index, u32 *item_length, u32 *data_length); -static int __bnxt_flash_nvram(struct net_device *dev, u16 dir_type, - u16 dir_ordinal, u16 dir_ext, u16 dir_attr, - u32 dir_item_len, const u8 *data, - size_t data_len) +static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, + u16 dir_ordinal, u16 dir_ext, u16 dir_attr, + u32 dir_item_len, const u8 *data, + size_t data_len) { struct bnxt *bp = netdev_priv(dev); + struct hwrm_nvm_write_input *req; int rc; - struct hwrm_nvm_write_input req = {0}; - dma_addr_t dma_handle; - u8 *kmem = NULL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE); + if (rc) + return rc; - req.dir_type = cpu_to_le16(dir_type); - req.dir_ordinal = cpu_to_le16(dir_ordinal); - req.dir_ext = cpu_to_le16(dir_ext); - req.dir_attr = cpu_to_le16(dir_attr); - req.dir_item_length = cpu_to_le32(dir_item_len); if 
(data_len && data) { - req.dir_data_length = cpu_to_le32(data_len); + dma_addr_t dma_handle; + u8 *kmem; - kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle, - GFP_KERNEL); - if (!kmem) + kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle); + if (!kmem) { + hwrm_req_drop(bp, req); return -ENOMEM; + } + + req->dir_data_length = cpu_to_le32(data_len); memcpy(kmem, data, data_len); - req.host_src_addr = cpu_to_le64(dma_handle); + req->host_src_addr = cpu_to_le64(dma_handle); } - rc = _hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); - if (kmem) - dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); + hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT); + req->dir_type = cpu_to_le16(dir_type); + req->dir_ordinal = cpu_to_le16(dir_ordinal); + req->dir_ext = cpu_to_le16(dir_ext); + req->dir_attr = cpu_to_le16(dir_attr); + req->dir_item_length = cpu_to_le32(dir_item_len); + rc = hwrm_req_send(bp, req); if (rc == -EACCES) bnxt_print_admin_err(bp); return rc; } -static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, - u16 dir_ordinal, u16 dir_ext, u16 dir_attr, - const u8 *data, size_t data_len) -{ - struct bnxt *bp = netdev_priv(dev); - int rc; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = __bnxt_flash_nvram(dev, dir_type, dir_ordinal, dir_ext, dir_attr, - 0, data, data_len); - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; -} - static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, u8 self_reset, u8 flags) { - struct hwrm_fw_reset_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_fw_reset_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FW_RESET); + if (rc) + return rc; - req.embedded_proc_type = proc_type; - req.selfrst_status = self_reset; - req.flags = flags; + req->embedded_proc_type = proc_type; + req->selfrst_status = self_reset; + req->flags = flags; if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) { - rc = hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send_silent(bp, req); } else { - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc == -EACCES) bnxt_print_admin_err(bp); } @@ -2326,7 +2336,7 @@ static int bnxt_flash_firmware(struct net_device *dev, return -EINVAL; } rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, - 0, 0, fw_data, fw_size); + 0, 0, 0, fw_data, fw_size); if (rc == 0) /* Firmware update successful */ rc = bnxt_firmware_reset(dev, dir_type); @@ -2379,7 +2389,7 @@ static int bnxt_flash_microcode(struct net_device *dev, return -EINVAL; } rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, - 0, 0, fw_data, fw_size); + 0, 0, 0, fw_data, fw_size); return rc; } @@ -2445,7 +2455,7 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev, rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size); else rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, - 0, 0, fw->data, fw->size); + 0, 0, 0, fw->data, fw->size); release_firmware(fw); return rc; } @@ -2457,21 +2467,23 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev, int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, u32 install_type) { - struct hwrm_nvm_install_update_input install = {0}; - struct hwrm_nvm_install_update_output resp = {0}; - struct hwrm_nvm_modify_input modify = {0}; + struct hwrm_nvm_install_update_input *install; + struct hwrm_nvm_install_update_output *resp; + struct 
hwrm_nvm_modify_input *modify; struct bnxt *bp = netdev_priv(dev); bool defrag_attempted = false; dma_addr_t dma_handle; u8 *kmem = NULL; u32 modify_len; u32 item_len; - int rc = 0; u16 index; + int rc; bnxt_hwrm_fw_set_time(bp); - bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1); + rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY); + if (rc) + return rc; /* Try allocating a large DMA buffer first. Older fw will * cause excessive NVRAM erases when using small blocks. @@ -2479,22 +2491,33 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware modify_len = roundup_pow_of_two(fw->size); modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE); while (1) { - kmem = dma_alloc_coherent(&bp->pdev->dev, modify_len, - &dma_handle, GFP_KERNEL); + kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle); if (!kmem && modify_len > PAGE_SIZE) modify_len /= 2; else break; } - if (!kmem) + if (!kmem) { + hwrm_req_drop(bp, modify); return -ENOMEM; + } - modify.host_src_addr = cpu_to_le64(dma_handle); + rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE); + if (rc) { + hwrm_req_drop(bp, modify); + return rc; + } - bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1); + hwrm_req_timeout(bp, modify, FLASH_PACKAGE_TIMEOUT); + hwrm_req_timeout(bp, install, INSTALL_PACKAGE_TIMEOUT); + + hwrm_req_hold(bp, modify); + modify->host_src_addr = cpu_to_le64(dma_handle); + + resp = hwrm_req_hold(bp, install); if ((install_type & 0xffff) == 0) install_type >>= 16; - install.install_type = cpu_to_le32(install_type); + install->install_type = cpu_to_le32(install_type); do { u32 copied = 0, len = modify_len; @@ -2514,76 +2537,69 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware break; } - modify.dir_idx = cpu_to_le16(index); + modify->dir_idx = cpu_to_le16(index); if (fw->size > modify_len) - modify.flags = BNXT_NVM_MORE_FLAG; + modify->flags = BNXT_NVM_MORE_FLAG; while (copied < fw->size) { u32 balance = fw->size - copied; if (balance <= modify_len) { len = balance; if (copied) - modify.flags |= BNXT_NVM_LAST_FLAG; + modify->flags |= BNXT_NVM_LAST_FLAG; } memcpy(kmem, fw->data + copied, len); - modify.len = cpu_to_le32(len); - modify.offset = cpu_to_le32(copied); - rc = hwrm_send_message(bp, &modify, sizeof(modify), - FLASH_PACKAGE_TIMEOUT); + modify->len = cpu_to_le32(len); + modify->offset = cpu_to_le32(copied); + rc = hwrm_req_send(bp, modify); if (rc) goto pkg_abort; copied += len; } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp)); + + rc = hwrm_req_send_silent(bp, install); if (defrag_attempted) { /* We have tried to defragment already in the previous * iteration. 
Return with the result for INSTALL_UPDATE */ - mutex_unlock(&bp->hwrm_cmd_lock); break; } - if (rc && ((struct hwrm_err_output *)&resp)->cmd_err == + if (rc && ((struct hwrm_err_output *)resp)->cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { - install.flags = + install->flags = cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); - rc = _hwrm_send_message_silent(bp, &install, - sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp)); + rc = hwrm_req_send_silent(bp, install); - if (rc && ((struct hwrm_err_output *)&resp)->cmd_err == + if (rc && ((struct hwrm_err_output *)resp)->cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) { /* FW has cleared NVM area, driver will create * UPDATE directory and try the flash again */ defrag_attempted = true; - install.flags = 0; - rc = __bnxt_flash_nvram(bp->dev, - BNX_DIR_TYPE_UPDATE, - BNX_DIR_ORDINAL_FIRST, - 0, 0, item_len, NULL, - 0); + install->flags = 0; + rc = bnxt_flash_nvram(bp->dev, + BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, + 0, 0, item_len, NULL, 0); } else if (rc) { netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); } } else if (rc) { netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); } - mutex_unlock(&bp->hwrm_cmd_lock); } while (defrag_attempted && !rc); pkg_abort: - dma_free_coherent(&bp->pdev->dev, modify_len, kmem, dma_handle); - if (resp.result) { + hwrm_req_drop(bp, modify); + hwrm_req_drop(bp, install); + + if (resp->result) { netdev_err(dev, "PKG install error = %d, problem_item = %d\n", - (s8)resp.result, (int)resp.problem_item); + (s8)resp->result, (int)resp->problem_item); rc = -ENOPKG; } if (rc == -EACCES) @@ -2629,20 +2645,22 @@ static int bnxt_flash_device(struct net_device *dev, static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) { + struct hwrm_nvm_get_dir_info_output *output; + struct hwrm_nvm_get_dir_info_input *req; struct bnxt *bp = netdev_priv(dev); int rc; - struct hwrm_nvm_get_dir_info_input req = {0}; - struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + output = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { *entries = le32_to_cpu(output->entries); *length = le32_to_cpu(output->entry_length); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -2668,7 +2686,7 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) u8 *buf; size_t buflen; dma_addr_t dma_handle; - struct hwrm_nvm_get_dir_entries_input req = {0}; + struct hwrm_nvm_get_dir_entries_input *req; rc = nvm_get_dir_info(dev, &dir_entries, &entry_length); if (rc != 0) @@ -2686,20 +2704,23 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) len -= 2; memset(data, 0xff, len); + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES); + if (rc) + return rc; + buflen = dir_entries * entry_length; - buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle, - GFP_KERNEL); + buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle); if (!buf) { - netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", - (unsigned)buflen); + hwrm_req_drop(bp, req); return -ENOMEM; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1); - req.host_dest_addr = cpu_to_le64(dma_handle); 
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->host_dest_addr = cpu_to_le64(dma_handle); + + hwrm_req_hold(bp, req); /* hold the slice */ + rc = hwrm_req_send(bp, req); if (rc == 0) memcpy(data, buf, len > buflen ? buflen : len); - dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle); + hwrm_req_drop(bp, req); return rc; } @@ -2710,28 +2731,31 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, int rc; u8 *buf; dma_addr_t dma_handle; - struct hwrm_nvm_read_input req = {0}; + struct hwrm_nvm_read_input *req; if (!length) return -EINVAL; - buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle, - GFP_KERNEL); + rc = hwrm_req_init(bp, req, HWRM_NVM_READ); + if (rc) + return rc; + + buf = hwrm_req_dma_slice(bp, req, length, &dma_handle); if (!buf) { - netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", - (unsigned)length); + hwrm_req_drop(bp, req); return -ENOMEM; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1); - req.host_dest_addr = cpu_to_le64(dma_handle); - req.dir_idx = cpu_to_le16(index); - req.offset = cpu_to_le32(offset); - req.len = cpu_to_le32(length); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->host_dest_addr = cpu_to_le64(dma_handle); + req->dir_idx = cpu_to_le16(index); + req->offset = cpu_to_le32(offset); + req->len = cpu_to_le32(length); + + hwrm_req_hold(bp, req); /* hold the slice */ + rc = hwrm_req_send(bp, req); if (rc == 0) memcpy(data, buf, length); - dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle); + hwrm_req_drop(bp, req); return rc; } @@ -2739,20 +2763,23 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, u16 ext, u16 *index, u32 *item_length, u32 *data_length) { + struct hwrm_nvm_find_dir_entry_output *output; + struct hwrm_nvm_find_dir_entry_input *req; struct bnxt *bp = netdev_priv(dev); int rc; - struct hwrm_nvm_find_dir_entry_input req = {0}; - struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1); - req.enables = 0; - req.dir_idx = 0; - req.dir_type = cpu_to_le16(type); - req.dir_ordinal = cpu_to_le16(ordinal); - req.dir_ext = cpu_to_le16(ext); - req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY); + if (rc) + return rc; + + req->enables = 0; + req->dir_idx = 0; + req->dir_type = cpu_to_le16(type); + req->dir_ordinal = cpu_to_le16(ordinal); + req->dir_ext = cpu_to_le16(ext); + req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; + output = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (rc == 0) { if (index) *index = le16_to_cpu(output->dir_idx); @@ -2761,7 +2788,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, if (data_length) *data_length = le32_to_cpu(output->dir_data_length); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -2856,12 +2883,16 @@ static int bnxt_get_eeprom(struct net_device *dev, static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index) { + struct hwrm_nvm_erase_dir_entry_input *req; struct bnxt *bp = netdev_priv(dev); - struct hwrm_nvm_erase_dir_entry_input req = {0}; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1); - req.dir_idx = cpu_to_le16(index); - return hwrm_send_message(bp, &req, sizeof(req), 
HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY); + if (rc) + return rc; + + req->dir_idx = cpu_to_le16(index); + return hwrm_req_send(bp, req); } static int bnxt_set_eeprom(struct net_device *dev, @@ -2901,7 +2932,7 @@ static int bnxt_set_eeprom(struct net_device *dev, ordinal = eeprom->offset >> 16; attr = eeprom->offset & 0xffff; - return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data, + return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data, eeprom->len); } @@ -2989,31 +3020,33 @@ static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, u16 page_number, u16 start_addr, u16 data_length, u8 *buf) { - struct hwrm_port_phy_i2c_read_input req = {0}; - struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_i2c_read_output *output; + struct hwrm_port_phy_i2c_read_input *req; int rc, byte_offset = 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1); - req.i2c_slave_addr = i2c_addr; - req.page_number = cpu_to_le16(page_number); - req.port_id = cpu_to_le16(bp->pf.port_id); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ); + if (rc) + return rc; + + output = hwrm_req_hold(bp, req); + req->i2c_slave_addr = i2c_addr; + req->page_number = cpu_to_le16(page_number); + req->port_id = cpu_to_le16(bp->pf.port_id); do { u16 xfer_size; xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); data_length -= xfer_size; - req.page_offset = cpu_to_le16(start_addr + byte_offset); - req.data_length = xfer_size; - req.enables = cpu_to_le32(start_addr + byte_offset ? + req->page_offset = cpu_to_le16(start_addr + byte_offset); + req->data_length = xfer_size; + req->enables = cpu_to_le32(start_addr + byte_offset ? PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (!rc) memcpy(buf + byte_offset, output->data, xfer_size); - mutex_unlock(&bp->hwrm_cmd_lock); byte_offset += xfer_size; } while (!rc && data_length > 0); + hwrm_req_drop(bp, req); return rc; } @@ -3122,13 +3155,13 @@ static int bnxt_nway_reset(struct net_device *dev) static int bnxt_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { - struct hwrm_port_led_cfg_input req = {0}; + struct hwrm_port_led_cfg_input *req; struct bnxt *bp = netdev_priv(dev); struct bnxt_pf_info *pf = &bp->pf; struct bnxt_led_cfg *led_cfg; u8 led_state; __le16 duration; - int i; + int rc, i; if (!bp->num_leds || BNXT_VF(bp)) return -EOPNOTSUPP; @@ -3142,27 +3175,35 @@ static int bnxt_set_phys_id(struct net_device *dev, } else { return -EINVAL; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1); - req.port_id = cpu_to_le16(pf->port_id); - req.num_leds = bp->num_leds; - led_cfg = (struct bnxt_led_cfg *)&req.led0_id; + rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG); + if (rc) + return rc; + + req->port_id = cpu_to_le16(pf->port_id); + req->num_leds = bp->num_leds; + led_cfg = (struct bnxt_led_cfg *)&req->led0_id; for (i = 0; i < bp->num_leds; i++, led_cfg++) { - req.enables |= BNXT_LED_DFLT_ENABLES(i); + req->enables |= BNXT_LED_DFLT_ENABLES(i); led_cfg->led_id = bp->leds[i].led_id; led_cfg->led_state = led_state; led_cfg->led_blink_on = duration; led_cfg->led_blink_off = duration; led_cfg->led_group_id = bp->leds[i].led_group_id; } - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 
cmpl_ring) { - struct hwrm_selftest_irq_input req = {0}; + struct hwrm_selftest_irq_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->cmpl_ring = cpu_to_le16(cmpl_ring); + return hwrm_req_send(bp, req); } static int bnxt_test_irq(struct bnxt *bp) @@ -3182,31 +3223,37 @@ static int bnxt_test_irq(struct bnxt *bp) static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable) { - struct hwrm_port_mac_cfg_input req = {0}; + struct hwrm_port_mac_cfg_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; - req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); if (enable) - req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; + req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; else - req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; + return hwrm_req_send(bp, req); } static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds) { - struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_phy_qcaps_input req = {0}; + struct hwrm_port_phy_qcaps_output *resp; + struct hwrm_port_phy_qcaps_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -3241,7 +3288,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, req->force_link_speed = cpu_to_le16(fw_speed); req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); - rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); req->flags = 0; req->force_link_speed = cpu_to_le16(0); return rc; @@ -3249,21 +3296,29 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); + /* prevent bnxt_disable_an_for_lpbk() from consuming the request */ + hwrm_req_hold(bp, req); if (enable) { - bnxt_disable_an_for_lpbk(bp, &req); + bnxt_disable_an_for_lpbk(bp, req); if (ext) - req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; + req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; else - req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; + req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; } else { - req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; + req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; } - req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); + rc = hwrm_req_send(bp, req); + hwrm_req_drop(bp, req); + return rc; } static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, @@ -3361,7 +3416,7 @@ static int 
bnxt_run_loopback(struct bnxt *bp) data[i] = (u8)(i & 0xff); map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(&bp->pdev->dev, map)) { dev_kfree_skb(skb); return -EIO; @@ -3374,24 +3429,28 @@ static int bnxt_run_loopback(struct bnxt *bp) bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); rc = bnxt_poll_loopback(bp, cpr, pkt_size); - dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); + dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE); dev_kfree_skb(skb); return rc; } static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results) { - struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_selftest_exec_input req = {0}; + struct hwrm_selftest_exec_output *resp; + struct hwrm_selftest_exec_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - resp->test_success = 0; - req.flags = test_mask; - rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout); + rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC); + if (rc) + return rc; + + hwrm_req_timeout(bp, req, bp->test_info->timeout); + req->flags = test_mask; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); *test_results = resp->test_success; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -3550,32 +3609,34 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) return 0; } -static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, +static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, struct bnxt_hwrm_dbg_dma_info *info) { - struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr; struct hwrm_dbg_cmn_input *cmn_req = msg; __le16 *seq_ptr = msg + info->seq_off; + struct hwrm_dbg_cmn_output *cmn_resp; u16 seq = 0, len, segs_off; - void *resp = cmn_resp; dma_addr_t dma_handle; + void *dma_buf, *resp; int rc, off = 0; - void *dma_buf; - dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle, - GFP_KERNEL); - if (!dma_buf) + dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle); + if (!dma_buf) { + hwrm_req_drop(bp, msg); return -ENOMEM; + } + + hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT); + cmn_resp = hwrm_req_hold(bp, msg); + resp = cmn_resp; segs_off = offsetof(struct hwrm_dbg_coredump_list_output, total_segments); cmn_req->host_dest_addr = cpu_to_le64(dma_handle); cmn_req->host_buf_len = cpu_to_le32(info->dma_len); - mutex_lock(&bp->hwrm_cmd_lock); while (1) { *seq_ptr = cpu_to_le16(seq); - rc = _hwrm_send_message(bp, msg, msg_len, - HWRM_COREDUMP_TIMEOUT); + rc = hwrm_req_send(bp, msg); if (rc) break; @@ -3619,26 +3680,27 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, seq++; off += len; } - mutex_unlock(&bp->hwrm_cmd_lock); - dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle); + hwrm_req_drop(bp, msg); return rc; } static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, struct bnxt_coredump *coredump) { - struct hwrm_dbg_coredump_list_input req = {0}; struct bnxt_hwrm_dbg_dma_info info = {NULL}; + struct hwrm_dbg_coredump_list_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST); + if (rc) + return rc; info.dma_len = COREDUMP_LIST_BUF_LEN; info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no); info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output, data_len); - rc = 
bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); + rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); if (!rc) { coredump->data = info.dest_buf; coredump->data_size = info.dest_buf_size; @@ -3650,26 +3712,34 @@ static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, u16 segment_id) { - struct hwrm_dbg_coredump_initiate_input req = {0}; + struct hwrm_dbg_coredump_initiate_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1); - req.component_id = cpu_to_le16(component_id); - req.segment_id = cpu_to_le16(segment_id); + hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT); + req->component_id = cpu_to_le16(component_id); + req->segment_id = cpu_to_le16(segment_id); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, u16 segment_id, u32 *seg_len, void *buf, u32 buf_len, u32 offset) { - struct hwrm_dbg_coredump_retrieve_input req = {0}; + struct hwrm_dbg_coredump_retrieve_input *req; struct bnxt_hwrm_dbg_dma_info info = {NULL}; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1); - req.component_id = cpu_to_le16(component_id); - req.segment_id = cpu_to_le16(segment_id); + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE); + if (rc) + return rc; + + req->component_id = cpu_to_le16(component_id); + req->segment_id = cpu_to_le16(segment_id); info.dma_len = COREDUMP_RETRIEVE_BUF_LEN; info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input, @@ -3682,7 +3752,7 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, info.seg_start = offset; } - rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); + rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); if (!rc) *seg_len = info.dest_buf_size; @@ -3961,8 +4031,8 @@ static int bnxt_get_ts_info(struct net_device *dev, void bnxt_ethtool_init(struct bnxt *bp) { - struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_selftest_qlist_input req = {0}; + struct hwrm_selftest_qlist_output *resp; + struct hwrm_selftest_qlist_input *req; struct bnxt_test_info *test_info; struct net_device *dev = bp->dev; int i, rc; @@ -3974,19 +4044,22 @@ void bnxt_ethtool_init(struct bnxt *bp) if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp)) return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - goto ethtool_init_exit; - test_info = bp->test_info; - if (!test_info) + if (!test_info) { test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL); - if (!test_info) + if (!test_info) + return; + bp->test_info = test_info; + } + + if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST)) + return; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (rc) goto ethtool_init_exit; - bp->test_info = test_info; bp->num_tests = resp->num_tests + BNXT_DRV_TESTS; if (bp->num_tests > BNXT_MAX_TEST) bp->num_tests = BNXT_MAX_TEST; @@ -4020,7 +4093,7 @@ void bnxt_ethtool_init(struct bnxt *bp) } ethtool_init_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); } static void bnxt_get_eth_phy_stats(struct net_device *dev, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c new 
file mode 100644 index 000000000000..acef61abe35d --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -0,0 +1,763 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2020 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include <asm/byteorder.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/errno.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/skbuff.h> + +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" + +static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type) +{ + return (((uintptr_t)ctx) + req_type) ^ BNXT_HWRM_SENTINEL; +} + +/** + * __hwrm_req_init() - Initialize an HWRM request. + * @bp: The driver context. + * @req: A pointer to the request pointer to initialize. + * @req_type: The request type. This will be converted to little endian + * before being written to the req_type field of the returned request. + * @req_len: The length of the request to be allocated. + * + * Allocate DMA resources and initialize a new HWRM request object of the + * given type. The response address field in the request is configured with + * the DMA bus address that has been mapped for the response and the passed + * request is pointed to kernel virtual memory mapped for the request (such + * that short_input indirection can be accomplished without copying). The + * request's target and completion ring are initialized to default values and + * can be overridden by writing to the returned request object directly. + * + * The initialized request can be further customized by writing to its fields + * directly, taking care to convert such fields to little endian. The request + * object will be consumed (and all its associated resources released) upon + * passing it to hwrm_req_send() unless ownership of the request has been + * claimed by the caller via a call to hwrm_req_hold(). If the request is not + * consumed, either because it is never sent or because ownership has been + * claimed, then it must be released by a call to hwrm_req_drop(). + * + * Return: zero on success, negative error code otherwise: + * E2BIG: the request is too large to fit in the DMA buffer. + * ENOMEM: an allocation failure occurred.
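+ *
+ * A minimal usage sketch (illustrative only; callers normally go through
+ * the typed hwrm_req_init() wrapper, as at the converted call sites in
+ * this patch), showing a request that is consumed by the send:
+ *
+ *	struct hwrm_func_reset_input *req;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
+ *	if (rc)
+ *		return rc;
+ *	req->enables = 0;
+ *	return hwrm_req_send(bp, req);	/* consumes req; no drop needed */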
+ */ +int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len) +{ + struct bnxt_hwrm_ctx *ctx; + dma_addr_t dma_handle; + u8 *req_addr; + + if (req_len > BNXT_HWRM_CTX_OFFSET) + return -E2BIG; + + req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO, + &dma_handle); + if (!req_addr) + return -ENOMEM; + + ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET); + /* safety first, sentinel used to check for invalid requests */ + ctx->sentinel = hwrm_calc_sentinel(ctx, req_type); + ctx->req_len = req_len; + ctx->req = (struct input *)req_addr; + ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET); + ctx->dma_handle = dma_handle; + ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */ + ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT; + ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET; + ctx->gfp = GFP_KERNEL; + ctx->slice_addr = NULL; + + /* initialize common request fields */ + ctx->req->req_type = cpu_to_le16(req_type); + ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET); + ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING); + ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET); + *req = ctx->req; + + return 0; +} + +static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr) +{ + void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET; + struct input *req = (struct input *)req_addr; + struct bnxt_hwrm_ctx *ctx = ctx_addr; + u64 sentinel; + + if (!req) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "null HWRM request"); + dump_stack(); + return NULL; + } + + /* HWRM API has no type safety, verify sentinel to validate address */ + sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type)); + if (ctx->sentinel != sentinel) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n", + (u32)le16_to_cpu(req->req_type)); + dump_stack(); + return NULL; + } + + return ctx; +} + +/** + * hwrm_req_timeout() - Set the completion timeout for the request. + * @bp: The driver context. + * @req: The request for which the timeout will be set. + * @timeout: The timeout in milliseconds. + * + * Set the timeout associated with the request for subsequent calls to + * hwrm_req_send(). Some requests are long running and require a different + * timeout than the default. + */ +void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->timeout = timeout; +} + +/** + * hwrm_req_alloc_flags() - Sets GFP allocation flags for slices. + * @bp: The driver context. + * @req: The request for which calls to hwrm_req_dma_slice() will have altered + * allocation flags. + * @gfp: A bitmask of GFP flags. These flags are passed to + * dma_alloc_coherent() whenever it is used to allocate backing memory + * for slices. Note that calls to hwrm_req_dma_slice() will not always + * result in new allocations; however, memory suballocated from the + * request buffer is already __GFP_ZERO. + * + * Sets the GFP allocation flags associated with the request for subsequent + * calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO + * for slice allocations. + */ +void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->gfp = gfp; +} + +/** + * hwrm_req_replace() - Replace request data. + * @bp: The driver context. + * @req: The request to modify.
A call to hwrm_req_replace() is conceptually + * an assignment of new_req to req. Subsequent calls to HWRM API functions, + * such as hwrm_req_send(), should thus use req and not new_req (in fact, + * calls to HWRM API functions will fail if non-managed request objects + * are passed). + * @len: The length of new_req. + * @new_req: The pre-built request to copy or reference. + * + * Replaces the request data in req with that of new_req. This is useful in + * scenarios where a request object has already been constructed by a third + * party prior to creating a resource managed request using hwrm_req_init(). + * Depending on the length, hwrm_req_replace() will either copy the new + * request data into the DMA memory allocated for req, or it will simply + * reference the new request and use it in lieu of req during subsequent + * calls to hwrm_req_send(). The resource management is associated with + * req and is independent of and does not apply to new_req. The caller must + * ensure that the lifetime of new_req is at least as long as req. Any slices + * that may have been associated with the original request are released. + * + * Return: zero on success, negative error code otherwise: + * E2BIG: Request is too large. + * EINVAL: Invalid request to modify. + */ +int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + struct input *internal_req = req; + u16 req_type; + + if (!ctx) + return -EINVAL; + + if (len > BNXT_HWRM_CTX_OFFSET) + return -E2BIG; + + /* free any existing slices */ + ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET; + if (ctx->slice_addr) { + dma_free_coherent(&bp->pdev->dev, ctx->slice_size, + ctx->slice_addr, ctx->slice_handle); + ctx->slice_addr = NULL; + } + ctx->gfp = GFP_KERNEL; + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) { + memcpy(internal_req, new_req, len); + } else { + internal_req->req_type = ((struct input *)new_req)->req_type; + ctx->req = new_req; + } + + ctx->req_len = len; + ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle + + BNXT_HWRM_RESP_OFFSET); + + /* update sentinel for potentially new request type */ + req_type = le16_to_cpu(internal_req->req_type); + ctx->sentinel = hwrm_calc_sentinel(ctx, req_type); + + return 0; +} + +/** + * hwrm_req_flags() - Set non-internal flags of the ctx + * @bp: The driver context. + * @req: The request containing the HWRM command + * @flags: ctx flags that don't have BNXT_HWRM_INTERNAL_FLAG set + * + * ctx flags can be used by the callers to instruct how the subsequent + * hwrm_req_send() should behave. Example: callers can use hwrm_req_flags + * with BNXT_HWRM_CTX_SILENT to omit kernel prints of errors of hwrm_req_send() + * or with BNXT_HWRM_FULL_WAIT to force hwrm_req_send() to wait for the full + * timeout even if FW is not responding. + * This generic function can be used to set any flag that is not an internal + * flag of the HWRM module. + */ +void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->flags |= (flags & HWRM_API_FLAGS); +} + +/** + * hwrm_req_hold() - Claim ownership of the request's resources. + * @bp: The driver context. + * @req: A pointer to the request to own. The request will no longer be + * consumed by calls to hwrm_req_send(). + * + * Take ownership of the request.
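Holding is needed whenever response data will be examined after sending, because an unowned request and its response buffer are freed when hwrm_req_send() consumes the request.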
Ownership places responsibility on the + * caller to free the resources associated with the request via a call to + * hwrm_req_drop(). The caller taking ownership implies that a subsequent + * call to hwrm_req_send() will not consume the request (i.e. sending will + * not free the associated resources if the request is owned by the caller). + * Taking ownership returns a reference to the response. Retaining and + * accessing the response data is the most common reason to take ownership + * of the request. Ownership can also be acquired in order to reuse the same + * request object across multiple invocations of hwrm_req_send(). + * + * Return: A pointer to the response object. + * + * The resources associated with the response will remain available to the + * caller until ownership of the request is relinquished via a call to + * hwrm_req_drop(). It is not possible for hwrm_req_hold() to return NULL if + * a valid request is provided. A returned NULL value would imply a driver + * bug and the implementation will complain loudly in the logs to aid in + * detection. It should not be necessary to check the result for NULL. + */ +void *hwrm_req_hold(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + struct input *input = (struct input *)req; + + if (!ctx) + return NULL; + + if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n", + (u32)le16_to_cpu(input->req_type)); + dump_stack(); + return NULL; + } + + ctx->flags |= BNXT_HWRM_INTERNAL_CTX_OWNED; + return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET; +} + +static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) +{ + void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET; + dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */ + + /* unmap any auxiliary DMA slice */ + if (ctx->slice_addr) + dma_free_coherent(&bp->pdev->dev, ctx->slice_size, + ctx->slice_addr, ctx->slice_handle); + + /* invalidate, ensure ownership, sentinel and dma_handle are cleared */ + memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx)); + + /* return the buffer to the DMA pool */ + if (dma_handle) + dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle); +} + +/** + * hwrm_req_drop() - Release all resources associated with the request. + * @bp: The driver context. + * @req: The request to consume, releasing the associated resources. The + * request object, any slices, and its associated response are no + * longer valid. + * + * It is legal to call hwrm_req_drop() on an unowned request, provided it + * has not already been consumed by hwrm_req_send() (for example, to release + * an aborted request). A given request should not be dropped more than once, + * nor should it be dropped after having been consumed by hwrm_req_send(). To + * do so is an error (the context will not be found and a stack trace will be + * rendered in the kernel log).
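+ *
+ * A typical hold/send/drop sequence (an illustrative sketch using the
+ * VER_GET request; any request type follows the same pattern):
+ *
+ *	struct hwrm_ver_get_output *resp;
+ *	struct hwrm_ver_get_input *req;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
+ *	if (rc)
+ *		return rc;
+ *	resp = hwrm_req_hold(bp, req);	/* keep req and resp valid */
+ *	rc = hwrm_req_send(bp, req);
+ *	if (!rc)
+ *		netdev_info(bp->dev, "HWRM interface %d.%d.%d\n",
+ *			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ *			    resp->hwrm_intf_upd_8b);
+ *	hwrm_req_drop(bp, req);	/* releases req, resp and any slices */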
+ */ +void hwrm_req_drop(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + __hwrm_ctx_drop(bp, ctx); +} + +static int __hwrm_to_stderr(u32 hwrm_err) +{ + switch (hwrm_err) { + case HWRM_ERR_CODE_SUCCESS: + return 0; + case HWRM_ERR_CODE_RESOURCE_LOCKED: + return -EROFS; + case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: + return -EACCES; + case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: + return -ENOSPC; + case HWRM_ERR_CODE_INVALID_PARAMS: + case HWRM_ERR_CODE_INVALID_FLAGS: + case HWRM_ERR_CODE_INVALID_ENABLES: + case HWRM_ERR_CODE_UNSUPPORTED_TLV: + case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: + return -EINVAL; + case HWRM_ERR_CODE_NO_BUFFER: + return -ENOMEM; + case HWRM_ERR_CODE_HOT_RESET_PROGRESS: + case HWRM_ERR_CODE_BUSY: + return -EAGAIN; + case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: + return -EOPNOTSUPP; + default: + return -EIO; + } +} + +static struct bnxt_hwrm_wait_token * +__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst) +{ + struct bnxt_hwrm_wait_token *token; + + token = kzalloc(sizeof(*token), GFP_KERNEL); + if (!token) + return NULL; + + mutex_lock(&bp->hwrm_cmd_lock); + + token->dst = dst; + token->state = BNXT_HWRM_PENDING; + if (dst == BNXT_HWRM_CHNL_CHIMP) { + token->seq_id = bp->hwrm_cmd_seq++; + hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list); + } else { + token->seq_id = bp->hwrm_cmd_kong_seq++; + } + + return token; +} + +static void +__hwrm_release_token(struct bnxt *bp, struct bnxt_hwrm_wait_token *token) +{ + if (token->dst == BNXT_HWRM_CHNL_CHIMP) { + hlist_del_rcu(&token->node); + kfree_rcu(token, rcu); + } else { + kfree(token); + } + mutex_unlock(&bp->hwrm_cmd_lock); +} + +void +hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state) +{ + struct bnxt_hwrm_wait_token *token; + + rcu_read_lock(); + hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) { + if (token->seq_id == seq_id) { + WRITE_ONCE(token->state, state); + rcu_read_unlock(); + return; + } + } + rcu_read_unlock(); + netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); +} + +static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) +{ + u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; + enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP; + u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; + struct bnxt_hwrm_wait_token *token = NULL; + struct hwrm_short_input short_input = {0}; + u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; + unsigned int i, timeout, tmo_count; + u32 *data = (u32 *)ctx->req; + u32 msg_len = ctx->req_len; + int rc = -EBUSY; + u32 req_type; + u16 len = 0; + u8 *valid; + + if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY) + memset(ctx->resp, 0, PAGE_SIZE); + + req_type = le16_to_cpu(ctx->req->req_type); + if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET) + goto exit; + + if (msg_len > BNXT_HWRM_MAX_REQ_LEN && + msg_len > bp->hwrm_max_ext_req_len) { + rc = -E2BIG; + goto exit; + } + + if (bnxt_kong_hwrm_message(bp, ctx->req)) { + dst = BNXT_HWRM_CHNL_KONG; + bar_offset = BNXT_GRCPF_REG_KONG_COMM; + doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; + if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) { + netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n", + req_type); + rc = -EINVAL; + goto exit; + } + } + + token = __hwrm_acquire_token(bp, dst); + if (!token) { + rc = -ENOMEM; + goto exit; + } + ctx->req->seq_id = cpu_to_le16(token->seq_id); + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || + msg_len > BNXT_HWRM_MAX_REQ_LEN) { + 
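/* Short command mode: rather than streaming the whole request
+		 * through the communication BAR below, hand the firmware a
+		 * small descriptor pointing at the DMA-mapped request buffer
+		 * (ctx->dma_handle) that hwrm_req_init() set up.
+		 */
+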
short_input.req_type = ctx->req->req_type; + short_input.signature = + cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); + short_input.size = cpu_to_le16(msg_len); + short_input.req_addr = cpu_to_le64(ctx->dma_handle); + + data = (u32 *)&short_input; + msg_len = sizeof(short_input); + + max_req_len = BNXT_HWRM_SHORT_REQ_LEN; + } + + /* Ensure any associated DMA buffers are written before doorbell */ + wmb(); + + /* Write request msg to hwrm channel */ + __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); + + for (i = msg_len; i < max_req_len; i += 4) + writel(0, bp->bar0 + bar_offset + i); + + /* Ring channel doorbell */ + writel(1, bp->bar0 + doorbell_offset); + + if (!pci_is_enabled(bp->pdev)) { + rc = -ENODEV; + goto exit; + } + + /* Limit timeout to an upper limit */ + timeout = min_t(uint, ctx->timeout, HWRM_CMD_MAX_TIMEOUT); + /* convert timeout to usec */ + timeout *= 1000; + + i = 0; + /* Short timeout for the first few iterations: + * number of loops = number of loops for short timeout + + * number of loops for standard timeout. + */ + tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; + timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; + tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); + + if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) { + /* Wait until hwrm response cmpl interrupt is processed */ + while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE && + i++ < tmo_count) { + /* Abort the wait for completion if the FW health + * check has failed. + */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + goto exit; + /* on first few passes, just barely sleep */ + if (i < HWRM_SHORT_TIMEOUT_COUNTER) { + usleep_range(HWRM_SHORT_MIN_TIMEOUT, + HWRM_SHORT_MAX_TIMEOUT); + } else { + if (HWRM_WAIT_MUST_ABORT(bp, ctx)) + break; + usleep_range(HWRM_MIN_TIMEOUT, + HWRM_MAX_TIMEOUT); + } + } + + if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) { + if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) + netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", + le16_to_cpu(ctx->req->req_type)); + goto exit; + } + len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len)); + valid = ((u8 *)ctx->resp) + len - 1; + } else { + __le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */ + int j; + + /* Check if response len is updated */ + for (i = 0; i < tmo_count; i++) { + /* Abort the wait for completion if the FW health + * check has failed. 
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ goto exit;
+
+ if (token &&
+ READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) {
+ __hwrm_release_token(bp, token);
+ token = NULL;
+ }
+
+ len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
+ if (len) {
+ __le16 resp_seq = READ_ONCE(ctx->resp->seq_id);
+
+ if (resp_seq == ctx->req->seq_id)
+ break;
+ if (resp_seq != seen_out_of_seq) {
+ netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
+ le16_to_cpu(resp_seq),
+ le16_to_cpu(ctx->req->req_type),
+ le16_to_cpu(ctx->req->seq_id));
+ seen_out_of_seq = resp_seq;
+ }
+ }
+
+ /* on first few passes, just barely sleep */
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
+ usleep_range(HWRM_SHORT_MIN_TIMEOUT,
+ HWRM_SHORT_MAX_TIMEOUT);
+ } else {
+ if (HWRM_WAIT_MUST_ABORT(bp, ctx))
+ goto timeout_abort;
+ usleep_range(HWRM_MIN_TIMEOUT,
+ HWRM_MAX_TIMEOUT);
+ }
+ }
+
+ if (i >= tmo_count) {
+timeout_abort:
+ if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
+ netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
+ hwrm_total_timeout(i),
+ le16_to_cpu(ctx->req->req_type),
+ le16_to_cpu(ctx->req->seq_id), len);
+ goto exit;
+ }
+
+ /* Last byte of resp contains valid bit */
+ valid = ((u8 *)ctx->resp) + len - 1;
+ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
+ /* make sure we read from updated DMA memory */
+ dma_rmb();
+ if (*valid)
+ break;
+ usleep_range(1, 5);
+ }
+
+ if (j >= HWRM_VALID_BIT_DELAY_USEC) {
+ if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
+ netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
+ hwrm_total_timeout(i),
+ le16_to_cpu(ctx->req->req_type),
+ le16_to_cpu(ctx->req->seq_id), len,
+ *valid);
+ goto exit;
+ }
+ }
+
+ /* Zero valid bit for compatibility. Valid bit in an older spec
+ * may become a new field in a newer spec. We must make sure that
+ * a new field not implemented by old spec will read zero.
+ */
+ *valid = 0;
+ rc = le16_to_cpu(ctx->resp->error_code);
+ if (rc && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) {
+ netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+ le16_to_cpu(ctx->resp->req_type),
+ le16_to_cpu(ctx->resp->seq_id), rc);
+ }
+ rc = __hwrm_to_stderr(rc);
+exit:
+ if (token)
+ __hwrm_release_token(bp, token);
+ if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED)
+ ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY;
+ else
+ __hwrm_ctx_drop(bp, ctx);
+ return rc;
+}
+
+/**
+ * hwrm_req_send() - Execute an HWRM command.
+ * @bp: The driver context.
+ * @req: A pointer to the request to send. The DMA resources associated with
+ * the request will be released (ie. the request will be consumed) unless
+ * ownership of the request has been assumed by the caller via a call to
+ * hwrm_req_hold().
+ *
+ * Send an HWRM request to the device and wait for a response. The request is
+ * consumed if it is not owned by the caller. This function blocks until the
+ * request either completes or times out due to an error.
+ *
+ * Return: A result code.
+ *
+ * The result is zero on success, otherwise the negative error code indicates
+ * one of the following errors:
+ * E2BIG: The request was too large.
+ * EBUSY: The firmware is in a fatal state or the request timed out.
+ * EACCES: HWRM access denied.
+ * ENOSPC: HWRM resource allocation error.
+ * EINVAL: Request parameters are invalid.
+ * ENOMEM: HWRM has no buffers.
+ * EAGAIN: HWRM busy or reset in progress.
+ * EOPNOTSUPP: Invalid request type.
+ * EIO: Any other error.
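+ *
+ * The common fire-and-forget call site thus reduces to the sketch below
+ * (mirroring callers converted later in this patch, e.g. the PTP code; no
+ * hold is taken, so the request is released on success and failure alike):
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+ *	if (rc)
+ *		return rc;
+ *	req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
+ *	req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
+ *	return hwrm_req_send(bp, req);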
+ *
+ * Error handling is orthogonal to request ownership. An unowned request will
+ * still be consumed on error. If the caller owns the request, then the caller
+ * is responsible for releasing the resources. Otherwise, hwrm_req_send() will
+ * always consume the request.
+ */
+int hwrm_req_send(struct bnxt *bp, void *req)
+{
+ struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
+
+ if (!ctx)
+ return -EINVAL;
+
+ return __hwrm_send(bp, ctx);
+}
+
+/**
+ * hwrm_req_send_silent() - A silent version of hwrm_req_send().
+ * @bp: The driver context.
+ * @req: The request to send without logging.
+ *
+ * The same as hwrm_req_send(), except that the request is silenced by setting
+ * BNXT_HWRM_CTX_SILENT via hwrm_req_flags() prior to the call. This version
+ * of the function is provided solely to preserve the legacy API's flavor for
+ * this functionality.
+ *
+ * Return: A result code, see hwrm_req_send().
+ */
+int hwrm_req_send_silent(struct bnxt *bp, void *req)
+{
+ hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
+ return hwrm_req_send(bp, req);
+}
+
+/**
+ * hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory.
+ * @bp: The driver context.
+ * @req: The request with which the indirect data will be associated.
+ * @size: The size of the allocation.
+ * @dma_handle: The bus address associated with the allocation. The HWRM API
+ * has no knowledge about the type of the request and so cannot infer how
+ * the caller intends to use the indirect data. Thus, the caller is
+ * responsible for configuring the request object appropriately to
+ * point to the associated indirect memory. Note that the DMA handle has
+ * the same definition as in dma_alloc_coherent(); the caller is
+ * responsible for endian conversions via cpu_to_le64() before assigning
+ * this address.
+ *
+ * Allocates DMA mapped memory for indirect data related to a request. The
+ * lifetime of the DMA resources will be bound to that of the request (ie.
+ * they will be automatically released when the request is either consumed by
+ * hwrm_req_send() or dropped by hwrm_req_drop()). Small allocations are
+ * efficiently suballocated out of the request buffer space, hence the name
+ * slice, while larger allocations are satisfied via an underlying call to
+ * dma_alloc_coherent(). Multiple suballocations are supported; however, only
+ * one externally mapped region is.
+ *
+ * Return: The kernel virtual address of the DMA mapping.
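+ *
+ * Usage reduces to the sketch below (hwrm_nvm_write_input and its
+ * host_src_addr field are assumed here for illustration; any request type
+ * carrying an indirect data pointer follows the same shape):
+ *
+ *	data = hwrm_req_dma_slice(bp, req, len, &dma_handle);
+ *	if (!data) {
+ *		hwrm_req_drop(bp, req);
+ *		return -ENOMEM;
+ *	}
+ *	memcpy(data, src, len);
+ *	req->host_src_addr = cpu_to_le64(dma_handle);
+ *	rc = hwrm_req_send(bp, req);		(slice freed along with req)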
+ */ +void * +hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE; + struct input *input = req; + u8 *addr, *req_addr = req; + u32 max_offset, offset; + + if (!ctx) + return NULL; + + max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated; + offset = max_offset - size; + offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN); + addr = req_addr + offset; + + if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) { + ctx->allocated = end - addr; + *dma_handle = ctx->dma_handle + offset; + return addr; + } + + /* could not suballocate from ctx buffer, try create a new mapping */ + if (ctx->slice_addr) { + /* if one exists, can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n", + (u32)le16_to_cpu(input->req_type)); + dump_stack(); + return NULL; + } + + addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp); + + if (!addr) + return NULL; + + ctx->slice_addr = addr; + ctx->slice_size = size; + ctx->slice_handle = *dma_handle; + + return addr; +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h new file mode 100644 index 000000000000..4d17f0d5363b --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h @@ -0,0 +1,145 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2020 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_HWRM_H +#define BNXT_HWRM_H + +#include "bnxt_hsi.h" + +enum bnxt_hwrm_ctx_flags { + /* Update the HWRM_API_FLAGS right below for any new non-internal bit added here */ + BNXT_HWRM_INTERNAL_CTX_OWNED = BIT(0), /* caller owns the context */ + BNXT_HWRM_INTERNAL_RESP_DIRTY = BIT(1), /* response contains data */ + BNXT_HWRM_CTX_SILENT = BIT(2), /* squelch firmware errors */ + BNXT_HWRM_FULL_WAIT = BIT(3), /* wait for full timeout of HWRM command */ +}; + +#define HWRM_API_FLAGS (BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT) + +struct bnxt_hwrm_ctx { + u64 sentinel; + dma_addr_t dma_handle; + struct output *resp; + struct input *req; + dma_addr_t slice_handle; + void *slice_addr; + u32 slice_size; + u32 req_len; + enum bnxt_hwrm_ctx_flags flags; + unsigned int timeout; + u32 allocated; + gfp_t gfp; +}; + +enum bnxt_hwrm_wait_state { + BNXT_HWRM_PENDING, + BNXT_HWRM_DEFERRED, + BNXT_HWRM_COMPLETE, + BNXT_HWRM_CANCELLED, +}; + +enum bnxt_hwrm_chnl { BNXT_HWRM_CHNL_CHIMP, BNXT_HWRM_CHNL_KONG }; + +struct bnxt_hwrm_wait_token { + struct rcu_head rcu; + struct hlist_node node; + enum bnxt_hwrm_wait_state state; + enum bnxt_hwrm_chnl dst; + u16 seq_id; +}; + +void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s); + +#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) +#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) +#define HWRM_CMD_MAX_TIMEOUT 40000 +#define SHORT_HWRM_CMD_TIMEOUT 20 +#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) +#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) +#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) +#define BNXT_HWRM_TARGET 0xffff +#define BNXT_HWRM_NO_CMPL_RING -1 +#define BNXT_HWRM_REQ_MAX_SIZE 128 +#define BNXT_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */ +#define BNXT_HWRM_RESP_RESERVED PAGE_SIZE +#define BNXT_HWRM_RESP_OFFSET 
(BNXT_HWRM_DMA_SIZE - \ + BNXT_HWRM_RESP_RESERVED) +#define BNXT_HWRM_CTX_OFFSET (BNXT_HWRM_RESP_OFFSET - \ + sizeof(struct bnxt_hwrm_ctx)) +#define BNXT_HWRM_DMA_ALIGN 16 +#define BNXT_HWRM_SENTINEL 0xb6e1f68a12e9a7eb /* arbitrary value */ +#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ + BNXT_HWRM_REQ_MAX_SIZE) +#define HWRM_SHORT_MIN_TIMEOUT 3 +#define HWRM_SHORT_MAX_TIMEOUT 10 +#define HWRM_SHORT_TIMEOUT_COUNTER 5 + +#define HWRM_MIN_TIMEOUT 25 +#define HWRM_MAX_TIMEOUT 40 + +#define HWRM_WAIT_MUST_ABORT(bp, ctx) \ + (le16_to_cpu((ctx)->req->req_type) != HWRM_VER_GET && \ + !bnxt_is_fw_healthy(bp)) + +static inline unsigned int hwrm_total_timeout(unsigned int n) +{ + return n <= HWRM_SHORT_TIMEOUT_COUNTER ? n * HWRM_SHORT_MIN_TIMEOUT : + HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + + (n - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT; +} + + +#define HWRM_VALID_BIT_DELAY_USEC 150 + +static inline bool bnxt_cfa_hwrm_message(u16 req_type) +{ + switch (req_type) { + case HWRM_CFA_ENCAP_RECORD_ALLOC: + case HWRM_CFA_ENCAP_RECORD_FREE: + case HWRM_CFA_DECAP_FILTER_ALLOC: + case HWRM_CFA_DECAP_FILTER_FREE: + case HWRM_CFA_EM_FLOW_ALLOC: + case HWRM_CFA_EM_FLOW_FREE: + case HWRM_CFA_EM_FLOW_CFG: + case HWRM_CFA_FLOW_ALLOC: + case HWRM_CFA_FLOW_FREE: + case HWRM_CFA_FLOW_INFO: + case HWRM_CFA_FLOW_FLUSH: + case HWRM_CFA_FLOW_STATS: + case HWRM_CFA_METER_PROFILE_ALLOC: + case HWRM_CFA_METER_PROFILE_FREE: + case HWRM_CFA_METER_PROFILE_CFG: + case HWRM_CFA_METER_INSTANCE_ALLOC: + case HWRM_CFA_METER_INSTANCE_FREE: + return true; + default: + return false; + } +} + +static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req) +{ + return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && + (bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type)) || + le16_to_cpu(req->target_id) == HWRM_TARGET_ID_KONG)); +} + +int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len); +#define hwrm_req_init(bp, req, req_type) \ + __hwrm_req_init((bp), (void **)&(req), (req_type), sizeof(*(req))) +void *hwrm_req_hold(struct bnxt *bp, void *req); +void hwrm_req_drop(struct bnxt *bp, void *req); +void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags); +void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout); +int hwrm_req_send(struct bnxt *bp, void *req); +int hwrm_req_send_silent(struct bnxt *bp, void *req); +int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len); +void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t flags); +void *hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma); +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 81f40ab748f1..f0aa480799ca 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -18,6 +18,7 @@ #include <linux/ptp_classify.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ptp.h" int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off) @@ -56,16 +57,19 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info, } /* Caller holds ptp_lock */ -static u64 bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts) +static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts, + u64 *ns) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; - u64 ns; + + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return -EIO; ptp_read_system_prets(sts); - ns = readl(bp->bar0 + 
ptp->refclk_mapped_regs[0]); + *ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]); ptp_read_system_postts(sts); - ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32; - return ns; + *ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32; + return 0; } static void bnxt_ptp_get_current_time(struct bnxt *bp) @@ -76,30 +80,34 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp) return; spin_lock_bh(&ptp->ptp_lock); WRITE_ONCE(ptp->old_time, ptp->current_time); - ptp->current_time = bnxt_refclk_read(bp, NULL); + bnxt_refclk_read(bp, NULL, &ptp->current_time); spin_unlock_bh(&ptp->ptp_lock); } static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts) { - struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_ts_query_input req = {0}; + struct hwrm_port_ts_query_output *resp; + struct hwrm_port_ts_query_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_TS_QUERY, -1, -1); - req.flags = cpu_to_le32(flags); + rc = hwrm_req_init(bp, req, HWRM_PORT_TS_QUERY); + if (rc) + return rc; + + req->flags = cpu_to_le32(flags); if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) == PORT_TS_QUERY_REQ_FLAGS_PATH_TX) { - req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES); - req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid); - req.ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off); - req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT); + req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES); + req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid); + req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off); + req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT); } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + + rc = hwrm_req_send(bp, req); if (!rc) *ts = le64_to_cpu(resp->ptp_msg_ts); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -110,9 +118,14 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info, struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); u64 ns, cycles; + int rc; spin_lock_bh(&ptp->ptp_lock); - cycles = bnxt_refclk_read(ptp->bp, sts); + rc = bnxt_refclk_read(ptp->bp, sts, &cycles); + if (rc) { + spin_unlock_bh(&ptp->ptp_lock); + return rc; + } ns = timecounter_cyc2time(&ptp->tc, cycles); spin_unlock_bh(&ptp->ptp_lock); *ts = ns_to_timespec64(ns); @@ -135,33 +148,246 @@ static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb) { struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); - struct hwrm_port_mac_cfg_input req = {0}; + struct hwrm_port_mac_cfg_input *req; struct bnxt *bp = ptp->bp; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1); - req.ptp_freq_adj_ppb = cpu_to_le32(ppb); - req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; + + req->ptp_freq_adj_ppb = cpu_to_le32(ppb); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); + rc = hwrm_req_send(ptp->bp, req); if (rc) netdev_err(ptp->bp->dev, "ptp adjfreq failed. 
rc = %d\n", rc); return rc; } -static int bnxt_ptp_enable(struct ptp_clock_info *ptp, +void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct ptp_clock_event event; + u64 ns, pps_ts; + + pps_ts = EVENT_PPS_TS(data2, data1); + spin_lock_bh(&ptp->ptp_lock); + ns = timecounter_cyc2time(&ptp->tc, pps_ts); + spin_unlock_bh(&ptp->ptp_lock); + + switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) { + case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL: + event.pps_times.ts_real = ns_to_timespec64(ns); + event.type = PTP_CLOCK_PPSUSR; + event.index = EVENT_DATA2_PPS_PIN_NUM(data2); + break; + case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL: + event.timestamp = ns; + event.type = PTP_CLOCK_EXTTS; + event.index = EVENT_DATA2_PPS_PIN_NUM(data2); + break; + } + + ptp_clock_event(bp->ptp_cfg->ptp_clock, &event); +} + +static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage) +{ + struct hwrm_func_ptp_pin_cfg_input *req; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u8 state = usage != BNXT_PPS_PIN_NONE; + u8 *pin_state, *pin_usg; + u32 enables; + int rc; + + if (!TSIO_PIN_VALID(pin)) { + netdev_err(ptp->bp->dev, "1PPS: Invalid pin. Check pin-function configuration\n"); + return -EOPNOTSUPP; + } + + rc = hwrm_req_init(ptp->bp, req, HWRM_FUNC_PTP_PIN_CFG); + if (rc) + return rc; + + enables = (FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE | + FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE) << (pin * 2); + req->enables = cpu_to_le32(enables); + + pin_state = &req->pin0_state; + pin_usg = &req->pin0_usage; + + *(pin_state + (pin * 2)) = state; + *(pin_usg + (pin * 2)) = usage; + + rc = hwrm_req_send(ptp->bp, req); + if (rc) + return rc; + + ptp->pps_info.pins[pin].usage = usage; + ptp->pps_info.pins[pin].state = state; + + return 0; +} + +static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event) +{ + struct hwrm_func_ptp_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG); + if (rc) + return rc; + + req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT); + req->ptp_pps_event = event; + return hwrm_req_send(bp, req); +} + +void bnxt_ptp_reapply_pps(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct bnxt_pps *pps; + u32 pin = 0; + int rc; + + if (!ptp || !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) || + !(ptp->ptp_info.pin_config)) + return; + pps = &ptp->pps_info; + for (pin = 0; pin < BNXT_MAX_TSIO_PINS; pin++) { + if (pps->pins[pin].state) { + rc = bnxt_ptp_cfg_pin(bp, pin, pps->pins[pin].usage); + if (!rc && pps->pins[pin].event) + rc = bnxt_ptp_cfg_event(bp, + pps->pins[pin].event); + if (rc) + netdev_err(bp->dev, "1PPS: Failed to configure pin%d\n", + pin); + } + } +} + +static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns, + u64 *cycles_delta) +{ + u64 cycles_now; + u64 nsec_now, nsec_delta; + int rc; + + spin_lock_bh(&ptp->ptp_lock); + rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now); + if (rc) { + spin_unlock_bh(&ptp->ptp_lock); + return rc; + } + nsec_now = timecounter_cyc2time(&ptp->tc, cycles_now); + spin_unlock_bh(&ptp->ptp_lock); + + nsec_delta = target_ns - nsec_now; + *cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult); + return 0; +} + +static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp, + struct ptp_clock_request *rq) +{ + struct hwrm_func_ptp_cfg_input *req; + struct bnxt *bp = ptp->bp; + struct timespec64 ts; + u64 target_ns, delta; + u16 enables; + int rc; + + ts.tv_sec = rq->perout.start.sec; + ts.tv_nsec = 
rq->perout.start.nsec; + target_ns = timespec64_to_ns(&ts); + + rc = bnxt_get_target_cycles(ptp, target_ns, &delta); + if (rc) + return rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG); + if (rc) + return rc; + + enables = FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD | + FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP | + FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE; + req->enables = cpu_to_le16(enables); + req->ptp_pps_event = 0; + req->ptp_freq_adj_dll_source = 0; + req->ptp_freq_adj_dll_phase = 0; + req->ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC); + req->ptp_freq_adj_ext_up = 0; + req->ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta); + + return hwrm_req_send(bp, req); +} + +static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq, int on) { - return -EOPNOTSUPP; + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + struct bnxt *bp = ptp->bp; + u8 pin_id; + int rc; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Configure an External PPS IN */ + pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (!on) + break; + rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_IN); + if (rc) + return rc; + rc = bnxt_ptp_cfg_event(bp, BNXT_PPS_EVENT_EXTERNAL); + if (!rc) + ptp->pps_info.pins[pin_id].event = BNXT_PPS_EVENT_EXTERNAL; + return rc; + case PTP_CLK_REQ_PEROUT: + /* Configure a Periodic PPS OUT */ + pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (!on) + break; + + rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_OUT); + if (!rc) + rc = bnxt_ptp_perout_cfg(ptp, rq); + + return rc; + case PTP_CLK_REQ_PPS: + /* Configure PHC PPS IN */ + rc = bnxt_ptp_cfg_pin(bp, 0, BNXT_PPS_PIN_PPS_IN); + if (rc) + return rc; + rc = bnxt_ptp_cfg_event(bp, BNXT_PPS_EVENT_INTERNAL); + if (!rc) + ptp->pps_info.pins[0].event = BNXT_PPS_EVENT_INTERNAL; + return rc; + default: + netdev_err(ptp->bp->dev, "Unrecognized PIN function\n"); + return -EOPNOTSUPP; + } + + return bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_NONE); } static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) { - struct hwrm_port_mac_cfg_input req = {0}; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct hwrm_port_mac_cfg_input *req; u32 flags = 0; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1); if (ptp->rx_filter) flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; else @@ -170,11 +396,11 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE; else flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE; - req.flags = cpu_to_le32(flags); - req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); - req.rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl); + req->flags = cpu_to_le32(flags); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); + req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) @@ -311,8 +537,10 @@ static void bnxt_unmap_ptp_regs(struct bnxt *bp) static u64 bnxt_cc_read(const struct cyclecounter *cc) { struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc); + u64 ns = 0; - return bnxt_refclk_read(ptp->bp, NULL); + bnxt_refclk_read(ptp->bp, NULL, &ns); + return ns; } static void bnxt_stamp_tx_skb(struct 
bnxt *bp, struct sk_buff *skb) @@ -410,6 +638,87 @@ static const struct ptp_clock_info bnxt_ptp_caps = { .enable = bnxt_ptp_enable, }; +static int bnxt_ptp_verify(struct ptp_clock_info *ptp_info, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + /* Allow only PPS pin function configuration */ + if (ptp->pps_info.pins[pin].usage <= BNXT_PPS_PIN_PPS_OUT && + func != PTP_PF_PHYSYNC) + return 0; + else + return -EOPNOTSUPP; +} + +static int bnxt_ptp_pps_init(struct bnxt *bp) +{ + struct hwrm_func_ptp_pin_qcfg_output *resp; + struct hwrm_func_ptp_pin_qcfg_input *req; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct ptp_clock_info *ptp_info; + struct bnxt_pps *pps_info; + u8 *pin_usg; + u32 i, rc; + + /* Query current/default PIN CFG */ + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_PIN_QCFG); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc || !resp->num_pins) { + hwrm_req_drop(bp, req); + return -EOPNOTSUPP; + } + + ptp_info = &ptp->ptp_info; + pps_info = &ptp->pps_info; + pps_info->num_pins = resp->num_pins; + ptp_info->n_pins = pps_info->num_pins; + ptp_info->pin_config = kcalloc(ptp_info->n_pins, + sizeof(*ptp_info->pin_config), + GFP_KERNEL); + if (!ptp_info->pin_config) { + hwrm_req_drop(bp, req); + return -ENOMEM; + } + + /* Report the TSIO capability to kernel */ + pin_usg = &resp->pin0_usage; + for (i = 0; i < pps_info->num_pins; i++, pin_usg++) { + snprintf(ptp_info->pin_config[i].name, + sizeof(ptp_info->pin_config[i].name), "bnxt_pps%d", i); + ptp_info->pin_config[i].index = i; + ptp_info->pin_config[i].chan = i; + if (*pin_usg == BNXT_PPS_PIN_PPS_IN) + ptp_info->pin_config[i].func = PTP_PF_EXTTS; + else if (*pin_usg == BNXT_PPS_PIN_PPS_OUT) + ptp_info->pin_config[i].func = PTP_PF_PEROUT; + else + ptp_info->pin_config[i].func = PTP_PF_NONE; + + pps_info->pins[i].usage = *pin_usg; + } + hwrm_req_drop(bp, req); + + /* Only 1 each of ext_ts and per_out pins is available in HW */ + ptp_info->n_ext_ts = 1; + ptp_info->n_per_out = 1; + ptp_info->pps = 1; + ptp_info->verify = bnxt_ptp_verify; + + return 0; +} + +static bool bnxt_pps_config_ok(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + return !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) == !ptp->ptp_info.pin_config; +} + int bnxt_ptp_init(struct bnxt *bp) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; @@ -422,6 +731,15 @@ int bnxt_ptp_init(struct bnxt *bp) if (rc) return rc; + if (ptp->ptp_clock && bnxt_pps_config_ok(bp)) + return 0; + + if (ptp->ptp_clock) { + ptp_clock_unregister(ptp->ptp_clock); + ptp->ptp_clock = NULL; + kfree(ptp->ptp_info.pin_config); + ptp->ptp_info.pin_config = NULL; + } atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS); spin_lock_init(&ptp->ptp_lock); @@ -435,6 +753,10 @@ int bnxt_ptp_init(struct bnxt *bp) timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real())); ptp->ptp_info = bnxt_ptp_caps; + if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) { + if (bnxt_ptp_pps_init(bp)) + netdev_err(bp->dev, "1pps not initialized, continuing without 1pps support\n"); + } ptp->ptp_clock = ptp_clock_register(&ptp->ptp_info, &bp->pdev->dev); if (IS_ERR(ptp->ptp_clock)) { int err = PTR_ERR(ptp->ptp_clock); @@ -445,7 +767,7 @@ int bnxt_ptp_init(struct bnxt *bp) } if (bp->flags & BNXT_FLAG_CHIP_P5) { spin_lock_bh(&ptp->ptp_lock); - ptp->current_time = bnxt_refclk_read(bp, NULL); + bnxt_refclk_read(bp, NULL, &ptp->current_time); WRITE_ONCE(ptp->old_time, ptp->current_time); 
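+		/* Initial PHC snapshot taken; the worker scheduled just
+		 * below refreshes current_time/old_time periodically.
+		 */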
spin_unlock_bh(&ptp->ptp_lock); ptp_schedule_worker(ptp->ptp_clock, 0); @@ -464,6 +786,9 @@ void bnxt_ptp_clear(struct bnxt *bp) ptp_clock_unregister(ptp->ptp_clock); ptp->ptp_clock = NULL; + kfree(ptp->ptp_info.pin_config); + ptp->ptp_info.pin_config = NULL; + if (ptp->tx_skb) { dev_kfree_skb_any(ptp->tx_skb); ptp->tx_skb = NULL; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index 524f1c272054..fa5f05708e6d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -22,11 +22,62 @@ PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \ PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET) +struct pps_pin { + u8 event; + u8 usage; + u8 state; +}; + +#define TSIO_PIN_VALID(pin) ((pin) < (BNXT_MAX_TSIO_PINS)) + +#define EVENT_DATA2_PPS_EVENT_TYPE(data2) \ + ((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE) + +#define EVENT_DATA2_PPS_PIN_NUM(data2) \ + (((data2) & \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK) >>\ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT) + +#define BNXT_DATA2_UPPER_MSK \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK + +#define BNXT_DATA2_UPPER_SFT \ + (32 - \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT) + +#define BNXT_DATA1_LOWER_MSK \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK + +#define BNXT_DATA1_LOWER_SFT \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT + +#define EVENT_PPS_TS(data2, data1) \ + (((u64)((data2) & BNXT_DATA2_UPPER_MSK) << BNXT_DATA2_UPPER_SFT) |\ + (((data1) & BNXT_DATA1_LOWER_MSK) >> BNXT_DATA1_LOWER_SFT)) + +#define BNXT_PPS_PIN_DISABLE 0 +#define BNXT_PPS_PIN_ENABLE 1 +#define BNXT_PPS_PIN_NONE 0 +#define BNXT_PPS_PIN_PPS_IN 1 +#define BNXT_PPS_PIN_PPS_OUT 2 +#define BNXT_PPS_PIN_SYNC_IN 3 +#define BNXT_PPS_PIN_SYNC_OUT 4 + +#define BNXT_PPS_EVENT_INTERNAL 1 +#define BNXT_PPS_EVENT_EXTERNAL 2 + +struct bnxt_pps { + u8 num_pins; +#define BNXT_MAX_TSIO_PINS 4 + struct pps_pin pins[BNXT_MAX_TSIO_PINS]; +}; + struct bnxt_ptp_cfg { struct ptp_clock_info ptp_info; struct ptp_clock *ptp_clock; struct cyclecounter cc; struct timecounter tc; + struct bnxt_pps pps_info; /* serialize timecounter access */ spinlock_t ptp_lock; struct sk_buff *tx_skb; @@ -77,6 +128,8 @@ do { \ #endif int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off); +void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2); +void bnxt_ptp_reapply_pps(struct bnxt *bp); int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr); int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr); int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 7fa881e1cd80..70d8ca3039dc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -17,6 +17,7 @@ #include <linux/etherdevice.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ulp.h" #include "bnxt_sriov.h" #include "bnxt_vfr.h" @@ -26,21 +27,26 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, struct bnxt_vf_info *vf, u16 event_id) { - struct hwrm_fwd_async_event_cmpl_input req = {0}; + struct hwrm_fwd_async_event_cmpl_input *req; struct hwrm_async_event_cmpl *async_cmpl; int rc = 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1); + rc = 
hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL); + if (rc) + goto exit; + if (vf) - req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid); + req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid); else /* broadcast this async event to all VFs */ - req.encap_async_event_target_id = cpu_to_le16(0xffff); - async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl; + req->encap_async_event_target_id = cpu_to_le16(0xffff); + async_cmpl = + (struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl; async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT); async_cmpl->event_id = cpu_to_le16(event_id); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); +exit: if (rc) netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n", rc); @@ -62,10 +68,10 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); - struct bnxt_vf_info *vf; + struct hwrm_func_cfg_input *req; bool old_setting = false; + struct bnxt_vf_info *vf; u32 func_flags; int rc; @@ -89,36 +95,38 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) /*TODO: if the driver supports VLAN filter on guest VLAN, * the spoof check should also include vlan anti-spoofing */ - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(func_flags); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); if (!rc) { - if (setting) - vf->flags |= BNXT_VF_SPOOFCHK; - else - vf->flags &= ~BNXT_VF_SPOOFCHK; + req->fid = cpu_to_le16(vf->fw_fid); + req->flags = cpu_to_le32(func_flags); + rc = hwrm_req_send(bp, req); + if (!rc) { + if (setting) + vf->flags |= BNXT_VF_SPOOFCHK; + else + vf->flags &= ~BNXT_VF_SPOOFCHK; + } } return rc; } static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) return rc; - } - vf->func_qcfg_flags = le16_to_cpu(resp->flags); - mutex_unlock(&bp->hwrm_cmd_lock); - return 0; + + req->fid = cpu_to_le16(BNXT_PF(bp) ? 
vf->fw_fid : 0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + vf->func_qcfg_flags = le16_to_cpu(resp->flags); + hwrm_req_drop(bp, req); + return rc; } bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) @@ -132,18 +140,22 @@ bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; + int rc; if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(vf->fw_fid); if (vf->flags & BNXT_VF_TRUST) - req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); + req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); else - req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE); + return hwrm_req_send(bp, req); } int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) @@ -203,8 +215,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id, int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; int rc; @@ -220,19 +232,23 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) } vf = &bp->pf.vf[vf_id]; + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + memcpy(vf->mac_addr, mac, ETH_ALEN); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); - memcpy(req.dflt_mac_addr, mac, ETH_ALEN); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + req->fid = cpu_to_le16(vf->fw_fid); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, mac, ETH_ALEN); + return hwrm_req_send(bp, req); } int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, __be16 vlan_proto) { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; u16 vlan_tag; int rc; @@ -258,21 +274,23 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, if (vlan_tag == vf->vlan) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.dflt_vlan = cpu_to_le16(vlan_tag); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) - vf->vlan = vlan_tag; + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (!rc) { + req->fid = cpu_to_le16(vf->fw_fid); + req->dflt_vlan = cpu_to_le16(vlan_tag); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + rc = hwrm_req_send(bp, req); + if (!rc) + vf->vlan = vlan_tag; + } return rc; } int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, int max_tx_rate) { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; u32 pf_link_speed; int rc; @@ -296,16 +314,18 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, } if (min_tx_rate == 
vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); - req.max_bw = cpu_to_le32(max_tx_rate); - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); - req.min_bw = cpu_to_le32(min_tx_rate); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); if (!rc) { - vf->min_tx_rate = min_tx_rate; - vf->max_tx_rate = max_tx_rate; + req->fid = cpu_to_le16(vf->fw_fid); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW | + FUNC_CFG_REQ_ENABLES_MIN_BW); + req->max_bw = cpu_to_le32(max_tx_rate); + req->min_bw = cpu_to_le32(min_tx_rate); + rc = hwrm_req_send(bp, req); + if (!rc) { + vf->min_tx_rate = min_tx_rate; + vf->max_tx_rate = max_tx_rate; + } } return rc; } @@ -358,21 +378,22 @@ static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs) { - int i, rc = 0; + struct hwrm_func_vf_resc_free_input *req; struct bnxt_pf_info *pf = &bp->pf; - struct hwrm_func_vf_resc_free_input req = {0}; + int i, rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) { - req.vf_id = cpu_to_le16(i); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->vf_id = cpu_to_le16(i); + rc = hwrm_req_send(bp, req); if (rc) break; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -446,51 +467,55 @@ static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) { - struct hwrm_func_buf_rgtr_input req = {0}; + struct hwrm_func_buf_rgtr_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR); + if (rc) + return rc; - req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); - req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); - req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); - req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); - req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); - req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); - req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); + req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); + req->req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); + req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); + req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); + req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); + req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); + req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } -/* Caller holds bp->hwrm_cmd_lock mutex lock */ -static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) +static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; vf = &bp->pf.vf[vf_id]; - 
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); + req->fid = cpu_to_le16(vf->fw_fid); if (is_valid_ether_addr(vf->mac_addr)) { - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); - memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN); + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN); } if (vf->vlan) { - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); - req.dflt_vlan = cpu_to_le16(vf->vlan); + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + req->dflt_vlan = cpu_to_le16(vf->vlan); } if (vf->max_tx_rate) { - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); - req.max_bw = cpu_to_le32(vf->max_tx_rate); -#ifdef HAVE_IFLA_TX_RATE - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); - req.min_bw = cpu_to_le32(vf->min_tx_rate); -#endif + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW | + FUNC_CFG_REQ_ENABLES_MIN_BW); + req->max_bw = cpu_to_le32(vf->max_tx_rate); + req->min_bw = cpu_to_le32(vf->min_tx_rate); } if (vf->flags & BNXT_VF_TRUST) - req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); + req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); - _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } /* Only called by PF to reserve resources for VFs, returns actual number of @@ -498,7 +523,7 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) */ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) { - struct hwrm_func_vf_resource_cfg_input req = {0}; + struct hwrm_func_vf_resource_cfg_input *req; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; u16 vf_tx_rings, vf_rx_rings, vf_cp_rings; u16 vf_stat_ctx, vf_vnics, vf_ring_grps; @@ -507,7 +532,9 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) u16 vf_msix = 0; u16 vf_rss; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG); + if (rc) + return rc; if (bp->flags & BNXT_FLAG_CHIP_P5) { vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp); @@ -526,21 +553,21 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs; - req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); + req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { min = 0; - req.min_rsscos_ctx = cpu_to_le16(min); + req->min_rsscos_ctx = cpu_to_le16(min); } if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL || pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { - req.min_cmpl_rings = cpu_to_le16(min); - req.min_tx_rings = cpu_to_le16(min); - req.min_rx_rings = cpu_to_le16(min); - req.min_l2_ctxs = cpu_to_le16(min); - req.min_vnics = cpu_to_le16(min); - req.min_stat_ctx = cpu_to_le16(min); + req->min_cmpl_rings = cpu_to_le16(min); + req->min_tx_rings = cpu_to_le16(min); + req->min_rx_rings = cpu_to_le16(min); + req->min_l2_ctxs = cpu_to_le16(min); + req->min_vnics = cpu_to_le16(min); + req->min_stat_ctx = cpu_to_le16(min); if (!(bp->flags & BNXT_FLAG_CHIP_P5)) - req.min_hw_ring_grps = cpu_to_le16(min); + req->min_hw_ring_grps = cpu_to_le16(min); } else { vf_cp_rings /= num_vfs; vf_tx_rings /= num_vfs; @@ -550,56 +577,57 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool 
reset) vf_ring_grps /= num_vfs; vf_rss /= num_vfs; - req.min_cmpl_rings = cpu_to_le16(vf_cp_rings); - req.min_tx_rings = cpu_to_le16(vf_tx_rings); - req.min_rx_rings = cpu_to_le16(vf_rx_rings); - req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); - req.min_vnics = cpu_to_le16(vf_vnics); - req.min_stat_ctx = cpu_to_le16(vf_stat_ctx); - req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps); - req.min_rsscos_ctx = cpu_to_le16(vf_rss); + req->min_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->min_tx_rings = cpu_to_le16(vf_tx_rings); + req->min_rx_rings = cpu_to_le16(vf_rx_rings); + req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); + req->min_vnics = cpu_to_le16(vf_vnics); + req->min_stat_ctx = cpu_to_le16(vf_stat_ctx); + req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->min_rsscos_ctx = cpu_to_le16(vf_rss); } - req.max_cmpl_rings = cpu_to_le16(vf_cp_rings); - req.max_tx_rings = cpu_to_le16(vf_tx_rings); - req.max_rx_rings = cpu_to_le16(vf_rx_rings); - req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); - req.max_vnics = cpu_to_le16(vf_vnics); - req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); - req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); - req.max_rsscos_ctx = cpu_to_le16(vf_rss); + req->max_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->max_tx_rings = cpu_to_le16(vf_tx_rings); + req->max_rx_rings = cpu_to_le16(vf_rx_rings); + req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); + req->max_vnics = cpu_to_le16(vf_vnics); + req->max_stat_ctx = cpu_to_le16(vf_stat_ctx); + req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->max_rsscos_ctx = cpu_to_le16(vf_rss); if (bp->flags & BNXT_FLAG_CHIP_P5) - req.max_msix = cpu_to_le16(vf_msix / num_vfs); + req->max_msix = cpu_to_le16(vf_msix / num_vfs); - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = 0; i < num_vfs; i++) { if (reset) __bnxt_set_vf_params(bp, i); - req.vf_id = cpu_to_le16(pf->first_vf_id + i); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->vf_id = cpu_to_le16(pf->first_vf_id + i); + rc = hwrm_req_send(bp, req); if (rc) break; pf->active_vfs = i + 1; pf->vf[i].fw_fid = pf->first_vf_id + i; } - mutex_unlock(&bp->hwrm_cmd_lock); + if (pf->active_vfs) { u16 n = pf->active_vfs; - hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n; - hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n; - hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) * - n; - hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n; - hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n; - hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n; - hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n; + hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n; + hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n; + hw_resc->max_hw_ring_grps -= + le16_to_cpu(req->min_hw_ring_grps) * n; + hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n; + hw_resc->max_rsscos_ctxs -= + le16_to_cpu(req->min_rsscos_ctx) * n; + hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n; + hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n; if (bp->flags & BNXT_FLAG_CHIP_P5) hw_resc->max_irqs -= vf_msix * n; rc = pf->active_vfs; } + hwrm_req_drop(bp, req); return rc; } @@ -608,15 +636,18 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) */ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) { - u32 rc = 0, mtu, i; u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - 
struct hwrm_func_cfg_input req = {0}; struct bnxt_pf_info *pf = &bp->pf;
+ struct hwrm_func_cfg_input *req;
 int total_vf_tx_rings = 0;
 u16 vf_ring_grps;
+ u32 mtu, i;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
 /* Remaining rings are distributed equally amongst VFs for now */
 vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs; @@ -632,50 +663,49 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
 vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
- FUNC_CFG_REQ_ENABLES_MRU |
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
- FUNC_CFG_REQ_ENABLES_NUM_VNICS |
- FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
+ FUNC_CFG_REQ_ENABLES_MRU |
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_VNICS |
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
 mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
- req.mru = cpu_to_le16(mtu);
- req.admin_mtu = cpu_to_le16(mtu);
+ req->mru = cpu_to_le16(mtu);
+ req->admin_mtu = cpu_to_le16(mtu);
- req.num_rsscos_ctxs = cpu_to_le16(1);
- req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
- req.num_tx_rings = cpu_to_le16(vf_tx_rings);
- req.num_rx_rings = cpu_to_le16(vf_rx_rings);
- req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
- req.num_l2_ctxs = cpu_to_le16(4);
+ req->num_rsscos_ctxs = cpu_to_le16(1);
+ req->num_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req->num_tx_rings = cpu_to_le16(vf_tx_rings);
+ req->num_rx_rings = cpu_to_le16(vf_rx_rings);
+ req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ req->num_l2_ctxs = cpu_to_le16(4);
- req.num_vnics = cpu_to_le16(vf_vnics);
+ req->num_vnics = cpu_to_le16(vf_vnics);
 /* FIXME spec currently uses 1 bit for stats ctx */
- req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
+ req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
- mutex_lock(&bp->hwrm_cmd_lock);
+ hwrm_req_hold(bp, req);
 for (i = 0; i < num_vfs; i++) {
 int vf_tx_rsvd = vf_tx_rings;
- req.fid = cpu_to_le16(pf->first_vf_id + i);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ req->fid = cpu_to_le16(pf->first_vf_id + i);
+ rc = hwrm_req_send(bp, req);
 if (rc)
 break;
 pf->active_vfs = i + 1;
- pf->vf[i].fw_fid = le16_to_cpu(req.fid);
+ pf->vf[i].fw_fid = le16_to_cpu(req->fid);
 rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
 &vf_tx_rsvd);
 if (rc)
 break;
 total_vf_tx_rings += vf_tx_rsvd;
 }
- mutex_unlock(&bp->hwrm_cmd_lock);
+ hwrm_req_drop(bp, req);
 if (pf->active_vfs) {
 hw_resc->max_tx_rings -= total_vf_tx_rings;
 hw_resc->max_rx_rings -= vf_rx_rings * num_vfs; @@ -893,23 +923,24 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 void *encap_resp, __le64 encap_resp_addr,
 __le16 encap_resp_cpr, u32 msg_size)
 {
- int rc = 0;
- struct hwrm_fwd_resp_input req = {0};
+ struct hwrm_fwd_resp_input *req;
+ int rc;
 if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
 return -EINVAL;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
-
- /* Set the new target id */
- req.target_id =
cpu_to_le16(vf->fw_fid); - req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); - req.encap_resp_len = cpu_to_le16(msg_size); - req.encap_resp_addr = encap_resp_addr; - req.encap_resp_cmpl_ring = encap_resp_cpr; - memcpy(req.encap_resp, encap_resp, msg_size); - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FWD_RESP); + if (!rc) { + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_len = cpu_to_le16(msg_size); + req->encap_resp_addr = encap_resp_addr; + req->encap_resp_cmpl_ring = encap_resp_cpr; + memcpy(req->encap_resp, encap_resp, msg_size); + + rc = hwrm_req_send(bp, req); + } if (rc) netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc); return rc; @@ -918,19 +949,21 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, u32 msg_size) { - int rc = 0; - struct hwrm_reject_fwd_resp_input req = {0}; + struct hwrm_reject_fwd_resp_input *req; + int rc; if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size)) return -EINVAL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); - /* Set the new target id */ - req.target_id = cpu_to_le16(vf->fw_fid); - req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); - memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); + rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP); + if (!rc) { + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); return rc; @@ -939,19 +972,21 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, u32 msg_size) { - int rc = 0; - struct hwrm_exec_fwd_resp_input req = {0}; + struct hwrm_exec_fwd_resp_input *req; + int rc; if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size)) return -EINVAL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); - /* Set the new target id */ - req.target_id = cpu_to_le16(vf->fw_fid); - req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); - memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); + rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP); + if (!rc) { + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_err(bp->dev, "hwrm_exec_fw_resp failed. 
rc:%d\n", rc); return rc; @@ -1031,10 +1066,10 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) phy_qcfg_req = (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr; - mutex_lock(&bp->hwrm_cmd_lock); + mutex_lock(&bp->link_lock); memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, sizeof(phy_qcfg_resp)); - mutex_unlock(&bp->hwrm_cmd_lock); + mutex_unlock(&bp->link_lock); phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp)); phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; phy_qcfg_resp.valid = 1; @@ -1118,7 +1153,7 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; int rc = 0; if (!BNXT_VF(bp)) @@ -1129,10 +1164,16 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) rc = -EADDRNOTAVAIL; goto mac_done; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); - req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); - memcpy(req.dflt_mac_addr, mac, ETH_ALEN); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); + if (rc) + goto mac_done; + + req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, mac, ETH_ALEN); + if (!strict) + hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT); + rc = hwrm_req_send(bp, req); mac_done: if (rc && strict) { rc = -EADDRNOTAVAIL; @@ -1145,15 +1186,17 @@ mac_done: void bnxt_update_vf_mac(struct bnxt *bp) { - struct hwrm_func_qcaps_input req = {0}; - struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcaps_output *resp; + struct hwrm_func_qcaps_input *req; bool inform_pf = false; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); - req.fid = cpu_to_le16(0xffff); + if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS)) + return; + + req->fid = cpu_to_le16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) + resp = hwrm_req_hold(bp, req); + if (hwrm_req_send(bp, req)) goto update_vf_mac_exit; /* Store MAC address from the firmware. 
There are 2 cases: @@ -1176,7 +1219,7 @@ void bnxt_update_vf_mac(struct bnxt *bp) if (is_valid_ether_addr(bp->vf.mac_addr)) memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); update_vf_mac_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (inform_pf) bnxt_approve_mac(bp, bp->dev->dev_addr, false); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 5e4429b14b8c..46fae1acbeed 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -22,6 +22,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_sriov.h" #include "bnxt_tc.h" #include "bnxt_vfr.h" @@ -502,16 +503,18 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, struct bnxt_tc_flow_node *flow_node) { - struct hwrm_cfa_flow_free_input req = { 0 }; + struct hwrm_cfa_flow_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1); - if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) - req.ext_flow_handle = flow_node->ext_flow_handle; - else - req.flow_handle = flow_node->flow_handle; + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_FREE); + if (!rc) { + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) + req->ext_flow_handle = flow_node->ext_flow_handle; + else + req->flow_handle = flow_node->flow_handle; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); @@ -587,20 +590,22 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, struct bnxt_tc_actions *actions = &flow->actions; struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask; struct bnxt_tc_l3_key *l3_key = &flow->l3_key; - struct hwrm_cfa_flow_alloc_input req = { 0 }; struct hwrm_cfa_flow_alloc_output *resp; + struct hwrm_cfa_flow_alloc_input *req; u16 flow_flags = 0, action_flags = 0; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_ALLOC); + if (rc) + return rc; - req.src_fid = cpu_to_le16(flow->src_fid); - req.ref_flow_handle = ref_flow_handle; + req->src_fid = cpu_to_le16(flow->src_fid); + req->ref_flow_handle = ref_flow_handle; if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) { - memcpy(req.l2_rewrite_dmac, actions->l2_rewrite_dmac, + memcpy(req->l2_rewrite_dmac, actions->l2_rewrite_dmac, ETH_ALEN); - memcpy(req.l2_rewrite_smac, actions->l2_rewrite_smac, + memcpy(req->l2_rewrite_smac, actions->l2_rewrite_smac, ETH_ALEN); action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; @@ -615,71 +620,71 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC; /* L3 source rewrite */ - req.nat_ip_address[0] = + req->nat_ip_address[0] = actions->nat.l3.ipv4.saddr.s_addr; /* L4 source port */ if (actions->nat.l4.ports.sport) - req.nat_port = + req->nat_port = actions->nat.l4.ports.sport; } else { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST; /* L3 destination rewrite */ - req.nat_ip_address[0] = + req->nat_ip_address[0] = actions->nat.l3.ipv4.daddr.s_addr; /* L4 destination port */ if (actions->nat.l4.ports.dport) - req.nat_port = + req->nat_port = actions->nat.l4.ports.dport; } netdev_dbg(bp->dev, - "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n", - req.nat_ip_address, actions->nat.src_xlate, - req.nat_port); + "req->nat_ip_address: 
%pI4 src_xlate: %d req->nat_port: %x\n", + req->nat_ip_address, actions->nat.src_xlate, + req->nat_port); } else { if (actions->nat.src_xlate) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC; /* L3 source rewrite */ - memcpy(req.nat_ip_address, + memcpy(req->nat_ip_address, actions->nat.l3.ipv6.saddr.s6_addr32, - sizeof(req.nat_ip_address)); + sizeof(req->nat_ip_address)); /* L4 source port */ if (actions->nat.l4.ports.sport) - req.nat_port = + req->nat_port = actions->nat.l4.ports.sport; } else { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST; /* L3 destination rewrite */ - memcpy(req.nat_ip_address, + memcpy(req->nat_ip_address, actions->nat.l3.ipv6.daddr.s6_addr32, - sizeof(req.nat_ip_address)); + sizeof(req->nat_ip_address)); /* L4 destination port */ if (actions->nat.l4.ports.dport) - req.nat_port = + req->nat_port = actions->nat.l4.ports.dport; } netdev_dbg(bp->dev, - "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n", - req.nat_ip_address, actions->nat.src_xlate, - req.nat_port); + "req->nat_ip_address: %pI6 src_xlate: %d req->nat_port: %x\n", + req->nat_ip_address, actions->nat.src_xlate, + req->nat_port); } } if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP || actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { - req.tunnel_handle = tunnel_handle; + req->tunnel_handle = tunnel_handle; flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL; action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL; } - req.ethertype = flow->l2_key.ether_type; - req.ip_proto = flow->l4_key.ip_proto; + req->ethertype = flow->l2_key.ether_type; + req->ip_proto = flow->l4_key.ip_proto; if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) { - memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN); - memcpy(req.smac, flow->l2_key.smac, ETH_ALEN); + memcpy(req->dmac, flow->l2_key.dmac, ETH_ALEN); + memcpy(req->smac, flow->l2_key.smac, ETH_ALEN); } if (flow->l2_key.num_vlans > 0) { @@ -688,7 +693,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, * in outer_vlan_tci when num_vlans is 1 (which is * always the case in TC.) 
*/ - req.outer_vlan_tci = flow->l2_key.inner_vlan_tci; + req->outer_vlan_tci = flow->l2_key.inner_vlan_tci; } /* If all IP and L4 fields are wildcarded then this is an L2 flow */ @@ -701,68 +706,67 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6; if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) { - req.ip_dst[0] = l3_key->ipv4.daddr.s_addr; - req.ip_dst_mask_len = + req->ip_dst[0] = l3_key->ipv4.daddr.s_addr; + req->ip_dst_mask_len = inet_mask_len(l3_mask->ipv4.daddr.s_addr); - req.ip_src[0] = l3_key->ipv4.saddr.s_addr; - req.ip_src_mask_len = + req->ip_src[0] = l3_key->ipv4.saddr.s_addr; + req->ip_src_mask_len = inet_mask_len(l3_mask->ipv4.saddr.s_addr); } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) { - memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32, - sizeof(req.ip_dst)); - req.ip_dst_mask_len = + memcpy(req->ip_dst, l3_key->ipv6.daddr.s6_addr32, + sizeof(req->ip_dst)); + req->ip_dst_mask_len = ipv6_mask_len(&l3_mask->ipv6.daddr); - memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32, - sizeof(req.ip_src)); - req.ip_src_mask_len = + memcpy(req->ip_src, l3_key->ipv6.saddr.s6_addr32, + sizeof(req->ip_src)); + req->ip_src_mask_len = ipv6_mask_len(&l3_mask->ipv6.saddr); } } if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) { - req.l4_src_port = flow->l4_key.ports.sport; - req.l4_src_port_mask = flow->l4_mask.ports.sport; - req.l4_dst_port = flow->l4_key.ports.dport; - req.l4_dst_port_mask = flow->l4_mask.ports.dport; + req->l4_src_port = flow->l4_key.ports.sport; + req->l4_src_port_mask = flow->l4_mask.ports.sport; + req->l4_dst_port = flow->l4_key.ports.dport; + req->l4_dst_port_mask = flow->l4_mask.ports.dport; } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) { /* l4 ports serve as type/code when ip_proto is ICMP */ - req.l4_src_port = htons(flow->l4_key.icmp.type); - req.l4_src_port_mask = htons(flow->l4_mask.icmp.type); - req.l4_dst_port = htons(flow->l4_key.icmp.code); - req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code); + req->l4_src_port = htons(flow->l4_key.icmp.type); + req->l4_src_port_mask = htons(flow->l4_mask.icmp.type); + req->l4_dst_port = htons(flow->l4_key.icmp.code); + req->l4_dst_port_mask = htons(flow->l4_mask.icmp.code); } - req.flags = cpu_to_le16(flow_flags); + req->flags = cpu_to_le16(flow_flags); if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP; } else { if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD; - req.dst_fid = cpu_to_le16(actions->dst_fid); + req->dst_fid = cpu_to_le16(actions->dst_fid); } if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; - req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid; - req.l2_rewrite_vlan_tci = actions->push_vlan_tci; - memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN); - memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN); + req->l2_rewrite_vlan_tpid = actions->push_vlan_tpid; + req->l2_rewrite_vlan_tci = actions->push_vlan_tci; + memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN); + memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN); } if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; /* Rewrite config with tpid = 0 implies vlan pop */ - req.l2_rewrite_vlan_tpid = 0; - memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN); - memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN); + req->l2_rewrite_vlan_tpid = 0; + 
memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN); + memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN); } } - req.action_flags = cpu_to_le16(action_flags); + req->action_flags = cpu_to_le16(action_flags); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); /* CFA_FLOW_ALLOC response interpretation: * fw with fw with * 16-bit 64-bit @@ -778,7 +782,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, flow_node->flow_id = resp->flow_id; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -788,67 +792,69 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, __le32 ref_decap_handle, __le32 *decap_filter_handle) { - struct hwrm_cfa_decap_filter_alloc_input req = { 0 }; struct hwrm_cfa_decap_filter_alloc_output *resp; struct ip_tunnel_key *tun_key = &flow->tun_key; + struct hwrm_cfa_decap_filter_alloc_input *req; u32 enables = 0; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_ALLOC); + if (rc) + goto exit; - req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL); + req->flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL); enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE | CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL; - req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; - req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP; + req->tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + req->ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP; if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID; /* tunnel_id is wrongly defined in hsi defn. as __le32 */ - req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id); + req->tunnel_id = tunnel_id_to_key32(tun_key->tun_id); } if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR; - ether_addr_copy(req.dst_macaddr, l2_info->dmac); + ether_addr_copy(req->dst_macaddr, l2_info->dmac); } if (l2_info->num_vlans) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID; - req.t_ivlan_vid = l2_info->inner_vlan_tci; + req->t_ivlan_vid = l2_info->inner_vlan_tci; } enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE; - req.ethertype = htons(ETH_P_IP); + req->ethertype = htons(ETH_P_IP); if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE; - req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; - req.dst_ipaddr[0] = tun_key->u.ipv4.dst; - req.src_ipaddr[0] = tun_key->u.ipv4.src; + req->ip_addr_type = + CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; + req->dst_ipaddr[0] = tun_key->u.ipv4.dst; + req->src_ipaddr[0] = tun_key->u.ipv4.src; } if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT; - req.dst_port = tun_key->tp_dst; + req->dst_port = tun_key->tp_dst; } /* Eventhough the decap_handle returned by hwrm_cfa_decap_filter_alloc * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16. 
*/ - req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle; - req.enables = cpu_to_le32(enables); + req->l2_ctxt_ref_id = (__force __le16)ref_decap_handle; + req->enables = cpu_to_le32(enables); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) *decap_filter_handle = resp->decap_filter_id; - } else { + hwrm_req_drop(bp, req); +exit: + if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); - } - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -856,13 +862,14 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, static int hwrm_cfa_decap_filter_free(struct bnxt *bp, __le32 decap_filter_handle) { - struct hwrm_cfa_decap_filter_free_input req = { 0 }; + struct hwrm_cfa_decap_filter_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1); - req.decap_filter_id = decap_filter_handle; - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_FREE); + if (!rc) { + req->decap_filter_id = decap_filter_handle; + rc = hwrm_req_send(bp, req); + } if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); @@ -874,18 +881,18 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, struct bnxt_tc_l2_key *l2_info, __le32 *encap_record_handle) { - struct hwrm_cfa_encap_record_alloc_input req = { 0 }; struct hwrm_cfa_encap_record_alloc_output *resp; - struct hwrm_cfa_encap_data_vxlan *encap = - (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data; - struct hwrm_vxlan_ipv4_hdr *encap_ipv4 = - (struct hwrm_vxlan_ipv4_hdr *)encap->l3; + struct hwrm_cfa_encap_record_alloc_input *req; + struct hwrm_cfa_encap_data_vxlan *encap; + struct hwrm_vxlan_ipv4_hdr *encap_ipv4; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1); - - req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN; + rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_ALLOC); + if (rc) + goto exit; + encap = (struct hwrm_cfa_encap_data_vxlan *)&req->encap_data; + req->encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN; ether_addr_copy(encap->dst_mac_addr, l2_info->dmac); ether_addr_copy(encap->src_mac_addr, l2_info->smac); if (l2_info->num_vlans) { @@ -894,6 +901,7 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, encap->ovlan_tpid = l2_info->inner_vlan_tpid; } + encap_ipv4 = (struct hwrm_vxlan_ipv4_hdr *)encap->l3; encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT; encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT; encap_ipv4->ttl = encap_key->ttl; @@ -905,15 +913,14 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, encap->dst_port = encap_key->tp_dst; encap->vni = tunnel_id_to_key32(encap_key->tun_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) *encap_record_handle = resp->encap_record_id; - } else { + hwrm_req_drop(bp, req); +exit: + if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); - } - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -921,13 +928,14 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, static int hwrm_cfa_encap_record_free(struct bnxt *bp, __le32 encap_record_handle) { - struct hwrm_cfa_encap_record_free_input 
req = { 0 }; + struct hwrm_cfa_encap_record_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1); - req.encap_record_id = encap_record_handle; - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_FREE); + if (!rc) { + req->encap_record_id = encap_record_handle; + rc = hwrm_req_send(bp, req); + } if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); @@ -1673,14 +1681,20 @@ static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, struct bnxt_tc_stats_batch stats_batch[]) { - struct hwrm_cfa_flow_stats_input req = { 0 }; struct hwrm_cfa_flow_stats_output *resp; - __le16 *req_flow_handles = &req.flow_handle_0; - __le32 *req_flow_ids = &req.flow_id_0; + struct hwrm_cfa_flow_stats_input *req; + __le16 *req_flow_handles; + __le32 *req_flow_ids; int rc, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1); - req.num_flows = cpu_to_le16(num_flows); + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_STATS); + if (rc) + goto exit; + + req_flow_handles = &req->flow_handle_0; + req_flow_ids = &req->flow_id_0; + + req->num_flows = cpu_to_le16(num_flows); for (i = 0; i < num_flows; i++) { struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; @@ -1688,13 +1702,12 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, &req_flow_handles[i], &req_flow_ids[i]); } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { __le64 *resp_packets; __le64 *resp_bytes; - resp = bnxt_get_hwrm_resp_addr(bp, &req); resp_packets = &resp->packet_0; resp_bytes = &resp->byte_0; @@ -1704,10 +1717,11 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, stats_batch[i].hw_stats.bytes = le64_to_cpu(resp_bytes[i]); } - } else { - netdev_info(bp->dev, "error rc=%d\n", rc); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); +exit: + if (rc) + netdev_info(bp->dev, "error rc=%d\n", rc); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 187ff643ad2a..fde0c3e8ac57 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -22,6 +22,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ulp.h" static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id, @@ -237,27 +238,33 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); + struct output *resp; struct input *req; + u32 resp_len; int rc; if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state) return -EBUSY; - mutex_lock(&bp->hwrm_cmd_lock); - req = fw_msg->msg; - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); - rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len, - fw_msg->timeout); - if (!rc) { - struct output *resp = bp->hwrm_cmd_resp_addr; - u32 len = le16_to_cpu(resp->resp_len); + rc = hwrm_req_init(bp, req, 0 /* don't care */); + if (rc) + return rc; - if (fw_msg->resp_max_len < len) - len = fw_msg->resp_max_len; + rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len); + if (rc) + return rc; - memcpy(fw_msg->resp, resp, len); + hwrm_req_timeout(bp, req, fw_msg->timeout); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + resp_len = le16_to_cpu(resp->resp_len); + if (resp_len) { + if 
(fw_msg->resp_max_len < resp_len) + resp_len = fw_msg->resp_max_len; + + memcpy(fw_msg->resp, resp, resp_len); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index dd66302343a2..9401936b74fa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -15,6 +15,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_vfr.h" #include "bnxt_devlink.h" #include "bnxt_tc.h" @@ -27,38 +28,40 @@ static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx, u16 *tx_cfa_action, u16 *rx_cfa_code) { - struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_cfa_vfr_alloc_input req = { 0 }; + struct hwrm_cfa_vfr_alloc_output *resp; + struct hwrm_cfa_vfr_alloc_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1); - req.vf_id = cpu_to_le16(vf_idx); - sprintf(req.vfr_name, "vfr%d", vf_idx); - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_ALLOC); if (!rc) { - *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action); - *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code); - netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", - *tx_cfa_action, *rx_cfa_code); - } else { - netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); + req->vf_id = cpu_to_le16(vf_idx); + sprintf(req->vfr_name, "vfr%d", vf_idx); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action); + *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code); + netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", + *tx_cfa_action, *rx_cfa_code); + } + hwrm_req_drop(bp, req); } - - mutex_unlock(&bp->hwrm_cmd_lock); + if (rc) + netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); return rc; } static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) { - struct hwrm_cfa_vfr_free_input req = { 0 }; + struct hwrm_cfa_vfr_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1); - sprintf(req.vfr_name, "vfr%d", vf_idx); - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_FREE); + if (!rc) { + sprintf(req->vfr_name, "vfr%d", vf_idx); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); return rc; @@ -67,17 +70,18 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, u16 *max_mtu) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; u16 mtu; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid); - - mutex_lock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { mtu = le16_to_cpu(resp->max_mtu_configured); if (!mtu) @@ -85,7 +89,7 @@ static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, else *max_mtu = mtu; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, 
req); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index bee6e091a997..c8083df5e0ab 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -87,7 +87,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), dma_unmap_len(tx_buf, len), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); xdp_return_frame(tx_buf->xdpf); tx_buf->action = 0; tx_buf->xdpf = NULL; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index db74241935ab..23c7595d2a1d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -828,7 +828,9 @@ static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) } static int bcmgenet_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bcmgenet_priv *priv = netdev_priv(dev); struct bcmgenet_rx_ring *ring; @@ -890,7 +892,9 @@ static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring, } static int bcmgenet_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bcmgenet_priv *priv = netdev_priv(dev); unsigned int i; @@ -3659,7 +3663,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = { .ndo_tx_timeout = bcmgenet_timeout, .ndo_set_rx_mode = bcmgenet_set_rx_mode, .ndo_set_mac_address = bcmgenet_set_mac_addr, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_features = bcmgenet_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = bcmgenet_poll_controller, @@ -3972,8 +3976,6 @@ static int bcmgenet_probe(struct platform_device *pdev) */ dev->needed_headroom += 64; - netdev_boot_setup_check(dev); - priv->dev = dev; priv->pdev = pdev; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 5b4568c2ad1c..f38f40eb966e 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2136,7 +2136,7 @@ static const struct net_device_ops sbmac_netdev_ops = { .ndo_start_xmit = sbmac_start_tx, .ndo_set_rx_mode = sbmac_set_rx_mode, .ndo_tx_timeout = sbmac_tx_timeout, - .ndo_do_ioctl = sbmac_mii_ioctl, + .ndo_eth_ioctl = sbmac_mii_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index b0e49643f483..8a238e349e02 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6564,10 +6564,8 @@ static void tg3_tx(struct tg3_napi *tnapi) skb_tstamp_tx(skb, &timestamp); } - pci_unmap_single(tp->pdev, - dma_unmap_addr(ri, mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); + dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), + skb_headlen(skb), DMA_TO_DEVICE); ri->skb = NULL; @@ -6584,10 +6582,10 @@ static void tg3_tx(struct tg3_napi *tnapi) if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) tx_bug = 1; - pci_unmap_page(tp->pdev, + dma_unmap_page(&tp->pdev->dev, dma_unmap_addr(ri, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE); while (ri->fragmented) { ri->fragmented = false; @@ -6646,8 +6644,8 @@ static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) if (!ri->data) return; - pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), - map_sz, PCI_DMA_FROMDEVICE); + dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz, + DMA_FROM_DEVICE); tg3_frag_free(skb_size <= PAGE_SIZE, ri->data); ri->data = NULL; } @@ -6711,11 +6709,9 @@ static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, if (!data) return -ENOMEM; - mapping = pci_map_single(tp->pdev, - data + TG3_RX_OFFSET(tp), - data_size, - PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) { + mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp), + data_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) { tg3_frag_free(skb_size <= PAGE_SIZE, data); return -EIO; } @@ -6882,8 +6878,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) if (skb_size < 0) goto drop_it; - pci_unmap_single(tp->pdev, dma_addr, skb_size, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size, + DMA_FROM_DEVICE); /* Ensure that the update to the data happens * after the usage of the old DMA mapping. @@ -6908,11 +6904,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) goto drop_it_no_recycle; skb_reserve(skb, TG3_RAW_IP_ALIGN); - pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len, + DMA_FROM_DEVICE); memcpy(skb->data, data + TG3_RX_OFFSET(tp), len); - pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&tp->pdev->dev, dma_addr, + len, DMA_FROM_DEVICE); } skb_put(skb, len); @@ -7762,10 +7760,8 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) skb = txb->skb; txb->skb = NULL; - pci_unmap_single(tnapi->tp->pdev, - dma_unmap_addr(txb, mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); + dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping), + skb_headlen(skb), DMA_TO_DEVICE); while (txb->fragmented) { txb->fragmented = false; @@ -7779,9 +7775,9 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) entry = NEXT_TX(entry); txb = &tnapi->tx_buffers[entry]; - pci_unmap_page(tnapi->tp->pdev, + dma_unmap_page(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping), - skb_frag_size(frag), PCI_DMA_TODEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); while (txb->fragmented) { txb->fragmented = false; @@ -7816,10 +7812,10 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, ret = -1; } else { /* New SKB is guaranteed to be linear. 
*/ - new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, - PCI_DMA_TODEVICE); + new_addr = dma_map_single(&tp->pdev->dev, new_skb->data, + new_skb->len, DMA_TO_DEVICE); /* Make sure the mapping succeeded */ - if (pci_dma_mapping_error(tp->pdev, new_addr)) { + if (dma_mapping_error(&tp->pdev->dev, new_addr)) { dev_kfree_skb_any(new_skb); ret = -1; } else { @@ -8043,8 +8039,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) len = skb_headlen(skb); - mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tp->pdev, mapping)) + mapping = dma_map_single(&tp->pdev->dev, skb->data, len, + DMA_TO_DEVICE); + if (dma_mapping_error(&tp->pdev->dev, mapping)) goto drop; @@ -13499,8 +13496,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) for (i = data_off; i < tx_len; i++) tx_data[i] = (u8) (i & 0xff); - map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tp->pdev, map)) { + map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE); + if (dma_mapping_error(&tp->pdev->dev, map)) { dev_kfree_skb(skb); return -EIO; } @@ -13598,8 +13595,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) } else goto out; - pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len, + DMA_FROM_DEVICE); rx_data += TG3_RX_OFFSET(tp); for (i = data_off; i < rx_len; i++, val++) { @@ -14040,7 +14037,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } -static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +static int tg3_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct tg3 *tp = netdev_priv(dev); @@ -14048,7 +14048,10 @@ static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) return 0; } -static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +static int tg3_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct tg3 *tp = netdev_priv(dev); u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; @@ -14290,7 +14293,7 @@ static const struct net_device_ops tg3_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = tg3_set_rx_mode, .ndo_set_mac_address = tg3_set_mac_addr, - .ndo_do_ioctl = tg3_ioctl, + .ndo_eth_ioctl = tg3_ioctl, .ndo_tx_timeout = tg3_tx_timeout, .ndo_change_mtu = tg3_change_mtu, .ndo_fix_features = tg3_fix_features, @@ -17755,11 +17758,11 @@ static int tg3_init_one(struct pci_dev *pdev, /* Configure DMA attributes. 
*/ if (dma_mask > DMA_BIT_MASK(32)) { - err = pci_set_dma_mask(pdev, dma_mask); + err = dma_set_mask(&pdev->dev, dma_mask); if (!err) { features |= NETIF_F_HIGHDMA; - err = pci_set_consistent_dma_mask(pdev, - persist_dma_mask); + err = dma_set_coherent_mask(&pdev->dev, + persist_dma_mask); if (err < 0) { dev_err(&pdev->dev, "Unable to obtain 64 bit " "DMA for consistent allocations\n"); @@ -17768,7 +17771,7 @@ static int tg3_init_one(struct pci_dev *pdev, } } if (err || dma_mask == DMA_BIT_MASK(32)) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 265c2fa6bbe0..391b85f25141 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -307,8 +307,10 @@ bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo) wolinfo->wolopts = 0; } -static int -bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) +static int bnad_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnad *bnad = netdev_priv(netdev); unsigned long flags; @@ -328,8 +330,10 @@ bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) return 0; } -static int -bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) +static int bnad_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnad *bnad = netdev_priv(netdev); unsigned long flags; diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index e432a68ac520..5b2a461dfd28 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -22,6 +22,7 @@ if NET_VENDOR_CADENCE config MACB tristate "Cadence MACB/GEM support" depends on HAS_DMA && COMMON_CLK + depends on PTP_1588_CLOCK_OPTIONAL select PHYLINK select CRC32 help diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 7d2fe13a52f8..d13fb1d31821 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3664,7 +3664,7 @@ static const struct net_device_ops macb_netdev_ops = { .ndo_start_xmit = macb_start_xmit, .ndo_set_rx_mode = macb_set_rx_mode, .ndo_get_stats = macb_get_stats, - .ndo_do_ioctl = macb_ioctl, + .ndo_eth_ioctl = macb_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = macb_change_mtu, .ndo_set_mac_address = eth_mac_addr, @@ -4323,7 +4323,7 @@ static const struct net_device_ops at91ether_netdev_ops = { .ndo_get_stats = macb_get_stats, .ndo_set_rx_mode = macb_set_rx_mode, .ndo_set_mac_address = eth_mac_addr, - .ndo_do_ioctl = macb_ioctl, + .ndo_eth_ioctl = macb_ioctl, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = at91ether_poll_controller, @@ -4533,6 +4533,14 @@ static const struct macb_config sama5d2_config = { .usrio = &macb_default_usrio, }; +static const struct macb_config sama5d29_config = { + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_GEM_HAS_PTP, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = macb_init, + .usrio = &macb_default_usrio, +}; + 
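The new sama5d29_config above takes effect through the "atmel,sama5d29-gem" compatible string added to the macb_dt_ids table in a later hunk of this same file. As a rough, hypothetical sketch (not the driver's actual probe code; struct macb_config is the driver's own type from macb.h), this is how the .data pointer of a matching OF table entry typically hands such a per-SoC config to the probe path:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_macb_probe(struct platform_device *pdev)
{
	const struct macb_config *cfg;

	/* Returns the .data of the matching macb_dt_ids entry,
	 * e.g. &sama5d29_config for "atmel,sama5d29-gem". */
	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -EINVAL;

	/* cfg->caps, cfg->dma_burst_length, cfg->clk_init and
	 * cfg->init then drive the rest of device setup. */
	return 0;
}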
static const struct macb_config sama5d3_config = { .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO, @@ -4610,6 +4618,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,gem", .data = &pc302gem_config }, { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config }, { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, + { .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config }, { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config }, { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 4875cdae622e..1c76c95b0b27 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -66,7 +66,7 @@ config LIQUIDIO tristate "Cavium LiquidIO support" depends on 64BIT && PCI depends on PCI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL select FW_LOADER select LIBCRC32C select NET_DEVLINK @@ -91,7 +91,7 @@ config OCTEON_MGMT_ETHERNET config LIQUIDIO_VF tristate "Cavium LiquidIO VF support" depends on 64BIT && PCI_MSI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL help This driver supports Cavium LiquidIO Intelligent Server Adapter based on CN23XX chips. diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 66f2c553370c..2b9747867d4c 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -2108,7 +2108,9 @@ static int octnet_set_intrmod_cfg(struct lio *lio, } static int lio_get_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *intr_coal) + struct ethtool_coalesce *intr_coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; @@ -2412,7 +2414,9 @@ oct_cfg_tx_intrcnt(struct lio *lio, } static int lio_set_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *intr_coal) + struct ethtool_coalesce *intr_coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct lio *lio = GET_LIO(netdev); int ret; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 591229b96257..2907e13b9df6 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1457,7 +1457,7 @@ static void free_netsgbuf(void *buf) while (frags--) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; - pci_unmap_page((lio->oct_dev)->pci_dev, + dma_unmap_page(&lio->oct_dev->pci_dev->dev, g->sg[(i >> 2)].ptr[(i & 3)], skb_frag_size(frag), DMA_TO_DEVICE); i++; @@ -1500,7 +1500,7 @@ static void free_netsgbuf_with_resp(void *buf) while (frags--) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; - pci_unmap_page((lio->oct_dev)->pci_dev, + dma_unmap_page(&lio->oct_dev->pci_dev->dev, g->sg[(i >> 2)].ptr[(i & 3)], skb_frag_size(frag), DMA_TO_DEVICE); i++; @@ -3223,7 +3223,7 @@ static const struct net_device_ops lionetdevops = { .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, .ndo_change_mtu = liquidio_change_mtu, - .ndo_do_ioctl = liquidio_ioctl, + .ndo_eth_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, 
.ndo_set_features = liquidio_set_features, .ndo_set_vf_mac = liquidio_set_vf_mac, @@ -3750,7 +3750,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) } devlink = devlink_alloc(&liquidio_devlink_ops, - sizeof(struct lio_devlink_priv)); + sizeof(struct lio_devlink_priv), + &octeon_dev->pci_dev->dev); if (!devlink) { dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); goto setup_nic_dev_free; @@ -3759,7 +3760,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio_devlink = devlink_priv(devlink); lio_devlink->oct = octeon_dev; - if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) { + if (devlink_register(devlink)) { devlink_free(devlink); dev_err(&octeon_dev->pci_dev->dev, "devlink registration failed\n"); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index ffddb3126a32..c6fe0f2a4d0e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -843,7 +843,7 @@ static void free_netsgbuf(void *buf) while (frags--) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; - pci_unmap_page((lio->oct_dev)->pci_dev, + dma_unmap_page(&lio->oct_dev->pci_dev->dev, g->sg[(i >> 2)].ptr[(i & 3)], skb_frag_size(frag), DMA_TO_DEVICE); i++; @@ -887,7 +887,7 @@ static void free_netsgbuf_with_resp(void *buf) while (frags--) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; - pci_unmap_page((lio->oct_dev)->pci_dev, + dma_unmap_page(&lio->oct_dev->pci_dev->dev, g->sg[(i >> 2)].ptr[(i & 3)], skb_frag_size(frag), DMA_TO_DEVICE); i++; @@ -1889,7 +1889,7 @@ static const struct net_device_ops lionetdevops = { .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, .ndo_change_mtu = liquidio_change_mtu, - .ndo_do_ioctl = liquidio_ioctl, + .ndo_eth_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, .ndo_set_features = liquidio_set_features, }; diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 48ff6fb0eed9..30463a6d1f8c 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1373,7 +1373,7 @@ static const struct net_device_ops octeon_mgmt_ops = { .ndo_start_xmit = octeon_mgmt_xmit, .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, .ndo_set_mac_address = octeon_mgmt_set_mac_address, - .ndo_do_ioctl = octeon_mgmt_ioctl, + .ndo_eth_ioctl = octeon_mgmt_ioctl, .ndo_change_mtu = octeon_mgmt_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = octeon_mgmt_poll_controller, diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 9361f964bb9b..691e1475d55e 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -1322,18 +1322,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_disable_device; } - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "Unable to get usable DMA configuration\n"); goto err_release_regions; } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); - if (err) { - dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n"); - goto err_release_regions; - } - /* MAP PF's configuration registers */ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); if 
(!nic->reg_base) { diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 2f218fbfed06..7f2882109b16 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -456,7 +456,9 @@ static void nicvf_get_regs(struct net_device *dev, } static int nicvf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *cmd) + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct nicvf *nic = netdev_priv(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index e2b290135fd9..d1667b759522 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2096,7 +2096,7 @@ static const struct net_device_ops nicvf_netdev_ops = { .ndo_fix_features = nicvf_fix_features, .ndo_set_features = nicvf_set_features, .ndo_bpf = nicvf_xdp, - .ndo_do_ioctl = nicvf_ioctl, + .ndo_eth_ioctl = nicvf_ioctl, .ndo_set_rx_mode = nicvf_set_rx_mode, }; @@ -2130,18 +2130,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_disable_device; } - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "Unable to get usable DMA configuration\n"); goto err_release_regions; } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); - if (err) { - dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n"); - goto err_release_regions; - } - qcount = netif_get_num_default_rss_queues(); /* Restrict multiqset support only for host bound VFs */ diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index 8ba0e08e5e64..c931ec8cac40 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -69,6 +69,7 @@ config CHELSIO_T3 config CHELSIO_T4 tristate "Chelsio Communications T4/T5/T6 Ethernet support" depends on PCI && (IPV6 || IPV6=n) && (TLS || TLS=n) + depends on PTP_1588_CLOCK_OPTIONAL select FW_LOADER select MDIO select ZLIB_DEFLATE diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 512da98019c6..73c016166f06 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -748,7 +748,9 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) return 0; } -static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct adapter *adapter = dev->ml_priv; @@ -759,7 +761,9 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) return 0; } -static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct adapter *adapter = dev->ml_priv; @@ -924,7 +928,7 @@ static const struct net_device_ops cxgb_netdev_ops = { .ndo_get_stats = t1_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = t1_set_rxmode, - .ndo_do_ioctl = t1_ioctl, + .ndo_eth_ioctl = t1_ioctl, .ndo_change_mtu = t1_change_mtu, .ndo_set_mac_address = 
t1_set_mac_addr, .ndo_fix_features = t1_fix_features, diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 57f210c53afc..38e47703f9ab 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -1996,7 +1996,9 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) return 0; } -static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; @@ -2017,7 +2019,9 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) return 0; } -static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; @@ -2135,13 +2139,18 @@ static int in_range(int val, int lo, int hi) return val < 0 || (val <= hi && val >= lo); } -static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) +static int cxgb_siocdevprivate(struct net_device *dev, + struct ifreq *ifreq, + void __user *useraddr, + int cmd) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; - u32 cmd; int ret; + if (cmd != SIOCCHIOCTL) + return -EOPNOTSUPP; + if (copy_from_user(&cmd, useraddr, sizeof(cmd))) return -EFAULT; @@ -2546,8 +2555,6 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) fallthrough; case SIOCGMIIPHY: return mdio_mii_ioctl(&pi->phy.mdio, data, cmd); - case SIOCCHIOCTL: - return cxgb_extension_ioctl(dev, req->ifr_data); default: return -EOPNOTSUPP; } @@ -3181,7 +3188,8 @@ static const struct net_device_ops cxgb_netdev_ops = { .ndo_get_stats = cxgb_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = cxgb_set_rxmode, - .ndo_do_ioctl = cxgb_ioctl, + .ndo_eth_ioctl = cxgb_ioctl, + .ndo_siocdevprivate = cxgb_siocdevprivate, .ndo_change_mtu = cxgb_change_mtu, .ndo_set_mac_address = cxgb_set_mac_addr, .ndo_fix_features = cxgb_fix_features, @@ -3231,15 +3239,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_disable_device; } - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " - "coherent allocations\n"); - goto out_release_regions; - } - } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { + } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto out_release_regions; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index cb5c79c43bc9..e21a2e691382 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -244,8 +244,8 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, frag_idx = d->fragidx; if (frag_idx == 0 && skb_headlen(skb)) { - pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), - skb_headlen(skb), PCI_DMA_TODEVICE); + 
dma_unmap_single(&pdev->dev, be64_to_cpu(sgp->addr[0]), + skb_headlen(skb), DMA_TO_DEVICE); j = 1; } @@ -253,9 +253,9 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, nfrags = skb_shinfo(skb)->nr_frags; while (frag_idx < nfrags && curflit < WR_FLITS) { - pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]), + dma_unmap_page(&pdev->dev, be64_to_cpu(sgp->addr[j]), skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); j ^= 1; if (j == 0) { sgp++; @@ -355,15 +355,14 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, if (q->use_pages && d->pg_chunk.page) { (*d->pg_chunk.p_cnt)--; if (!*d->pg_chunk.p_cnt) - pci_unmap_page(pdev, - d->pg_chunk.mapping, - q->alloc_size, PCI_DMA_FROMDEVICE); + dma_unmap_page(&pdev->dev, d->pg_chunk.mapping, + q->alloc_size, DMA_FROM_DEVICE); put_page(d->pg_chunk.page); d->pg_chunk.page = NULL; } else { - pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr), - q->buf_size, PCI_DMA_FROMDEVICE); + dma_unmap_single(&pdev->dev, dma_unmap_addr(d, dma_addr), + q->buf_size, DMA_FROM_DEVICE); kfree_skb(d->skb); d->skb = NULL; } @@ -414,8 +413,8 @@ static inline int add_one_rx_buf(void *va, unsigned int len, { dma_addr_t mapping; - mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(pdev, mapping))) + mapping = dma_map_single(&pdev->dev, va, len, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&pdev->dev, mapping))) return -ENOMEM; dma_unmap_addr_set(sd, dma_addr, mapping); @@ -453,9 +452,9 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - SGE_PG_RSVD; q->pg_chunk.offset = 0; - mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, - 0, q->alloc_size, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { + mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page, + 0, q->alloc_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&adapter->pdev->dev, mapping))) { __free_pages(q->pg_chunk.page, order); q->pg_chunk.page = NULL; return -EIO; @@ -522,9 +521,9 @@ nomem: q->alloc_failed++; dma_unmap_addr_set(sd, dma_addr, mapping); add_one_rx_chunk(mapping, d, q->gen); - pci_dma_sync_single_for_device(adap->pdev, mapping, - q->buf_size - SGE_PG_RSVD, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&adap->pdev->dev, mapping, + q->buf_size - SGE_PG_RSVD, + DMA_FROM_DEVICE); } else { void *buf_start; @@ -793,13 +792,13 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, skb = alloc_skb(len, GFP_ATOMIC); if (likely(skb != NULL)) { __skb_put(skb, len); - pci_dma_sync_single_for_cpu(adap->pdev, - dma_unmap_addr(sd, dma_addr), len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&adap->pdev->dev, + dma_unmap_addr(sd, dma_addr), + len, DMA_FROM_DEVICE); memcpy(skb->data, sd->skb->data, len); - pci_dma_sync_single_for_device(adap->pdev, - dma_unmap_addr(sd, dma_addr), len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&adap->pdev->dev, + dma_unmap_addr(sd, dma_addr), + len, DMA_FROM_DEVICE); } else if (!drop_thres) goto use_orig_buf; recycle: @@ -813,8 +812,8 @@ recycle: goto recycle; use_orig_buf: - pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr), - fl->buf_size, PCI_DMA_FROMDEVICE); + dma_unmap_single(&adap->pdev->dev, dma_unmap_addr(sd, dma_addr), + fl->buf_size, DMA_FROM_DEVICE); skb = sd->skb; skb_put(skb, len); __refill_fl(adap, fl); @@ -854,12 +853,11 @@ static struct sk_buff *get_packet_pg(struct 
adapter *adap, struct sge_fl *fl, newskb = alloc_skb(len, GFP_ATOMIC); if (likely(newskb != NULL)) { __skb_put(newskb, len); - pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr, + len, DMA_FROM_DEVICE); memcpy(newskb->data, sd->pg_chunk.va, len); - pci_dma_sync_single_for_device(adap->pdev, dma_addr, - len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&adap->pdev->dev, dma_addr, + len, DMA_FROM_DEVICE); } else if (!drop_thres) return NULL; recycle: @@ -883,14 +881,12 @@ recycle: goto recycle; } - pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr, len, + DMA_FROM_DEVICE); (*sd->pg_chunk.p_cnt)--; if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) - pci_unmap_page(adap->pdev, - sd->pg_chunk.mapping, - fl->alloc_size, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping, + fl->alloc_size, DMA_FROM_DEVICE); if (!skb) { __skb_put(newskb, SGE_RX_PULL_LEN); memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); @@ -968,9 +964,9 @@ static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, const struct skb_shared_info *si; if (skb_headlen(skb)) { - *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, *addr)) + *addr = dma_map_single(&pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, *addr)) goto out_err; addr++; } @@ -981,7 +977,7 @@ static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, for (fp = si->frags; fp < end; fp++) { *addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), DMA_TO_DEVICE); - if (pci_dma_mapping_error(pdev, *addr)) + if (dma_mapping_error(&pdev->dev, *addr)) goto unwind; addr++; } @@ -992,7 +988,8 @@ unwind: dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); - pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, addr[-1], skb_headlen(skb), + DMA_TO_DEVICE); out_err: return -ENOMEM; } @@ -1592,13 +1589,14 @@ static void deferred_unmap_destructor(struct sk_buff *skb) p = dui->addr; if (skb_tail_pointer(skb) - skb_transport_header(skb)) - pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) - - skb_transport_header(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&dui->pdev->dev, *p++, + skb_tail_pointer(skb) - skb_transport_header(skb), + DMA_TO_DEVICE); si = skb_shinfo(skb); for (i = 0; i < si->nr_frags; i++) - pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]), - PCI_DMA_TODEVICE); + dma_unmap_page(&dui->pdev->dev, *p++, + skb_frag_size(&si->frags[i]), DMA_TO_DEVICE); } static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, @@ -2153,17 +2151,14 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, fl->credits--; - pci_dma_sync_single_for_cpu(adap->pdev, - dma_unmap_addr(sd, dma_addr), - fl->buf_size - SGE_PG_RSVD, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&adap->pdev->dev, + dma_unmap_addr(sd, dma_addr), + fl->buf_size - SGE_PG_RSVD, DMA_FROM_DEVICE); (*sd->pg_chunk.p_cnt)--; if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) - pci_unmap_page(adap->pdev, - sd->pg_chunk.mapping, - fl->alloc_size, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping, + fl->alloc_size, DMA_FROM_DEVICE); if (!skb) { put_page(sd->pg_chunk.page); diff --git 
a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 83ed10ac8660..5903bdb78916 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -1147,7 +1147,9 @@ static int set_dbqtimer_tickval(struct net_device *dev, } static int set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { int ret; @@ -1163,7 +1165,9 @@ static int set_coalesce(struct net_device *dev, coalesce->tx_coalesce_usecs); } -static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { const struct port_info *pi = netdev_priv(dev); const struct adapter *adap = pi->adapter; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 6260b3bebd2b..786ceae34488 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -1441,7 +1441,7 @@ static int cxgb4_set_hash_filter(struct net_device *dev, } else if (iconf & USE_ENC_IDX_F) { if (f->fs.val.encap_vld) { struct port_info *pi = netdev_priv(f->dev); - u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; + static const u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; /* allocate MPS TCAM entry */ ret = t4_alloc_encap_mac_filt(adapter, pi->viid, @@ -1688,7 +1688,7 @@ int __cxgb4_set_filter(struct net_device *dev, int ftid, } else if (iconf & USE_ENC_IDX_F) { if (f->fs.val.encap_vld) { struct port_info *pi = netdev_priv(f->dev); - u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; + static const u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; /* allocate MPS TCAM entry */ ret = t4_alloc_encap_mac_filt(adapter, pi->viid, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 710cb00ce3a3..0d9cda4ab303 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3872,7 +3872,7 @@ static const struct net_device_ops cxgb4_netdev_ops = { .ndo_set_mac_address = cxgb_set_mac_addr, .ndo_set_features = cxgb_set_features, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = cxgb_ioctl, + .ndo_eth_ioctl = cxgb_ioctl, .ndo_change_mtu = cxgb_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cxgb_netpoll, @@ -4008,7 +4008,7 @@ static void adap_free_hma_mem(struct adapter *adapter) if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) { dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl, - adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL); + adapter->hma.sgt->nents, DMA_BIDIRECTIONAL); adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG; } @@ -6163,8 +6163,7 @@ static void print_port_info(const struct net_device *dev) --bufp; sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); - netdev_info(dev, "%s: Chelsio %s (%s) %s\n", - dev->name, adap->params.vpd.id, adap->name, buf); + netdev_info(dev, "Chelsio %s %s\n", adap->params.vpd.id, buf); } /* @@ -6688,16 +6687,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; } - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { highdma = true; - err = pci_set_consistent_dma_mask(pdev, 
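[Editor's note] The get_coalesce/set_coalesce changes at the top of this hunk (and the matching ones in the enic, gemini, dpaa, be2net and cxgb4vf diffs further down) track a tree-wide change to struct ethtool_ops: both callbacks now also receive a struct kernel_ethtool_coalesce and a struct netlink_ext_ack. Drivers that do not support the extended coalescing attributes simply ignore the new arguments, which is exactly what these conversions do. A hedged sketch for a hypothetical driver:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static int example_get_coalesce(struct net_device *dev,
                                    struct ethtool_coalesce *ec,
                                    struct kernel_ethtool_coalesce *kernel_coal,
                                    struct netlink_ext_ack *extack)
    {
            /* kernel_coal/extack may be left untouched by simple drivers */
            ec->rx_coalesce_usecs = 64;   /* made-up value for illustration */
            return 0;
    }
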
DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " - "coherent allocations\n"); - goto out_free_adapter; - } } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto out_free_adapter; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 6a099cb34b12..fa5b596ff23a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -443,7 +443,7 @@ static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) if (is_buf_mapped(d)) dma_unmap_page(adap->pdev_dev, get_buf_addr(d), get_buf_size(adap, d), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); put_page(d->page); d->page = NULL; if (++q->cidx == q->size) @@ -469,7 +469,7 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) if (is_buf_mapped(d)) dma_unmap_page(adap->pdev_dev, get_buf_addr(d), - get_buf_size(adap, d), PCI_DMA_FROMDEVICE); + get_buf_size(adap, d), DMA_FROM_DEVICE); d->page = NULL; if (++q->cidx == q->size) q->cidx = 0; @@ -566,7 +566,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE << s->fl_pg_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { __free_pages(pg, s->fl_pg_order); q->mapping_err++; @@ -596,7 +596,7 @@ alloc_small_pages: } mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { put_page(pg); q->mapping_err++; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 2820a0bb971b..49b76fd47daa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1647,7 +1647,9 @@ static int cxgb4vf_set_ringparam(struct net_device *dev, * interrupt holdoff timer to be read on all of the device's Queue Sets. */ static int cxgb4vf_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { const struct port_info *pi = netdev_priv(dev); const struct adapter *adapter = pi->adapter; @@ -1667,7 +1669,9 @@ static int cxgb4vf_get_coalesce(struct net_device *dev, * the interrupt holdoff timer on any of the device's Queue Sets. */ static int cxgb4vf_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { const struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; @@ -2837,7 +2841,7 @@ static const struct net_device_ops cxgb4vf_netdev_ops = { .ndo_set_rx_mode = cxgb4vf_set_rxmode, .ndo_set_mac_address = cxgb4vf_set_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = cxgb4vf_do_ioctl, + .ndo_eth_ioctl = cxgb4vf_do_ioctl, .ndo_change_mtu = cxgb4vf_change_mtu, .ndo_fix_features = cxgb4vf_fix_features, .ndo_set_features = cxgb4vf_set_features, @@ -2917,17 +2921,11 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * Set up our DMA mask: try for 64-bit address masking first and * fall back to 32-bit if we can't get 64 bits ... 
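[Editor's note] The probe-path rework visible here collapses the old two-step pci_set_dma_mask() + pci_set_consistent_dma_mask() dance into a single dma_set_mask_and_coherent() call, falling back to a 32-bit mask when 64-bit addressing is unavailable. A minimal sketch of the pattern (hypothetical helper, not driver code):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int example_set_dma_masks(struct pci_dev *pdev)
    {
            int err;

            /* sets both the streaming and the coherent mask in one call */
            err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
            if (err)
                    err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
            if (err)
                    dev_err(&pdev->dev, "no usable DMA configuration\n");
            return err;
    }
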
*/ - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err == 0) { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "unable to obtain 64-bit DMA for" - " coherent allocations\n"); - goto err_release_regions; - } pci_using_dac = 1; } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err != 0) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto err_release_regions; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 7bc80eeb2c21..0295b2406646 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -478,7 +478,7 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) if (is_buf_mapped(sdesc)) dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), get_buf_size(adapter, sdesc), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); put_page(sdesc->page); sdesc->page = NULL; if (++fl->cidx == fl->size) @@ -507,7 +507,7 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) if (is_buf_mapped(sdesc)) dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), get_buf_size(adapter, sdesc), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); sdesc->page = NULL; if (++fl->cidx == fl->size) fl->cidx = 0; @@ -644,7 +644,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE << s->fl_pg_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { /* * We've run out of DMA mapping space. Free up the @@ -682,7 +682,7 @@ alloc_small_pages: poison_buf(page, PAGE_SIZE); dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { put_page(page); break; diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig index d8af9e64dd1e..dac1764ba740 100644 --- a/drivers/net/ethernet/cirrus/Kconfig +++ b/drivers/net/ethernet/cirrus/Kconfig @@ -6,7 +6,7 @@ config NET_VENDOR_CIRRUS bool "Cirrus devices" default y - depends on ISA || EISA || ARM || MAC + depends on ISA || EISA || ARM || MAC || COMPILE_TEST help If you have a network (Ethernet) card belonging to this class, say Y. @@ -18,9 +18,16 @@ config NET_VENDOR_CIRRUS if NET_VENDOR_CIRRUS config CS89x0 - tristate "CS89x0 support" - depends on ISA || EISA || ARM + tristate + +config CS89x0_ISA + tristate "CS89x0 ISA driver support" + depends on HAS_IOPORT_MAP + depends on ISA depends on !PPC32 + depends on CS89x0_PLATFORM=n + select NETDEV_LEGACY_INIT + select CS89x0 help Support for CS89x0 chipset based Ethernet cards. If you have a network (Ethernet) card of this type, say Y and read the file @@ -30,15 +37,15 @@ config CS89x0 will be called cs89x0. config CS89x0_PLATFORM - bool "CS89x0 platform driver support" if HAS_IOPORT_MAP - default !HAS_IOPORT_MAP - depends on CS89x0 + tristate "CS89x0 platform driver support" + depends on ARM || COMPILE_TEST + select CS89x0 help - Say Y to compile the cs89x0 driver as a platform driver. This - makes this driver suitable for use on certain evaluation boards - such as the iMX21ADS. + Say Y to compile the cs89x0 platform driver. This makes this driver + suitable for use on certain evaluation boards such as the iMX21ADS. - If you are unsure, say N. 
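[Editor's note] Because the Kconfig rework above turns both CS89x0 front-ends into tristate symbols, the cs89x0.c hunks that follow replace bare #ifdef/#ifndef tests with IS_ENABLED(), which evaluates true for both built-in (=y) and modular (=m) configurations; a plain #ifdef CONFIG_FOO would miss the CONFIG_FOO_MODULE case. A short illustration (the function body is hypothetical):

    #include <linux/kconfig.h>

    #if IS_ENABLED(CONFIG_CS89x0_ISA)
    /* compiled for both CS89x0_ISA=y and CS89x0_ISA=m builds */
    static void example_isa_only_setup(void)
    {
            /* ISA-specific port probing would live here */
    }
    #endif
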
+ To compile this driver as a module, choose M here. The module + will be called cs89x0. config EP93XX_ETH tristate "EP93xx Ethernet support" diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 33ace3307059..d0c4c8b7a15a 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -104,7 +104,7 @@ static char version[] __initdata = * them to system IRQ numbers. This mapping is card specific and is set to * the configuration of the Cirrus Eval board for this chip. */ -#ifndef CONFIG_CS89x0_PLATFORM +#if IS_ENABLED(CONFIG_CS89x0_ISA) static unsigned int netcard_portlist[] __used __initdata = { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0 @@ -292,7 +292,7 @@ write_irq(struct net_device *dev, int chip_type, int irq) int i; if (chip_type == CS8900) { -#ifndef CONFIG_CS89x0_PLATFORM +#if IS_ENABLED(CONFIG_CS89x0_ISA) /* Search the mapping table for the corresponding IRQ pin. */ for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++) if (cs8900_irq_map[i] == irq) @@ -859,7 +859,7 @@ net_open(struct net_device *dev) goto bad_out; } } else { -#if !defined(CONFIG_CS89x0_PLATFORM) +#if IS_ENABLED(CONFIG_CS89x0_ISA) if (((1 << dev->irq) & lp->irq_map) == 0) { pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n", dev->name, dev->irq, lp->irq_map); @@ -1523,7 +1523,7 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular) dev->irq = i; } else { i = lp->isa_config & INT_NO_MASK; -#ifndef CONFIG_CS89x0_PLATFORM +#if IS_ENABLED(CONFIG_CS89x0_ISA) if (lp->chip_type == CS8900) { /* Translate the IRQ using the IRQ mapping table. */ if (i >= ARRAY_SIZE(cs8900_irq_map)) @@ -1576,7 +1576,7 @@ out1: return retval; } -#ifndef CONFIG_CS89x0_PLATFORM +#if IS_ENABLED(CONFIG_CS89x0_ISA) /* * This function converts the I/O port address used by the cs89x0_probe() and * init_module() functions to the I/O memory address used by the @@ -1682,11 +1682,7 @@ out: pr_warn("no cs8900 or cs8920 detected. 
Be sure to disable PnP with SETUP\n"); return ERR_PTR(err); } -#endif -#endif - -#if defined(MODULE) && !defined(CONFIG_CS89x0_PLATFORM) - +#else static struct net_device *dev_cs89x0; /* Support the 'debug' module parm even if we're compiled for non-debug to @@ -1757,9 +1753,9 @@ MODULE_LICENSE("GPL"); * (hw or software util) */ -int __init init_module(void) +static int __init cs89x0_isa_init_module(void) { - struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); + struct net_device *dev; struct net_local *lp; int ret = 0; @@ -1768,6 +1764,7 @@ int __init init_module(void) #else debug = 0; #endif + dev = alloc_etherdev(sizeof(struct net_local)); if (!dev) return -ENOMEM; @@ -1826,9 +1823,9 @@ out: free_netdev(dev); return ret; } +module_init(cs89x0_isa_init_module); -void __exit -cleanup_module(void) +static void __exit cs89x0_isa_cleanup_module(void) { struct net_local *lp = netdev_priv(dev_cs89x0); @@ -1838,9 +1835,11 @@ cleanup_module(void) release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT); free_netdev(dev_cs89x0); } -#endif /* MODULE && !CONFIG_CS89x0_PLATFORM */ +module_exit(cs89x0_isa_cleanup_module); +#endif /* MODULE */ +#endif /* CONFIG_CS89x0_ISA */ -#ifdef CONFIG_CS89x0_PLATFORM +#if IS_ENABLED(CONFIG_CS89x0_PLATFORM) static int __init cs89x0_platform_probe(struct platform_device *pdev) { struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 9f5e5ec69991..072fac5f5d24 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -733,7 +733,7 @@ static const struct net_device_ops ep93xx_netdev_ops = { .ndo_open = ep93xx_open, .ndo_stop = ep93xx_close, .ndo_start_xmit = ep93xx_xmit, - .ndo_do_ioctl = ep93xx_ioctl, + .ndo_eth_ioctl = ep93xx_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, }; diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 1a9803f2073e..12ffc14fbecd 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -298,7 +298,9 @@ static void enic_set_msglevel(struct net_device *netdev, u32 value) } static int enic_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct enic *enic = netdev_priv(netdev); struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting; @@ -343,7 +345,9 @@ static int enic_coalesce_valid(struct enic *enic, } static int enic_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct enic *enic = netdev_priv(netdev); u32 tx_coalesce_usecs; diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index c2ebb3388789..6e745ca4c433 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2144,7 +2144,9 @@ static int gmac_set_ringparam(struct net_device *netdev, } static int gmac_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct gemini_ethernet_port *port = netdev_priv(netdev); @@ -2156,7 +2158,9 @@ static int gmac_get_coalesce(struct 
net_device *netdev, } static int gmac_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct gemini_ethernet_port *port = netdev_priv(netdev); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 2a8bf53c2f75..e842de6f6635 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1372,7 +1372,7 @@ static const struct net_device_ops dm9000_netdev_ops = { .ndo_start_xmit = dm9000_start_xmit, .ndo_tx_timeout = dm9000_timeout, .ndo_set_rx_mode = dm9000_hash_table, - .ndo_do_ioctl = dm9000_ioctl, + .ndo_eth_ioctl = dm9000_ioctl, .ndo_set_features = dm9000_set_features, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index b125d7faefdf..36ab4cbf2ad0 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -443,6 +443,7 @@ ========================================================================= */ +#include <linux/compat.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> @@ -902,7 +903,8 @@ static int de4x5_close(struct net_device *dev); static struct net_device_stats *de4x5_get_stats(struct net_device *dev); static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len); static void set_multicast_list(struct net_device *dev); -static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd); /* ** Private functions @@ -1084,7 +1086,7 @@ static const struct net_device_ops de4x5_netdev_ops = { .ndo_start_xmit = de4x5_queue_pkt, .ndo_get_stats = de4x5_get_stats, .ndo_set_rx_mode = set_multicast_list, - .ndo_do_ioctl = de4x5_ioctl, + .ndo_siocdevprivate = de4x5_siocdevprivate, .ndo_set_mac_address= eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -5357,7 +5359,7 @@ de4x5_dbg_rx(struct sk_buff *skb, int len) ** this function is only used for my testing. */ static int -de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd) { struct de4x5_private *lp = netdev_priv(dev); struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru; @@ -5371,6 +5373,9 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } tmp; u_long flags = 0; + if (cmd != SIOCDEVPRIVATE || in_compat_syscall()) + return -EOPNOTSUPP; + switch(ioc->cmd) { case DE4X5_GET_HWADDR: /* Get the hardware address */ ioc->len = ETH_ALEN; diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c index 011604787b8e..55d6fc99f40b 100644 --- a/drivers/net/ethernet/dec/tulip/media.c +++ b/drivers/net/ethernet/dec/tulip/media.c @@ -362,7 +362,7 @@ void tulip_select_media(struct net_device *dev, int startup) iowrite32(0x33, ioaddr + CSR12); new_csr6 = 0x01860000; /* Trigger autonegotiation. */ - iowrite32(startup ? 
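[Editor's note] Two related ndo conversions recur throughout this series: drivers whose ioctl handler only services MII/PHY requests move from .ndo_do_ioctl to the new .ndo_eth_ioctl, while drivers with private SIOCDEVPRIVATE commands, such as de4x5 here, move to .ndo_siocdevprivate, which receives the user pointer explicitly (de4x5 additionally rejects compat callers, as the hunk above shows). A hedged sketch with hypothetical handlers:

    #include <linux/netdevice.h>

    static int example_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
                                 int cmd)
    {
            /* only SIOCxMII* and hw-timestamping ioctls are routed here */
            return -EOPNOTSUPP;
    }

    static int example_siocdevprivate(struct net_device *dev,
                                      struct ifreq *ifr,
                                      void __user *data, int cmd)
    {
            /* SIOCDEVPRIVATE..SIOCDEVPRIVATE+15, with the user pointer */
            return -EOPNOTSUPP;
    }

    static const struct net_device_ops example_netdev_ops = {
            .ndo_eth_ioctl      = example_eth_ioctl,
            .ndo_siocdevprivate = example_siocdevprivate,
    };
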
0x0201F868 : 0x0001F868, ioaddr + 0xB8); + iowrite32(0x0001F868, ioaddr + 0xB8); } else { iowrite32(0x32, ioaddr + CSR12); new_csr6 = 0x00420000; diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index c1dcd6ca1457..fcedd733bacb 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1271,7 +1271,7 @@ static const struct net_device_ops tulip_netdev_ops = { .ndo_tx_timeout = tulip_tx_timeout, .ndo_stop = tulip_close, .ndo_get_stats = tulip_get_stats, - .ndo_do_ioctl = private_ioctl, + .ndo_eth_ioctl = private_ioctl, .ndo_set_rx_mode = set_rx_mode, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 1876f15dd827..85b99099c6b9 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -341,7 +341,7 @@ static const struct net_device_ops netdev_ops = { .ndo_start_xmit = start_tx, .ndo_get_stats = get_stats, .ndo_set_rx_mode = set_rx_mode, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 734acb834c98..202ecb132053 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -95,7 +95,7 @@ static const struct net_device_ops netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_set_rx_mode = set_multicast, - .ndo_do_ioctl = rio_ioctl, + .ndo_eth_ioctl = rio_ioctl, .ndo_tx_timeout = rio_tx_timeout, }; diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index ee0ca712dd1c..c36d186dffed 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -479,7 +479,7 @@ static const struct net_device_ops netdev_ops = { .ndo_start_xmit = start_tx, .ndo_get_stats = get_stats, .ndo_set_rx_mode = set_rx_mode, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = tx_timeout, .ndo_change_mtu = change_mtu, .ndo_set_mac_address = sundance_set_mac_addr, diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 48c6eb142dcc..6c51cf991dad 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -742,7 +742,7 @@ static const struct net_device_ops dnet_netdev_ops = { .ndo_stop = dnet_close, .ndo_get_stats = dnet_get_stats, .ndo_start_xmit = dnet_start_xmit, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index 7c992172933b..b2d4fb3feb74 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -488,15 +488,7 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id) pci_set_master(dev); - err = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&dev->dev, - "Required dma mask not supported, failed to initialize device\n"); - err = -EIO; - goto err_disable_dev; - } - - err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&dev->dev, "Required dma mask not supported, failed to 
initialize device\n"); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 99cc1c46fb30..f9955308b93d 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -315,7 +315,9 @@ static int be_read_dump_data(struct be_adapter *adapter, u32 dump_len, } static int be_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *et) + struct ethtool_coalesce *et, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct be_adapter *adapter = netdev_priv(netdev); struct be_aic_obj *aic = &adapter->aic_obj[0]; @@ -338,7 +340,9 @@ static int be_get_coalesce(struct net_device *netdev, * eqd cmd is issued in the worker thread. */ static int be_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *et) + struct ethtool_coalesce *et, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct be_adapter *adapter = netdev_priv(netdev); struct be_aic_obj *aic = &adapter->aic_obj[0]; diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index e1b43b07755b..ed1ed48e7483 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1009,7 +1009,7 @@ static const struct ethtool_ops ethoc_ethtool_ops = { static const struct net_device_ops ethoc_netdev_ops = { .ndo_open = ethoc_open, .ndo_stop = ethoc_stop, - .ndo_do_ioctl = ethoc_ioctl, + .ndo_eth_ioctl = ethoc_ioctl, .ndo_set_mac_address = ethoc_set_mac_address, .ndo_set_rx_mode = ethoc_set_multicast_list, .ndo_change_mtu = ethoc_change_mtu, diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 11dbbfd38770..ff76e401a014 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1616,7 +1616,7 @@ static const struct net_device_ops ftgmac100_netdev_ops = { .ndo_start_xmit = ftgmac100_hard_start_xmit, .ndo_set_mac_address = ftgmac100_set_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_tx_timeout = ftgmac100_tx_timeout, .ndo_set_rx_mode = ftgmac100_set_rx_mode, .ndo_set_features = ftgmac100_set_features, diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 5a1a8f2ea63c..8a341e2d5833 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -1043,7 +1043,7 @@ static const struct net_device_ops ftmac100_netdev_ops = { .ndo_start_xmit = ftmac100_hard_start_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = ftmac100_do_ioctl, + .ndo_eth_ioctl = ftmac100_do_ioctl, }; /****************************************************************************** diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 0f141c14d72d..25c91b3c5fd3 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -463,7 +463,7 @@ static const struct net_device_ops netdev_ops = { .ndo_start_xmit = start_tx, .ndo_get_stats = get_stats, .ndo_set_rx_mode = set_rx_mode, - .ndo_do_ioctl = mii_ioctl, + .ndo_eth_ioctl = mii_ioctl, .ndo_tx_timeout = fealnx_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 2d1abdd58fab..e04e1c5cb013 100644 --- a/drivers/net/ethernet/freescale/Kconfig 
+++ b/drivers/net/ethernet/freescale/Kconfig @@ -25,10 +25,10 @@ config FEC depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ ARCH_MXC || SOC_IMX28 || COMPILE_TEST) default ARCH_MXC || SOC_IMX28 if ARM + depends on PTP_1588_CLOCK_OPTIONAL select CRC32 select PHYLIB imply NET_SELFTESTS - imply PTP_1588_CLOCK help Say Y here if you want to use the built-in 10/100 Fast ethernet controller on some Motorola ColdFire and Freescale i.MX processors. diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig index 626ec58a0afc..0e1439fd00bd 100644 --- a/drivers/net/ethernet/freescale/dpaa/Kconfig +++ b/drivers/net/ethernet/freescale/dpaa/Kconfig @@ -4,7 +4,6 @@ menuconfig FSL_DPAA_ETH depends on FSL_DPAA && FSL_FMAN select PHYLIB select FIXED_PHY - select FSL_FMAN_MAC help Data Path Acceleration Architecture Ethernet driver, supporting the Freescale QorIQ chips. diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index e6826561cf11..685d2d8a3b36 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -3157,7 +3157,7 @@ static const struct net_device_ops dpaa_ops = { .ndo_set_mac_address = dpaa_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = dpaa_set_rx_mode, - .ndo_do_ioctl = dpaa_ioctl, + .ndo_eth_ioctl = dpaa_ioctl, .ndo_setup_tc = dpaa_setup_tc, .ndo_change_mtu = dpaa_change_mtu, .ndo_bpf = dpaa_xdp, diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 1268996b7030..763d2c7b5fb1 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -513,7 +513,9 @@ static int dpaa_get_ts_info(struct net_device *net_dev, } static int dpaa_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct qman_portal *portal; u32 period; @@ -530,7 +532,9 @@ static int dpaa_get_coalesce(struct net_device *dev, } static int dpaa_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { const cpumask_t *cpus = qman_affine_cpus(); bool needs_revert[NR_CPUS] = {false}; diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile index c2ef74052ef8..3d9842af7f10 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Makefile +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -11,7 +11,7 @@ fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpa fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o -fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o +fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o dpaa2-mac.o dpmac.o # Needed by the tracing framework CFLAGS_dpaa2-eth.o := -I$(src) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c index 833696245565..605a39f892b9 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c @@ -68,7 +68,7 @@ 
dpaa2_eth_dl_trap_item_lookup(struct dpaa2_eth_priv *priv, u16 trap_id) struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv, struct dpaa2_fapr *fapr) { - struct dpaa2_faf_error_bit { + static const struct dpaa2_faf_error_bit { int position; enum devlink_trap_generic_id trap_id; } faf_bits[] = { @@ -196,7 +196,8 @@ int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv) struct dpaa2_eth_devlink_priv *dl_priv; int err; - priv->devlink = devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv)); + priv->devlink = + devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv), dev); if (!priv->devlink) { dev_err(dev, "devlink_alloc failed\n"); return -ENOMEM; @@ -204,7 +205,7 @@ int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv) dl_priv = devlink_priv(priv->devlink); dl_priv->dpaa2_priv = priv; - err = devlink_register(priv->devlink, dev); + err = devlink_register(priv->devlink); if (err) { dev_err(dev, "devlink_register() = %d\n", err); goto devlink_free; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 973352393bd4..7065c71ed7b8 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2594,7 +2594,7 @@ static const struct net_device_ops dpaa2_eth_ops = { .ndo_get_stats64 = dpaa2_eth_get_stats, .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, .ndo_set_features = dpaa2_eth_set_features, - .ndo_do_ioctl = dpaa2_eth_ioctl, + .ndo_eth_ioctl = dpaa2_eth_ioctl, .ndo_change_mtu = dpaa2_eth_change_mtu, .ndo_bpf = dpaa2_eth_xdp, .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, @@ -4138,7 +4138,7 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) int err; dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent); - dpmac_dev = fsl_mc_get_endpoint(dpni_dev); + dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0); if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) return PTR_ERR(dpmac_dev); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index ad5e374eeccf..2da5f881f630 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -72,12 +72,12 @@ static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor); - strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), + strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), sizeof(drvinfo->bus_info)); } @@ -191,11 +191,11 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_STATS: for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { - strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); + strscpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { - strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); + strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } if (dpaa2_eth_has_mac(priv)) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c index 70e04321c420..720c9230cab5 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c +++ 
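[Editor's note] Besides the strlcpy() to strscpy() cleanup, the dpaa2-eth devlink hunk above follows the devlink API change where devlink_alloc() takes the backing struct device up front and devlink_register() loses its dev argument. A minimal sketch of the new sequence (helper name and priv struct are hypothetical):

    #include <net/devlink.h>

    struct example_dl_priv {
            void *drv;    /* hypothetical driver back-pointer */
    };

    static int example_devlink_setup(struct device *dev,
                                     const struct devlink_ops *ops)
    {
            struct devlink *dl;
            int err;

            /* the device is now bound at allocation time */
            dl = devlink_alloc(ops, sizeof(struct example_dl_priv), dev);
            if (!dl)
                    return -ENOMEM;

            err = devlink_register(dl);    /* no dev argument anymore */
            if (err)
                    devlink_free(dl);
            return err;
    }
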
b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c @@ -15,18 +15,18 @@ static struct { enum dpsw_counter id; char name[ETH_GSTRING_LEN]; } dpaa2_switch_ethtool_counters[] = { - {DPSW_CNT_ING_FRAME, "rx frames"}, - {DPSW_CNT_ING_BYTE, "rx bytes"}, - {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"}, - {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"}, - {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"}, - {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"}, - {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"}, - {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"}, - {DPSW_CNT_EGR_FRAME, "tx frames"}, - {DPSW_CNT_EGR_BYTE, "tx bytes"}, - {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"}, - {DPSW_CNT_ING_NO_BUFF_DISCARD, "rx discarded no buffer frames"}, + {DPSW_CNT_ING_FRAME, "[hw] rx frames"}, + {DPSW_CNT_ING_BYTE, "[hw] rx bytes"}, + {DPSW_CNT_ING_FLTR_FRAME, "[hw] rx filtered frames"}, + {DPSW_CNT_ING_FRAME_DISCARD, "[hw] rx discarded frames"}, + {DPSW_CNT_ING_BCAST_FRAME, "[hw] rx bcast frames"}, + {DPSW_CNT_ING_BCAST_BYTES, "[hw] rx bcast bytes"}, + {DPSW_CNT_ING_MCAST_FRAME, "[hw] rx mcast frames"}, + {DPSW_CNT_ING_MCAST_BYTE, "[hw] rx mcast bytes"}, + {DPSW_CNT_EGR_FRAME, "[hw] tx frames"}, + {DPSW_CNT_EGR_BYTE, "[hw] tx bytes"}, + {DPSW_CNT_EGR_FRAME_DISCARD, "[hw] tx discarded frames"}, + {DPSW_CNT_ING_NO_BUFF_DISCARD, "[hw] rx nobuffer discards"}, }; #define DPAA2_SWITCH_NUM_COUNTERS ARRAY_SIZE(dpaa2_switch_ethtool_counters) @@ -62,6 +62,10 @@ dpaa2_switch_get_link_ksettings(struct net_device *netdev, struct dpsw_link_state state = {0}; int err = 0; + if (dpaa2_switch_port_is_type_phy(port_priv)) + return phylink_ethtool_ksettings_get(port_priv->mac->phylink, + link_ksettings); + err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, port_priv->ethsw_data->dpsw_handle, port_priv->idx, @@ -95,6 +99,10 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev, bool if_running; int err = 0, ret; + if (dpaa2_switch_port_is_type_phy(port_priv)) + return phylink_ethtool_ksettings_set(port_priv->mac->phylink, + link_ksettings); + /* Interface needs to be down to change link settings */ if_running = netif_running(netdev); if (if_running) { @@ -134,11 +142,17 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev, return err; } -static int dpaa2_switch_ethtool_get_sset_count(struct net_device *dev, int sset) +static int +dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset) { + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int num_ss_stats = DPAA2_SWITCH_NUM_COUNTERS; + switch (sset) { case ETH_SS_STATS: - return DPAA2_SWITCH_NUM_COUNTERS; + if (port_priv->mac) + num_ss_stats += dpaa2_mac_get_sset_count(); + return num_ss_stats; default: return -EOPNOTSUPP; } @@ -147,14 +161,19 @@ static int dpaa2_switch_ethtool_get_sset_count(struct net_device *dev, int sset) static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) - memcpy(data + i * ETH_GSTRING_LEN, - dpaa2_switch_ethtool_counters[i].name, + for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) { + memcpy(p, dpaa2_switch_ethtool_counters[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (port_priv->mac) + dpaa2_mac_get_strings(p); break; } } @@ -176,6 +195,9 @@ static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev, netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n", 
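[Editor's note] With switch ports now potentially backed by a DPMAC, the ethtool callbacks above grow a second stats section: the DPSW counters gain an "[hw]" prefix to distinguish them from MAC counters, and get_sset_count/get_strings/get_ethtool_stats each append the dpaa2_mac set when a MAC is connected. The count and string callbacks must stay in lockstep; a condensed sketch using names from this patch (not self-contained outside the driver):

    static int example_get_sset_count(struct net_device *netdev, int sset)
    {
            struct ethsw_port_priv *port_priv = netdev_priv(netdev);
            int n = DPAA2_SWITCH_NUM_COUNTERS;

            if (sset != ETH_SS_STATS)
                    return -EOPNOTSUPP;

            /* whatever is counted here must also be emitted by
             * get_strings() and get_ethtool_stats()
             */
            if (port_priv->mac)
                    n += dpaa2_mac_get_sset_count();
            return n;
    }
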
dpaa2_switch_ethtool_counters[i].name, err); } + + if (port_priv->mac) + dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i); } const struct ethtool_ops dpaa2_switch_port_ethtool_ops = { diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c index f9451ec5f2cb..d6eefbbf163f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -111,11 +111,11 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls, return 0; } -int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, +int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block, struct dpaa2_switch_acl_entry *entry) { struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg; - struct ethsw_core *ethsw = acl_tbl->ethsw; + struct ethsw_core *ethsw = filter_block->ethsw; struct dpsw_acl_key *acl_key = &entry->key; struct device *dev = ethsw->dev; u8 *cmd_buff; @@ -136,7 +136,7 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, } err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, - acl_tbl->id, acl_entry_cfg); + filter_block->acl_id, acl_entry_cfg); dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), DMA_TO_DEVICE); @@ -150,12 +150,13 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, return 0; } -static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl, - struct dpaa2_switch_acl_entry *entry) +static int +dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block, + struct dpaa2_switch_acl_entry *entry) { struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg; struct dpsw_acl_key *acl_key = &entry->key; - struct ethsw_core *ethsw = acl_tbl->ethsw; + struct ethsw_core *ethsw = block->ethsw; struct device *dev = ethsw->dev; u8 *cmd_buff; int err; @@ -175,7 +176,7 @@ static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl, } err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, - acl_tbl->id, acl_entry_cfg); + block->acl_id, acl_entry_cfg); dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), DMA_TO_DEVICE); @@ -190,19 +191,19 @@ static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl, } static int -dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block, struct dpaa2_switch_acl_entry *entry) { struct dpaa2_switch_acl_entry *tmp; struct list_head *pos, *n; int index = 0; - if (list_empty(&acl_tbl->entries)) { - list_add(&entry->list, &acl_tbl->entries); + if (list_empty(&block->acl_entries)) { + list_add(&entry->list, &block->acl_entries); return index; } - list_for_each_safe(pos, n, &acl_tbl->entries) { + list_for_each_safe(pos, n, &block->acl_entries) { tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list); if (entry->prio < tmp->prio) break; @@ -213,13 +214,13 @@ dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl, } static struct dpaa2_switch_acl_entry* -dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block, int index) { struct dpaa2_switch_acl_entry *tmp; int i = 0; - list_for_each_entry(tmp, &acl_tbl->entries, list) { + list_for_each_entry(tmp, &block->acl_entries, list) { if (i == index) return tmp; ++i; @@ -229,37 +230,38 @@ 
dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl, } static int -dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block, struct dpaa2_switch_acl_entry *entry, int precedence) { int err; - err = dpaa2_switch_acl_entry_remove(acl_tbl, entry); + err = dpaa2_switch_acl_entry_remove(block, entry); if (err) return err; entry->cfg.precedence = precedence; - return dpaa2_switch_acl_entry_add(acl_tbl, entry); + return dpaa2_switch_acl_entry_add(block, entry); } -static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl, - struct dpaa2_switch_acl_entry *entry) +static int +dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block, + struct dpaa2_switch_acl_entry *entry) { struct dpaa2_switch_acl_entry *tmp; int index, i, precedence, err; /* Add the new ACL entry to the linked list and get its index */ - index = dpaa2_switch_acl_entry_add_to_list(acl_tbl, entry); + index = dpaa2_switch_acl_entry_add_to_list(block, entry); /* Move up in priority the ACL entries to make space * for the new filter. */ - precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - acl_tbl->num_rules - 1; + precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1; for (i = 0; i < index; i++) { - tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i); + tmp = dpaa2_switch_acl_entry_get_by_index(block, i); - err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp, + err = dpaa2_switch_acl_entry_set_precedence(block, tmp, precedence); if (err) return err; @@ -269,19 +271,19 @@ static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl, /* Add the new entry to hardware */ entry->cfg.precedence = precedence; - err = dpaa2_switch_acl_entry_add(acl_tbl, entry); - acl_tbl->num_rules++; + err = dpaa2_switch_acl_entry_add(block, entry); + block->num_acl_rules++; return err; } static struct dpaa2_switch_acl_entry * -dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block, unsigned long cookie) { struct dpaa2_switch_acl_entry *tmp, *n; - list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) { + list_for_each_entry_safe(tmp, n, &block->acl_entries, list) { if (tmp->cookie == cookie) return tmp; } @@ -289,13 +291,13 @@ dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl, } static int -dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block, struct dpaa2_switch_acl_entry *entry) { struct dpaa2_switch_acl_entry *tmp, *n; int index = 0; - list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) { + list_for_each_entry_safe(tmp, n, &block->acl_entries, list) { if (tmp->cookie == entry->cookie) return index; index++; @@ -303,21 +305,34 @@ dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl, return -ENOENT; } +static struct dpaa2_switch_mirror_entry * +dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block, + unsigned long cookie) +{ + struct dpaa2_switch_mirror_entry *tmp, *n; + + list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) { + if (tmp->cookie == cookie) + return tmp; + } + return NULL; +} + static int -dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block, struct dpaa2_switch_acl_entry 
*entry) { struct dpaa2_switch_acl_entry *tmp; int index, i, precedence, err; - index = dpaa2_switch_acl_entry_get_index(acl_tbl, entry); + index = dpaa2_switch_acl_entry_get_index(block, entry); /* Remove from hardware the ACL entry */ - err = dpaa2_switch_acl_entry_remove(acl_tbl, entry); + err = dpaa2_switch_acl_entry_remove(block, entry); if (err) return err; - acl_tbl->num_rules--; + block->num_acl_rules--; /* Remove it from the list also */ list_del(&entry->list); @@ -325,8 +340,8 @@ dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl, /* Move down in priority the entries over the deleted one */ precedence = entry->cfg.precedence; for (i = index - 1; i >= 0; i--) { - tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i); - err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp, + tmp = dpaa2_switch_acl_entry_get_by_index(block, i); + err = dpaa2_switch_acl_entry_set_precedence(block, tmp, precedence); if (err) return err; @@ -339,10 +354,10 @@ dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl, return 0; } -static int dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw, - struct flow_action_entry *cls_act, - struct dpsw_acl_result *dpsw_act, - struct netlink_ext_ack *extack) +static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw, + struct flow_action_entry *cls_act, + struct dpsw_acl_result *dpsw_act, + struct netlink_ext_ack *extack) { int err = 0; @@ -374,22 +389,110 @@ out: return err; } -int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, +static int +dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block, + struct dpaa2_switch_mirror_entry *entry, + u16 to, struct netlink_ext_ack *extack) +{ + unsigned long block_ports = block->ports; + struct ethsw_core *ethsw = block->ethsw; + struct ethsw_port_priv *port_priv; + unsigned long ports_added = 0; + u16 vlan = entry->cfg.vlan_id; + bool mirror_port_enabled; + int err, port; + + /* Setup the mirroring port */ + mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs); + if (!mirror_port_enabled) { + err = dpsw_set_reflection_if(ethsw->mc_io, 0, + ethsw->dpsw_handle, to); + if (err) + return err; + ethsw->mirror_port = to; + } + + /* Setup the same egress mirroring configuration on all the switch + * ports that share the same filter block. + */ + for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) { + port_priv = ethsw->ports[port]; + + /* We cannot add a per VLAN mirroring rule if the VLAN in + * question is not installed on the switch port. 
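[Editor's note] The pervasive rename from dpaa2_switch_acl_tbl to dpaa2_switch_filter_block reflects that the object now carries two filter populations for a group of ports: ACL entries and the new mirror entries. Judging only from the fields these hunks touch, the block looks roughly like the reconstruction below (field types are guesses, not the header's definition):

    struct dpaa2_switch_filter_block {
            struct ethsw_core *ethsw;          /* owning switch instance */
            u64 ports;                         /* bitmap of member ports */
            bool in_use;
            struct list_head acl_entries;      /* kept sorted by priority */
            struct list_head mirror_entries;
            u16 acl_id;                        /* hardware ACL table id */
            u8 num_acl_rules;
    };
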
+ */ + if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN && + !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) { + NL_SET_ERR_MSG(extack, + "VLAN must be installed on the switch port"); + err = -EINVAL; + goto err_remove_filters; + } + + err = dpsw_if_add_reflection(ethsw->mc_io, 0, + ethsw->dpsw_handle, + port, &entry->cfg); + if (err) + goto err_remove_filters; + + ports_added |= BIT(port); + } + + list_add(&entry->list, &block->mirror_entries); + + return 0; + +err_remove_filters: + for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) { + dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle, + port, &entry->cfg); + } + + if (!mirror_port_enabled) + ethsw->mirror_port = ethsw->sw_attr.num_ifs; + + return err; +} + +static int +dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block, + struct dpaa2_switch_mirror_entry *entry) +{ + struct dpsw_reflection_cfg *cfg = &entry->cfg; + unsigned long block_ports = block->ports; + struct ethsw_core *ethsw = block->ethsw; + int port; + + /* Remove this mirroring configuration from all the ports belonging to + * the filter block. + */ + for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) + dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle, + port, cfg); + + /* Also remove it from the list of mirror filters */ + list_del(&entry->list); + kfree(entry); + + /* If this was the last mirror filter, then unset the mirror port */ + if (list_empty(&block->mirror_entries)) + ethsw->mirror_port = ethsw->sw_attr.num_ifs; + + return 0; +} + +static int +dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block, struct flow_cls_offload *cls) { struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct netlink_ext_ack *extack = cls->common.extack; - struct ethsw_core *ethsw = acl_tbl->ethsw; struct dpaa2_switch_acl_entry *acl_entry; + struct ethsw_core *ethsw = block->ethsw; struct flow_action_entry *act; int err; - if (!flow_offload_has_one_action(&rule->action)) { - NL_SET_ERR_MSG(extack, "Only singular actions are supported"); - return -EOPNOTSUPP; - } - - if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) { + if (dpaa2_switch_acl_tbl_is_full(block)) { NL_SET_ERR_MSG(extack, "Maximum filter capacity reached"); return -ENOMEM; } @@ -403,15 +506,15 @@ int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, goto free_acl_entry; act = &rule->action.entries[0]; - err = dpaa2_switch_tc_parse_action(ethsw, act, - &acl_entry->cfg.result, extack); + err = dpaa2_switch_tc_parse_action_acl(ethsw, act, + &acl_entry->cfg.result, extack); if (err) goto free_acl_entry; acl_entry->prio = cls->common.prio; acl_entry->cookie = cls->cookie; - err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry); + err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry); if (err) goto free_acl_entry; @@ -423,33 +526,171 @@ free_acl_entry: return err; } -int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, - struct flow_cls_offload *cls) +static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls, + u16 *vlan) { - struct dpaa2_switch_acl_entry *entry; + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + struct netlink_ext_ack *extack = cls->common.extack; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_VLAN))) { + NL_SET_ERR_MSG_MOD(extack, + "Mirroring is supported only per VLAN"); + return -EOPNOTSUPP; + } + + if 
(flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); - entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie); - if (!entry) - return 0; + if (match.mask->vlan_priority != 0 || + match.mask->vlan_dei != 0) { + NL_SET_ERR_MSG_MOD(extack, + "Only matching on VLAN ID supported"); + return -EOPNOTSUPP; + } - return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry); + if (match.mask->vlan_id != 0xFFF) { + NL_SET_ERR_MSG_MOD(extack, + "Masked matching not supported"); + return -EOPNOTSUPP; + } + + *vlan = (u16)match.key->vlan_id; + } + + return 0; } -int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, - struct tc_cls_matchall_offload *cls) +static int +dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block, + struct flow_cls_offload *cls) { struct netlink_ext_ack *extack = cls->common.extack; - struct ethsw_core *ethsw = acl_tbl->ethsw; - struct dpaa2_switch_acl_entry *acl_entry; - struct flow_action_entry *act; + struct dpaa2_switch_mirror_entry *mirror_entry; + struct ethsw_core *ethsw = block->ethsw; + struct dpaa2_switch_mirror_entry *tmp; + struct flow_action_entry *cls_act; + struct list_head *pos, *n; + bool mirror_port_enabled; + u16 if_id, vlan; int err; - if (!flow_offload_has_one_action(&cls->rule->action)) { + mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs); + cls_act = &cls->rule->action.entries[0]; + + /* Offload rules only when the destination is a DPAA2 switch port */ + if (!dpaa2_switch_port_dev_check(cls_act->dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Destination not a DPAA2 switch port"); + return -EOPNOTSUPP; + } + if_id = dpaa2_switch_get_index(ethsw, cls_act->dev); + + /* We have a single mirror port but can configure egress mirroring on + * all the other switch ports. We need to allow mirroring rules only + * when the destination port is the same. + */ + if (mirror_port_enabled && ethsw->mirror_port != if_id) { + NL_SET_ERR_MSG_MOD(extack, + "Multiple mirror ports not supported"); + return -EBUSY; + } + + /* Parse the key */ + err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan); + if (err) + return err; + + /* Make sure that we don't already have a mirror rule with the same + * configuration. 
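[Editor's note] The mirroring code added around here encodes a hardware restriction worth spelling out: the DPSW has a single reflection (mirror) destination, so every mirror rule in a block must target the same port, and ethsw->mirror_port is parked at the out-of-range value sw_attr.num_ifs while no rule is installed. The flower/matchall replace entry points then become thin dispatchers on the single action's type, as the hunk just below shows; condensed:

    act = &rule->action.entries[0];
    switch (act->id) {
    case FLOW_ACTION_REDIRECT:
    case FLOW_ACTION_TRAP:
    case FLOW_ACTION_DROP:
            /* programmed into the ACL table */
            return dpaa2_switch_cls_flower_replace_acl(block, cls);
    case FLOW_ACTION_MIRRED:
            /* programmed as a reflection rule */
            return dpaa2_switch_cls_flower_replace_mirror(block, cls);
    default:
            return -EOPNOTSUPP;
    }
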
+ */ + list_for_each_safe(pos, n, &block->mirror_entries) { + tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list); + + if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN && + tmp->cfg.vlan_id == vlan) { + NL_SET_ERR_MSG_MOD(extack, + "VLAN mirror filter already installed"); + return -EBUSY; + } + } + + mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL); + if (!mirror_entry) + return -ENOMEM; + + mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN; + mirror_entry->cfg.vlan_id = vlan; + mirror_entry->cookie = cls->cookie; + + return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id, + extack); +} + +int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + struct flow_action_entry *act; + + if (!flow_offload_has_one_action(&rule->action)) { NL_SET_ERR_MSG(extack, "Only singular actions are supported"); return -EOPNOTSUPP; } - if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) { + act = &rule->action.entries[0]; + switch (act->id) { + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_TRAP: + case FLOW_ACTION_DROP: + return dpaa2_switch_cls_flower_replace_acl(block, cls); + case FLOW_ACTION_MIRRED: + return dpaa2_switch_cls_flower_replace_mirror(block, cls); + default: + NL_SET_ERR_MSG_MOD(extack, "Action not supported"); + return -EOPNOTSUPP; + } +} + +int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block, + struct flow_cls_offload *cls) +{ + struct dpaa2_switch_mirror_entry *mirror_entry; + struct dpaa2_switch_acl_entry *acl_entry; + + /* If this filter is a an ACL one, remove it */ + acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block, + cls->cookie); + if (acl_entry) + return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry); + + /* If not, then it has to be a mirror */ + mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block, + cls->cookie); + if (mirror_entry) + return dpaa2_switch_block_remove_mirror(block, + mirror_entry); + + return 0; +} + +static int +dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct ethsw_core *ethsw = block->ethsw; + struct dpaa2_switch_acl_entry *acl_entry; + struct flow_action_entry *act; + int err; + + if (dpaa2_switch_acl_tbl_is_full(block)) { NL_SET_ERR_MSG(extack, "Maximum filter capacity reached"); return -ENOMEM; } @@ -459,15 +700,15 @@ int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, return -ENOMEM; act = &cls->rule->action.entries[0]; - err = dpaa2_switch_tc_parse_action(ethsw, act, - &acl_entry->cfg.result, extack); + err = dpaa2_switch_tc_parse_action_acl(ethsw, act, + &acl_entry->cfg.result, extack); if (err) goto free_acl_entry; acl_entry->prio = cls->common.prio; acl_entry->cookie = cls->cookie; - err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry); + err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry); if (err) goto free_acl_entry; @@ -479,14 +720,159 @@ free_acl_entry: return err; } -int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, +static int +dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct dpaa2_switch_mirror_entry *mirror_entry; + struct ethsw_core *ethsw = block->ethsw; + 
struct dpaa2_switch_mirror_entry *tmp; + struct flow_action_entry *cls_act; + struct list_head *pos, *n; + bool mirror_port_enabled; + u16 if_id; + + mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs); + cls_act = &cls->rule->action.entries[0]; + + /* Offload rules only when the destination is a DPAA2 switch port */ + if (!dpaa2_switch_port_dev_check(cls_act->dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Destination not a DPAA2 switch port"); + return -EOPNOTSUPP; + } + if_id = dpaa2_switch_get_index(ethsw, cls_act->dev); + + /* We have a single mirror port but can configure egress mirroring on + * all the other switch ports. We need to allow mirroring rules only + * when the destination port is the same. + */ + if (mirror_port_enabled && ethsw->mirror_port != if_id) { + NL_SET_ERR_MSG_MOD(extack, + "Multiple mirror ports not supported"); + return -EBUSY; + } + + /* Make sure that we don't already have a mirror rule with the same + * configuration. One matchall rule per block is the maximum. + */ + list_for_each_safe(pos, n, &block->mirror_entries) { + tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list); + + if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) { + NL_SET_ERR_MSG_MOD(extack, + "Matchall mirror filter already installed"); + return -EBUSY; + } + } + + mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL); + if (!mirror_entry) + return -ENOMEM; + + mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL; + mirror_entry->cookie = cls->cookie; + + return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id, + extack); +} + +int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct flow_action_entry *act; + + if (!flow_offload_has_one_action(&cls->rule->action)) { + NL_SET_ERR_MSG(extack, "Only singular actions are supported"); + return -EOPNOTSUPP; + } + + act = &cls->rule->action.entries[0]; + switch (act->id) { + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_TRAP: + case FLOW_ACTION_DROP: + return dpaa2_switch_cls_matchall_replace_acl(block, cls); + case FLOW_ACTION_MIRRED: + return dpaa2_switch_cls_matchall_replace_mirror(block, cls); + default: + NL_SET_ERR_MSG_MOD(extack, "Action not supported"); + return -EOPNOTSUPP; + } +} + +int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block, + struct ethsw_port_priv *port_priv) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_mirror_entry *tmp; + int err; + + list_for_each_entry(tmp, &block->mirror_entries, list) { + err = dpsw_if_add_reflection(ethsw->mc_io, 0, + ethsw->dpsw_handle, + port_priv->idx, &tmp->cfg); + if (err) + goto unwind_add; + } + + return 0; + +unwind_add: + list_for_each_entry(tmp, &block->mirror_entries, list) + dpsw_if_remove_reflection(ethsw->mc_io, 0, + ethsw->dpsw_handle, + port_priv->idx, &tmp->cfg); + + return err; +} + +int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block, + struct ethsw_port_priv *port_priv) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_mirror_entry *tmp; + int err; + + list_for_each_entry(tmp, &block->mirror_entries, list) { + err = dpsw_if_remove_reflection(ethsw->mc_io, 0, + ethsw->dpsw_handle, + port_priv->idx, &tmp->cfg); + if (err) + goto unwind_remove; + } + + return 0; + +unwind_remove: + list_for_each_entry(tmp, &block->mirror_entries, list) + dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle, + 
port_priv->idx, &tmp->cfg); + + return err; +} + +int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block, struct tc_cls_matchall_offload *cls) { - struct dpaa2_switch_acl_entry *entry; + struct dpaa2_switch_mirror_entry *mirror_entry; + struct dpaa2_switch_acl_entry *acl_entry; + + /* If this filter is an ACL one, remove it */ + acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block, + cls->cookie); + if (acl_entry) + return dpaa2_switch_acl_tbl_remove_entry(block, + acl_entry); - entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie); - if (!entry) - return 0; + /* If not, then it has to be a mirror */ + mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block, + cls->cookie); + if (mirror_entry) + return dpaa2_switch_block_remove_mirror(block, + mirror_entry); - return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry); + return 0; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 98cc0133c343..175f15c46842 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -41,14 +41,14 @@ static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *e return NULL; } -static struct dpaa2_switch_acl_tbl * -dpaa2_switch_acl_tbl_get_unused(struct ethsw_core *ethsw) +static struct dpaa2_switch_filter_block * +dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw) { int i; for (i = 0; i < ethsw->sw_attr.num_ifs; i++) - if (!ethsw->acls[i].in_use) - return &ethsw->acls[i]; + if (!ethsw->filter_blocks[i].in_use) + return &ethsw->filter_blocks[i]; return NULL; } @@ -594,12 +594,18 @@ static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu) return 0; } -static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev) +static int dpaa2_switch_port_link_state_update(struct net_device *netdev) { struct ethsw_port_priv *port_priv = netdev_priv(netdev); struct dpsw_link_state state; int err; + /* When we manage the MAC/PHY using phylink there is no need + * to manually update the netif_carrier. + */ + if (dpaa2_switch_port_is_type_phy(port_priv)) + return 0; + /* Interrupts are received even though no one issued an 'ifconfig up' * on the switch interface. Ignore these link state update interrupts */ @@ -677,12 +683,14 @@ static int dpaa2_switch_port_open(struct net_device *netdev) struct ethsw_core *ethsw = port_priv->ethsw_data; int err; - /* Explicitly set carrier off, otherwise - * netif_carrier_ok() will return true and cause 'ip link show' - * to report the LOWER_UP flag, even though the link - * notification wasn't even received. - */ - netif_carrier_off(netdev); + if (!dpaa2_switch_port_is_type_phy(port_priv)) { + /* Explicitly set carrier off, otherwise + * netif_carrier_ok() will return true and cause 'ip link show' + * to report the LOWER_UP flag, even though the link + * notification wasn't even received. 
+ */ + netif_carrier_off(netdev); + } err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0, port_priv->ethsw_data->dpsw_handle, @@ -692,23 +700,12 @@ static int dpaa2_switch_port_open(struct net_device *netdev) return err; } - /* sync carrier state */ - err = dpaa2_switch_port_carrier_state_sync(netdev); - if (err) { - netdev_err(netdev, - "dpaa2_switch_port_carrier_state_sync err %d\n", err); - goto err_carrier_sync; - } - dpaa2_switch_enable_ctrl_if_napi(ethsw); - return 0; + if (dpaa2_switch_port_is_type_phy(port_priv)) + phylink_start(port_priv->mac->phylink); -err_carrier_sync: - dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx); - return err; + return 0; } static int dpaa2_switch_port_stop(struct net_device *netdev) @@ -717,6 +714,13 @@ static int dpaa2_switch_port_stop(struct net_device *netdev) struct ethsw_core *ethsw = port_priv->ethsw_data; int err; + if (dpaa2_switch_port_is_type_phy(port_priv)) { + phylink_stop(port_priv->mac->phylink); + } else { + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, port_priv->ethsw_data->dpsw_handle, port_priv->idx); @@ -1127,28 +1131,28 @@ err_exit: } static int -dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block, struct flow_cls_offload *f) { switch (f->command) { case FLOW_CLS_REPLACE: - return dpaa2_switch_cls_flower_replace(acl_tbl, f); + return dpaa2_switch_cls_flower_replace(filter_block, f); case FLOW_CLS_DESTROY: - return dpaa2_switch_cls_flower_destroy(acl_tbl, f); + return dpaa2_switch_cls_flower_destroy(filter_block, f); default: return -EOPNOTSUPP; } } static int -dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block, struct tc_cls_matchall_offload *f) { switch (f->command) { case TC_CLSMATCHALL_REPLACE: - return dpaa2_switch_cls_matchall_replace(acl_tbl, f); + return dpaa2_switch_cls_matchall_replace(block, f); case TC_CLSMATCHALL_DESTROY: - return dpaa2_switch_cls_matchall_destroy(acl_tbl, f); + return dpaa2_switch_cls_matchall_destroy(block, f); default: return -EOPNOTSUPP; } @@ -1170,106 +1174,122 @@ static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type, static LIST_HEAD(dpaa2_switch_block_cb_list); -static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv, - struct dpaa2_switch_acl_tbl *acl_tbl) +static int +dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_filter_block *block) { struct ethsw_core *ethsw = port_priv->ethsw_data; struct net_device *netdev = port_priv->netdev; struct dpsw_acl_if_cfg acl_if_cfg; int err; - if (port_priv->acl_tbl) + if (port_priv->filter_block) return -EINVAL; acl_if_cfg.if_id[0] = port_priv->idx; acl_if_cfg.num_ifs = 1; err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, - acl_tbl->id, &acl_if_cfg); + block->acl_id, &acl_if_cfg); if (err) { netdev_err(netdev, "dpsw_acl_add_if err %d\n", err); return err; } - acl_tbl->ports |= BIT(port_priv->idx); - port_priv->acl_tbl = acl_tbl; + block->ports |= BIT(port_priv->idx); + port_priv->filter_block = block; return 0; } static int dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv, - struct dpaa2_switch_acl_tbl *acl_tbl) + struct dpaa2_switch_filter_block *block) { struct ethsw_core *ethsw = port_priv->ethsw_data; struct net_device 
*netdev = port_priv->netdev; struct dpsw_acl_if_cfg acl_if_cfg; int err; - if (port_priv->acl_tbl != acl_tbl) + if (port_priv->filter_block != block) return -EINVAL; acl_if_cfg.if_id[0] = port_priv->idx; acl_if_cfg.num_ifs = 1; err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, - acl_tbl->id, &acl_if_cfg); + block->acl_id, &acl_if_cfg); if (err) { netdev_err(netdev, "dpsw_acl_remove_if err %d\n", err); return err; } - acl_tbl->ports &= ~BIT(port_priv->idx); - port_priv->acl_tbl = NULL; + block->ports &= ~BIT(port_priv->idx); + port_priv->filter_block = NULL; return 0; } static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv, - struct dpaa2_switch_acl_tbl *acl_tbl) + struct dpaa2_switch_filter_block *block) { - struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl; + struct dpaa2_switch_filter_block *old_block = port_priv->filter_block; int err; + /* Offload all the mirror entries found in the block on this new port + * joining it. + */ + err = dpaa2_switch_block_offload_mirror(block, port_priv); + if (err) + return err; + /* If the port is already bound to this ACL table then do nothing. This * can happen when this port is the first one to join a tc block */ - if (port_priv->acl_tbl == acl_tbl) + if (port_priv->filter_block == block) return 0; - err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl); + err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block); if (err) return err; /* Mark the previous ACL table as being unused if this was the last * port that was using it. */ - if (old_acl_tbl->ports == 0) - old_acl_tbl->in_use = false; + if (old_block->ports == 0) - old_block->in_use = false; + if (old_block->ports == 0) + old_block->in_use = false; - return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl); + return dpaa2_switch_port_acl_tbl_bind(port_priv, block); } -static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv, - struct dpaa2_switch_acl_tbl *acl_tbl) +static int +dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_filter_block *block) { struct ethsw_core *ethsw = port_priv->ethsw_data; - struct dpaa2_switch_acl_tbl *new_acl_tbl; + struct dpaa2_switch_filter_block *new_block; int err; + /* Unoffload all the mirror entries found in the block from the + * port leaving it. + */ + err = dpaa2_switch_block_unoffload_mirror(block, port_priv); + if (err) + return err; + /* If we are the last port that leaves a block (an ACL table), + * we'll continue to use this table. 
*/ - if (acl_tbl->ports == BIT(port_priv->idx)) + if (block->ports == BIT(port_priv->idx)) return 0; - err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl); + err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block); if (err) return err; - if (acl_tbl->ports == 0) - acl_tbl->in_use = false; + if (block->ports == 0) + block->in_use = false; - new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw); - new_acl_tbl->in_use = true; - return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl); + new_block = dpaa2_switch_filter_block_get_unused(ethsw); + new_block->in_use = true; + return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block); } static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev, @@ -1277,7 +1297,7 @@ static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev, { struct ethsw_port_priv *port_priv = netdev_priv(netdev); struct ethsw_core *ethsw = port_priv->ethsw_data; - struct dpaa2_switch_acl_tbl *acl_tbl; + struct dpaa2_switch_filter_block *filter_block; struct flow_block_cb *block_cb; bool register_block = false; int err; @@ -1287,24 +1307,24 @@ static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev, ethsw); if (!block_cb) { - /* If the ACL table is not already known, then this port must - * be the first to join it. In this case, we can just continue - * to use our private table + /* If the filter block is not already known, then this port + * must be the first to join it. In this case, we can just + * continue to use our private table */ - acl_tbl = port_priv->acl_tbl; + filter_block = port_priv->filter_block; block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig, - ethsw, acl_tbl, NULL); + ethsw, filter_block, NULL); if (IS_ERR(block_cb)) return PTR_ERR(block_cb); register_block = true; } else { - acl_tbl = flow_block_cb_priv(block_cb); + filter_block = flow_block_cb_priv(block_cb); } flow_block_cb_incref(block_cb); - err = dpaa2_switch_port_block_bind(port_priv, acl_tbl); + err = dpaa2_switch_port_block_bind(port_priv, filter_block); if (err) goto err_block_bind; @@ -1327,7 +1347,7 @@ static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev, { struct ethsw_port_priv *port_priv = netdev_priv(netdev); struct ethsw_core *ethsw = port_priv->ethsw_data; - struct dpaa2_switch_acl_tbl *acl_tbl; + struct dpaa2_switch_filter_block *filter_block; struct flow_block_cb *block_cb; int err; @@ -1337,8 +1357,8 @@ static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev, if (!block_cb) return; - acl_tbl = flow_block_cb_priv(block_cb); - err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl); + filter_block = flow_block_cb_priv(block_cb); + err = dpaa2_switch_port_block_unbind(port_priv, filter_block); if (!err && !flow_block_cb_decref(block_cb)) { flow_block_cb_remove(block_cb, f); list_del(&block_cb->driver_list); @@ -1403,41 +1423,105 @@ bool dpaa2_switch_port_dev_check(const struct net_device *netdev) return netdev->netdev_ops == &dpaa2_switch_port_ops; } -static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw) +static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv) { - int i; + struct fsl_mc_device *dpsw_port_dev, *dpmac_dev; + struct dpaa2_mac *mac; + int err; - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { - dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev); - dpaa2_switch_port_set_mac_addr(ethsw->ports[i]); + dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent); + dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx); + 
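Stepping back from the diff for a moment: the tc block bind path above leans on the flow_block_cb refcounting contract - the first port joining a shared block allocates the callback with its private filter block as cb_priv, and every later port merely looks it up and bumps the refcount. A condensed, driver-agnostic sketch of that contract (hypothetical names, error handling trimmed):

    #include <net/flow_offload.h>

    static int example_block_bind(struct flow_block_offload *f,
                                  flow_setup_cb_t *cb, void *driver_ctx,
                                  void *my_block)
    {
            struct flow_block_cb *block_cb;

            block_cb = flow_block_cb_lookup(f->block, cb, driver_ctx);
            if (!block_cb) {
                    /* First port in this block: allocate the callback
                     * with the private filter block attached.
                     */
                    block_cb = flow_block_cb_alloc(cb, driver_ctx,
                                                   my_block, NULL);
                    if (IS_ERR(block_cb))
                            return PTR_ERR(block_cb);
                    flow_block_cb_add(block_cb, f);
            }
            flow_block_cb_incref(block_cb);
            return 0;
    }

The unbind side mirrors this: only when flow_block_cb_decref() drops to zero is the callback removed, which is exactly what dpaa2_switch_setup_tc_block_unbind() does above.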
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) + return PTR_ERR(dpmac_dev); + + if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) + return 0; + + mac = kzalloc(sizeof(*mac), GFP_KERNEL); + if (!mac) + return -ENOMEM; + + mac->mc_dev = dpmac_dev; + mac->mc_io = port_priv->ethsw_data->mc_io; + mac->net_dev = port_priv->netdev; + + err = dpaa2_mac_open(mac); + if (err) + goto err_free_mac; + port_priv->mac = mac; + + if (dpaa2_switch_port_is_type_phy(port_priv)) { + err = dpaa2_mac_connect(mac); + if (err) { + netdev_err(port_priv->netdev, + "Error connecting to the MAC endpoint %pe\n", + ERR_PTR(err)); + goto err_close_mac; + } } + + return 0; + +err_close_mac: + dpaa2_mac_close(mac); + port_priv->mac = NULL; +err_free_mac: + kfree(mac); + return err; +} + +static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv) +{ + if (dpaa2_switch_port_is_type_phy(port_priv)) + dpaa2_mac_disconnect(port_priv->mac); + + if (!dpaa2_switch_port_has_mac(port_priv)) + return; + + dpaa2_mac_close(port_priv->mac); + kfree(port_priv->mac); + port_priv->mac = NULL; } static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) { struct device *dev = (struct device *)arg; struct ethsw_core *ethsw = dev_get_drvdata(dev); - - /* Mask the events and the if_id reserved bits to be cleared on read */ - u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000; - int err; + struct ethsw_port_priv *port_priv; + u32 status = ~0; + int err, if_id; err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, DPSW_IRQ_INDEX_IF, &status); if (err) { dev_err(dev, "Can't get irq status (err %d)\n", err); - - err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, 0xFFFFFFFF); - if (err) - dev_err(dev, "Can't clear irq status (err %d)\n", err); goto out; } - if (status & DPSW_IRQ_EVENT_LINK_CHANGED) - dpaa2_switch_links_state_update(ethsw); + if_id = (status & 0xFFFF0000) >> 16; + port_priv = ethsw->ports[if_id]; + + if (status & DPSW_IRQ_EVENT_LINK_CHANGED) { + dpaa2_switch_port_link_state_update(port_priv->netdev); + dpaa2_switch_port_set_mac_addr(port_priv); + } + + if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) { + rtnl_lock(); + if (dpaa2_switch_port_has_mac(port_priv)) + dpaa2_switch_port_disconnect_mac(port_priv); + else + dpaa2_switch_port_connect_mac(port_priv); + rtnl_unlock(); + } out: + err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, status); + if (err) + dev_err(dev, "Can't clear irq status (err %d)\n", err); + return IRQ_HANDLED; } @@ -1889,8 +1973,12 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev, return notifier_from_errno(err); } +static struct notifier_block dpaa2_switch_port_switchdev_nb; +static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb; + static int dpaa2_switch_port_bridge_join(struct net_device *netdev, - struct net_device *upper_dev) + struct net_device *upper_dev, + struct netlink_ext_ack *extack) { struct ethsw_port_priv *port_priv = netdev_priv(netdev); struct ethsw_core *ethsw = port_priv->ethsw_data; @@ -1906,8 +1994,8 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev, other_port_priv = netdev_priv(other_dev); if (other_port_priv->ethsw_data != port_priv->ethsw_data) { - netdev_err(netdev, - "Interface from a different DPSW is in the bridge already!\n"); + NL_SET_ERR_MSG_MOD(extack, + "Interface from a different DPSW is in the bridge already"); return -EINVAL; } } @@ -1929,8 +2017,16 @@ static int 
dpaa2_switch_port_bridge_join(struct net_device *netdev, if (err) goto err_egress_flood; + err = switchdev_bridge_port_offload(netdev, netdev, NULL, + &dpaa2_switch_port_switchdev_nb, + &dpaa2_switch_port_switchdev_blocking_nb, + false, extack); + if (err) + goto err_switchdev_offload; + return 0; +err_switchdev_offload: err_egress_flood: dpaa2_switch_port_set_fdb(port_priv, NULL); return err; @@ -1956,6 +2052,13 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid); } +static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev) +{ + switchdev_bridge_port_unoffload(netdev, NULL, + &dpaa2_switch_port_switchdev_nb, + &dpaa2_switch_port_switchdev_blocking_nb); +} + static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) { struct ethsw_port_priv *port_priv = netdev_priv(netdev); @@ -2029,6 +2132,28 @@ static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *net return 0; } +static int +dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev, + struct net_device *upper_dev, + struct netlink_ext_ack *extack) +{ + int err; + + if (!br_vlan_enabled(upper_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge"); + return -EOPNOTSUPP; + } + + err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot join a bridge while VLAN uppers are present"); + return err; + } + + return 0; +} + static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -2049,25 +2174,23 @@ static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb, if (!netif_is_bridge_master(upper_dev)) break; - if (!br_vlan_enabled(upper_dev)) { - NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge"); - err = -EOPNOTSUPP; + err = dpaa2_switch_prechangeupper_sanity_checks(netdev, + upper_dev, + extack); + if (err) goto out; - } - err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev); - if (err) { - NL_SET_ERR_MSG_MOD(extack, - "Cannot join a bridge while VLAN uppers are present"); - goto out; - } + if (!info->linking) + dpaa2_switch_port_pre_bridge_leave(netdev); break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; if (netif_is_bridge_master(upper_dev)) { if (info->linking) - err = dpaa2_switch_port_bridge_join(netdev, upper_dev); + err = dpaa2_switch_port_bridge_join(netdev, + upper_dev, + extack); else err = dpaa2_switch_port_bridge_leave(netdev); } @@ -2802,6 +2925,18 @@ err_free_dpbp: return err; } +static void dpaa2_switch_remove_port(struct ethsw_core *ethsw, + u16 port_idx) +{ + struct ethsw_port_priv *port_priv = ethsw->ports[port_idx]; + + rtnl_lock(); + dpaa2_switch_port_disconnect_mac(port_priv); + rtnl_unlock(); + free_netdev(port_priv->netdev); + ethsw->ports[port_idx] = NULL; +} + static int dpaa2_switch_init(struct fsl_mc_device *sw_dev) { struct device *dev = &sw_dev->dev; @@ -2952,7 +3087,7 @@ static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv, acl_entry.cfg.precedence = 0; acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; - return dpaa2_switch_acl_entry_add(port_priv->acl_tbl, &acl_entry); + return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry); } static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) @@ -2965,7 +3100,7 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) }; struct net_device *netdev = 
port_priv->netdev; struct ethsw_core *ethsw = port_priv->ethsw_data; - struct dpaa2_switch_acl_tbl *acl_tbl; + struct dpaa2_switch_filter_block *filter_block; struct dpsw_fdb_cfg fdb_cfg = {0}; struct dpsw_if_attr dpsw_if_attr; struct dpaa2_switch_fdb *fdb; @@ -3020,14 +3155,15 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) return err; } - acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw); - acl_tbl->ethsw = ethsw; - acl_tbl->id = acl_tbl_id; - acl_tbl->in_use = true; - acl_tbl->num_rules = 0; - INIT_LIST_HEAD(&acl_tbl->entries); + filter_block = dpaa2_switch_filter_block_get_unused(ethsw); + filter_block->ethsw = ethsw; + filter_block->acl_id = acl_tbl_id; + filter_block->in_use = true; + filter_block->num_acl_rules = 0; + INIT_LIST_HEAD(&filter_block->acl_entries); + INIT_LIST_HEAD(&filter_block->mirror_entries); - err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl); + err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block); if (err) return err; @@ -3079,11 +3215,11 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { port_priv = ethsw->ports[i]; unregister_netdev(port_priv->netdev); - free_netdev(port_priv->netdev); + dpaa2_switch_remove_port(ethsw, i); } kfree(ethsw->fdbs); - kfree(ethsw->acls); + kfree(ethsw->filter_blocks); kfree(ethsw->ports); dpaa2_switch_teardown(sw_dev); @@ -3156,6 +3292,10 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, goto err_port_probe; port_priv->learn_ena = false; + err = dpaa2_switch_port_connect_mac(port_priv); + if (err) + goto err_port_probe; + return 0; err_port_probe: @@ -3209,9 +3349,10 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) goto err_free_ports; } - ethsw->acls = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->acls), - GFP_KERNEL); - if (!ethsw->acls) { + ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs, + sizeof(*ethsw->filter_blocks), + GFP_KERNEL); + if (!ethsw->filter_blocks) { err = -ENOMEM; goto err_free_fdbs; } @@ -3231,17 +3372,16 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) &ethsw->fq[i].napi, dpaa2_switch_poll, NAPI_POLL_WEIGHT); - err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); - if (err) { - dev_err(ethsw->dev, "dpsw_enable err %d\n", err); - goto err_free_netdev; - } - /* Setup IRQs */ err = dpaa2_switch_setup_irqs(sw_dev); if (err) goto err_stop; + /* By convention, if the mirror port is equal to the number of switch + * interfaces, then mirroring of any kind is disabled. 
+ */ + ethsw->mirror_port = ethsw->sw_attr.num_ifs; + /* Register the netdev only when the entire setup is done and the * switch port interfaces are ready to receive traffic */ @@ -3263,8 +3403,8 @@ err_stop: dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); err_free_netdev: for (i--; i >= 0; i--) - free_netdev(ethsw->ports[i]->netdev); - kfree(ethsw->acls); + dpaa2_switch_remove_port(ethsw, i); + kfree(ethsw->filter_blocks); err_free_fdbs: kfree(ethsw->fdbs); err_free_ports: diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h index bdef71f234cb..0002dca4d417 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h @@ -21,6 +21,7 @@ #include <net/pkt_cls.h> #include <soc/fsl/dpaa2-io.h> +#include "dpaa2-mac.h" #include "dpsw.h" /* Number of IRQs supported */ @@ -113,20 +114,29 @@ struct dpaa2_switch_acl_entry { struct dpsw_acl_key key; }; -struct dpaa2_switch_acl_tbl { - struct list_head entries; +struct dpaa2_switch_mirror_entry { + struct list_head list; + struct dpsw_reflection_cfg cfg; + unsigned long cookie; + u16 if_id; +}; + +struct dpaa2_switch_filter_block { struct ethsw_core *ethsw; u64 ports; - - u16 id; - u8 num_rules; bool in_use; + + struct list_head acl_entries; + u16 acl_id; + u8 num_acl_rules; + + struct list_head mirror_entries; }; static inline bool -dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl) +dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_filter_block *filter_block) { - if ((acl_tbl->num_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >= + if ((filter_block->num_acl_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >= DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES) return true; return false; @@ -149,7 +159,8 @@ struct ethsw_port_priv { bool ucast_flood; bool learn_ena; - struct dpaa2_switch_acl_tbl *acl_tbl; + struct dpaa2_switch_filter_block *filter_block; + struct dpaa2_mac *mac; }; /* Switch data */ @@ -175,7 +186,8 @@ struct ethsw_core { int napi_users; struct dpaa2_switch_fdb *fdbs; - struct dpaa2_switch_acl_tbl *acls; + struct dpaa2_switch_filter_block *filter_blocks; + u16 mirror_port; }; static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw, @@ -215,6 +227,22 @@ static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw) return true; } +static inline bool +dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv) +{ + if (port_priv->mac && + (port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY || + port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE)) + return true; + + return false; +} + +static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv) +{ + return port_priv->mac ? 
true : false; +} + bool dpaa2_switch_port_dev_check(const struct net_device *netdev); int dpaa2_switch_port_vlans_add(struct net_device *netdev, @@ -229,18 +257,24 @@ typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv, /* TC offload */ -int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, +int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block, struct flow_cls_offload *cls); -int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, +int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block, struct flow_cls_offload *cls); -int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, +int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block, struct tc_cls_matchall_offload *cls); -int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, +int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block, struct tc_cls_matchall_offload *cls); -int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, +int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *block, struct dpaa2_switch_acl_entry *entry); + +int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block, + struct ethsw_port_priv *port_priv); + +int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block, + struct ethsw_port_priv *port_priv); #endif /* __ETHSW_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h index cb13e740f72b..397d55f2bd99 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h @@ -39,11 +39,16 @@ #define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016) #define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017) +#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022) + #define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030) #define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031) #define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_V2(0x034) +#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037) +#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038) + #define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D) #define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E) @@ -533,5 +538,19 @@ struct dpsw_cmd_acl_entry { __le64 pad2[4]; __le64 key_iova; }; + +struct dpsw_cmd_set_reflection_if { + __le16 if_id; +}; + +#define DPSW_FILTER_SHIFT 0 +#define DPSW_FILTER_SIZE 2 + +struct dpsw_cmd_if_reflection { + __le16 if_id; + __le16 vlan_id; + /* only 2 bits from the LSB */ + u8 filter; +}; #pragma pack(pop) #endif /* __FSL_DPSW_CMD_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c index 6352d6d1ecba..ab921d75deb2 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpsw.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c @@ -1579,3 +1579,83 @@ int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, /* send command to mc*/ return mc_send_command(mc_io, &cmd); } + +/** + * dpsw_set_reflection_if() - Set target interface for traffic mirrored + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Id + * + * Only one mirroring destination is allowed per switch + * + * Return: Completion status. '0' on Success; Error code otherwise. 
+ */ +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id) +{ + struct dpsw_cmd_set_reflection_if *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_add_reflection() - Setup mirroring rule + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @cfg: Reflection configuration + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, const struct dpsw_reflection_cfg *cfg) +{ + struct dpsw_cmd_if_reflection *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id); + dpsw_set_field(cmd_params->filter, FILTER, cfg->filter); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_remove_reflection() - Remove mirroring rule + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @cfg: Reflection configuration + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, const struct dpsw_reflection_cfg *cfg) +{ + struct dpsw_cmd_if_reflection *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id); + dpsw_set_field(cmd_params->filter, FILTER, cfg->filter); + + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h index 5ef221a25b02..b90bd363f47a 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpsw.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h @@ -99,6 +99,11 @@ int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); #define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001 /** + * DPSW_IRQ_EVENT_ENDPOINT_CHANGED - Indicates a change in endpoint + */ +#define DPSW_IRQ_EVENT_ENDPOINT_CHANGED 0x0002 + +/** * struct dpsw_irq_cfg - IRQ configuration * @addr: Address that must be written to signal a message-based interrupt * @val: Value to write into irq_addr address @@ -752,4 +757,35 @@ int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 acl_id, const struct dpsw_acl_entry_cfg *cfg); + +/** + * enum dpsw_reflection_filter - Filter type for frames to be reflected + * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames + * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames that belong to + * the particular VLAN defined by vid parameter + * + */ +enum dpsw_reflection_filter { + DPSW_REFLECTION_FILTER_INGRESS_ALL = 0, + DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1 +}; + +/** + * struct 
dpsw_reflection_cfg - Structure representing the mirroring config + * @filter: Filter type for frames to be mirrored + * @vlan_id: VLAN ID to mirror; valid only when the type is DPSW_INGRESS_VLAN + */ +struct dpsw_reflection_cfg { + enum dpsw_reflection_filter filter; + u16 vlan_id; +}; + +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id); + +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, const struct dpsw_reflection_cfg *cfg); + +int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, const struct dpsw_reflection_cfg *cfg); #endif /* __FSL_DPSW_H */ diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index ebccaf02411c..9690e36e9e85 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -585,7 +585,9 @@ static void enetc_get_ringparam(struct net_device *ndev, } static int enetc_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ic) + struct ethtool_coalesce *ic, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct enetc_int_vector *v = priv->int_vector[0]; @@ -602,7 +604,9 @@ static int enetc_get_coalesce(struct net_device *ndev, } static int enetc_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ic) + struct ethtool_coalesce *ic, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct enetc_ndev_priv *priv = netdev_priv(ndev); u32 rx_ictt, tx_ictt; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index c84f6c226743..60d94e0a07d6 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -735,7 +735,7 @@ static const struct net_device_ops enetc_ndev_ops = { .ndo_set_vf_vlan = enetc_pf_set_vf_vlan, .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk, .ndo_set_features = enetc_pf_set_features, - .ndo_do_ioctl = enetc_ioctl, + .ndo_eth_ioctl = enetc_ioctl, .ndo_setup_tc = enetc_setup_tc, .ndo_bpf = enetc_setup_bpf, .ndo_xdp_xmit = enetc_xdp_xmit, diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 03090ba7e226..1a9d1e8b772c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -99,7 +99,7 @@ static const struct net_device_ops enetc_ndev_ops = { .ndo_get_stats = enetc_get_stats, .ndo_set_mac_address = enetc_vf_set_mac_addr, .ndo_set_features = enetc_vf_set_features, - .ndo_do_ioctl = enetc_ioctl, + .ndo_eth_ioctl = enetc_ioctl, .ndo_setup_tc = enetc_setup_tc, }; diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 2e002e4b4b4a..7b4961daa254 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -77,6 +77,8 @@ #define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */ #define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */ #define FEC_QOS_SCHEME 0x1f0 /* Set multi queues Qos scheme */ +#define FEC_LPI_SLEEP 0x1f4 /* Set IEEE802.3az LPI Sleep Ts time */ +#define FEC_LPI_WAKE 0x1f8 /* Set IEEE802.3az LPI Wake Tw time */ #define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ #define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ @@ 
-187,6 +189,8 @@ #define FEC_RXIC0 0xfff #define FEC_RXIC1 0xfff #define FEC_RXIC2 0xfff +#define FEC_LPI_SLEEP 0xfff +#define FEC_LPI_WAKE 0xfff #endif /* CONFIG_M5272 */ @@ -379,6 +383,9 @@ struct bufdesc_ex { #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF) #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) +#define FEC_ENET_TXC_DLY ((uint)0x00010000) +#define FEC_ENET_RXC_DLY ((uint)0x00020000) + /* ENET interrupt coalescing macro define */ #define FEC_ITR_CLK_SEL (0x1 << 30) #define FEC_ITR_EN (0x1 << 31) @@ -472,6 +479,22 @@ struct bufdesc_ex { */ #define FEC_QUIRK_HAS_MULTI_QUEUES (1 << 19) +/* The i.MX8MQ ENET IP adds support for the IEEE 802.3az EEE + * standard. For transmission, the MAC supplies two user registers to set + * the Sleep (TS) and Wake (TW) times. + */ +#define FEC_QUIRK_HAS_EEE (1 << 20) + +/* The i.MX8QM ENET IP adds a feature to generate delayed TXC/RXC + * as an alternative option to make sure it works well with various PHYs. + * For the implementation of delayed clock, ENET takes synchronized 250MHz + * clocks to generate 2ns delay. + */ +#define FEC_QUIRK_DELAYED_CLKS_SUPPORT (1 << 21) + +/* The i.MX8MQ SoC integration mixes the wakeup interrupt signal into the "int2" interrupt line. */ +#define FEC_QUIRK_WAKEUP_FROM_INT2 (1 << 22) + struct bufdesc_prop { int qid; /* Address of Rx and Tx buffers */ @@ -528,6 +551,7 @@ struct fec_enet_private { struct clk *clk_ref; struct clk *clk_enet_out; struct clk *clk_ptp; + struct clk *clk_2x_txclk; bool ptp_clk_on; struct mutex ptp_clk_mutex; @@ -550,6 +574,8 @@ struct fec_enet_private { uint phy_speed; phy_interface_t phy_interface; struct device_node *phy_node; + bool rgmii_txc_dly; + bool rgmii_rxc_dly; int link; int full_duplex; int speed; @@ -557,6 +583,7 @@ struct fec_enet_private { bool bufdesc_ex; int pause_flag; int wol_flag; + int wake_irq; u32 quirks; struct napi_struct napi; @@ -589,6 +616,10 @@ struct fec_enet_private { unsigned int tx_time_itr; unsigned int itr_clk_rate; + /* tx lpi eee mode */ + struct ethtool_eee eee; + unsigned int clk_ref_rate; + u32 rx_copybreak; /* ptp clock period in ns*/ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 7e4c4980ced7..80bd5c629fa0 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -135,6 +135,26 @@ static const struct fec_devinfo fec_imx6ul_info = { FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII, }; +static const struct fec_devinfo fec_imx8mq_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | + FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | + FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2, +}; + +static const struct fec_devinfo fec_imx8qm_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | + FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | + FEC_QUIRK_DELAYED_CLKS_SUPPORT, +}; + static struct platform_device_id fec_devtype[] = { { /* keep it for coldfire */ @@ -162,6 +182,12 @@ static struct platform_device_id fec_devtype[] = { .name = "imx6ul-fec", .driver_data = (kernel_ulong_t)&fec_imx6ul_info, }, { + .name = "imx8mq-fec", + 
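A quick arithmetic check on the delayed-clock comment above, since the numbers are easy to misread (the half-cycle interpretation is our reading, not stated in the source):

    1 / 250 MHz = 4 ns clock period
    4 ns / 2    = 2 ns TXC/RXC shift (the standard RGMII internal delay)

This would also explain why the probe path further down only requests the optional enet_2x_txclk clock when one of the rgmii_*_dly flags is set: the 2 ns delay is derived from the synchronized 250 MHz clock rather than from a programmable delay line.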
.driver_data = (kernel_ulong_t)&fec_imx8mq_info, + }, { + .name = "imx8qm-fec", + .driver_data = (kernel_ulong_t)&fec_imx8qm_info, + }, { /* sentinel */ } }; @@ -175,6 +201,8 @@ enum imx_fec_type { MVF600_FEC, IMX6SX_FEC, IMX6UL_FEC, + IMX8MQ_FEC, + IMX8QM_FEC, }; static const struct of_device_id fec_dt_ids[] = { @@ -185,6 +213,8 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, + { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], }, + { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -1107,6 +1137,13 @@ fec_restart(struct net_device *ndev) if (fep->bufdesc_ex) ecntl |= (1 << 4); + if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && + fep->rgmii_txc_dly) + ecntl |= FEC_ENET_TXC_DLY; + if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && + fep->rgmii_rxc_dly) + ecntl |= FEC_ENET_RXC_DLY; + #ifndef CONFIG_M5272 /* Enable the MIB statistic event counters */ writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); @@ -1970,6 +2007,10 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) if (ret) goto failed_clk_ref; + ret = clk_prepare_enable(fep->clk_2x_txclk); + if (ret) + goto failed_clk_2x_txclk; + fec_enet_phy_reset_after_clk_enable(ndev); } else { clk_disable_unprepare(fep->clk_enet_out); @@ -1980,10 +2021,14 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) mutex_unlock(&fep->ptp_clk_mutex); } clk_disable_unprepare(fep->clk_ref); + clk_disable_unprepare(fep->clk_2x_txclk); } return 0; +failed_clk_2x_txclk: + if (fep->clk_ref) + clk_disable_unprepare(fep->clk_ref); failed_clk_ref: if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); @@ -1997,6 +2042,34 @@ failed_clk_ptp: return ret; } +static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep, + struct device_node *np) +{ + u32 rgmii_tx_delay, rgmii_rx_delay; + + /* For rgmii tx internal delay, valid values are 0ps and 2000ps */ + if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) { + if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) { + dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps"); + return -EINVAL; + } else if (rgmii_tx_delay == 2000) { + fep->rgmii_txc_dly = true; + } + } + + /* For rgmii rx internal delay, valid values are 0ps and 2000ps */ + if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) { + if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) { + dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps"); + return -EINVAL; + } else if (rgmii_rx_delay == 2000) { + fep->rgmii_rxc_dly = true; + } + } + + return 0; +} + static int fec_enet_mii_probe(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -2581,8 +2654,10 @@ static void fec_enet_itr_coal_set(struct net_device *ndev) } } -static int -fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) +static int fec_enet_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -2598,8 +2673,10 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) return 0; } -static int -fec_enet_set_coalesce(struct net_device *ndev, 
struct ethtool_coalesce *ec) +static int fec_enet_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct fec_enet_private *fep = netdev_priv(ndev); struct device *dev = &fep->pdev->dev; @@ -2651,7 +2728,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev) ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; - fec_enet_set_coalesce(ndev, &ec); + fec_enet_set_coalesce(ndev, &ec, NULL, NULL); } static int fec_enet_get_tunable(struct net_device *netdev, @@ -2692,6 +2769,92 @@ static int fec_enet_set_tunable(struct net_device *netdev, return ret; } +/* LPI Sleep Ts count base on tx clk (clk_ref). + * The lpi sleep cnt value = X us / (cycle_ns). + */ +static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + return us * (fep->clk_ref_rate / 1000) / 1000; +} + +static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct ethtool_eee *p = &fep->eee; + unsigned int sleep_cycle, wake_cycle; + int ret = 0; + + if (enable) { + ret = phy_init_eee(ndev->phydev, 0); + if (ret) + return ret; + + sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer); + wake_cycle = sleep_cycle; + } else { + sleep_cycle = 0; + wake_cycle = 0; + } + + p->tx_lpi_enabled = enable; + p->eee_enabled = enable; + p->eee_active = enable; + + writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP); + writel(wake_cycle, fep->hwp + FEC_LPI_WAKE); + + return 0; +} + +static int +fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct ethtool_eee *p = &fep->eee; + + if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) + return -EOPNOTSUPP; + + if (!netif_running(ndev)) + return -ENETDOWN; + + edata->eee_enabled = p->eee_enabled; + edata->eee_active = p->eee_active; + edata->tx_lpi_timer = p->tx_lpi_timer; + edata->tx_lpi_enabled = p->tx_lpi_enabled; + + return phy_ethtool_get_eee(ndev->phydev, edata); +} + +static int +fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct ethtool_eee *p = &fep->eee; + int ret = 0; + + if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) + return -EOPNOTSUPP; + + if (!netif_running(ndev)) + return -ENETDOWN; + + p->tx_lpi_timer = edata->tx_lpi_timer; + + if (!edata->eee_enabled || !edata->tx_lpi_enabled || + !edata->tx_lpi_timer) + ret = fec_enet_eee_mode_set(ndev, false); + else + ret = fec_enet_eee_mode_set(ndev, true); + + if (ret) + return ret; + + return phy_ethtool_set_eee(ndev->phydev, edata); +} + static void fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { @@ -2719,12 +2882,12 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); if (device_may_wakeup(&ndev->dev)) { fep->wol_flag |= FEC_WOL_FLAG_ENABLE; - if (fep->irq[0] > 0) - enable_irq_wake(fep->irq[0]); + if (fep->wake_irq > 0) + enable_irq_wake(fep->wake_irq); } else { fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); - if (fep->irq[0] > 0) - disable_irq_wake(fep->irq[0]); + if (fep->wake_irq > 0) + disable_irq_wake(fep->wake_irq); } return 0; @@ -2752,6 +2915,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .set_tunable = fec_enet_set_tunable, .get_wol = fec_enet_get_wol, .set_wol = fec_enet_set_wol, + 
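Before the ethtool_ops additions below, a worked pass through the LPI timer conversion above may help: fec_enet_us_to_tx_cycle() turns the user's tx_lpi_timer (microseconds) into clk_ref cycles. Assuming the common 125 MHz clk_ref used for RGMII (the actual rate is board specific):

    unsigned int clk_ref_rate = 125000000;  /* Hz, assumed board rate   */
    unsigned int us = 100;                  /* requested tx_lpi_timer   */
    unsigned int cycles = us * (clk_ref_rate / 1000) / 1000;  /* 12500  */

12500 cycles at 8 ns each is exactly 100 us, and that value is written to both FEC_LPI_SLEEP and FEC_LPI_WAKE. From user space this path is driven by something like "ethtool --set-eee eth0 eee on tx-lpi on tx-timer 100" (device name hypothetical).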
.get_eee = fec_enet_get_eee, + .set_eee = fec_enet_set_eee, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, .self_test = net_selftest, @@ -3280,7 +3445,7 @@ static const struct net_device_ops fec_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, - .ndo_do_ioctl = fec_enet_ioctl, + .ndo_eth_ioctl = fec_enet_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = fec_poll_controller, #endif @@ -3535,6 +3700,17 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev) return irq_cnt; } +static void fec_enet_get_wakeup_irq(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + + if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2) + fep->wake_irq = fep->irq[2]; + else + fep->wake_irq = fep->irq[0]; +} + static int fec_enet_init_stop_mode(struct fec_enet_private *fep, struct device_node *np) { @@ -3666,6 +3842,10 @@ fec_probe(struct platform_device *pdev) fep->phy_interface = interface; } + ret = fec_enet_parse_rgmii_delay(fep, np); + if (ret) + goto failed_rgmii_delay; + fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(fep->clk_ipg)) { ret = PTR_ERR(fep->clk_ipg); @@ -3692,6 +3872,14 @@ fec_probe(struct platform_device *pdev) fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); if (IS_ERR(fep->clk_ref)) fep->clk_ref = NULL; + fep->clk_ref_rate = clk_get_rate(fep->clk_ref); + + /* clk_2x_txclk is optional, depends on board */ + if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) { + fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk"); + if (IS_ERR(fep->clk_2x_txclk)) + fep->clk_2x_txclk = NULL; + } fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); @@ -3762,6 +3950,9 @@ fec_probe(struct platform_device *pdev) fep->irq[i] = irq; } + /* Decide which interrupt line is wakeup capable */ + fec_enet_get_wakeup_irq(pdev); + ret = fec_enet_mii_init(pdev); if (ret) goto failed_mii_init; @@ -3809,6 +4000,7 @@ failed_clk_ahb: failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: +failed_rgmii_delay: if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(phy_node); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 02c47658a215..73ff359a15f1 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -792,7 +792,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = { .ndo_set_rx_mode = mpc52xx_fec_set_multicast_list, .ndo_set_mac_address = mpc52xx_fec_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_tx_timeout = mpc52xx_fec_tx_timeout, .ndo_get_stats = mpc52xx_fec_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 6ee325ad35c5..2db6e38a772e 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -900,7 +900,7 @@ static const struct net_device_ops fs_enet_netdev_ops = { .ndo_start_xmit = fs_enet_start_xmit, .ndo_tx_timeout = fs_timeout, .ndo_set_rx_mode = fs_set_multicast_list, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_validate_addr = 
eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 9646483137c4..af6ad94bf24a 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -3184,7 +3184,7 @@ static const struct net_device_ops gfar_netdev_ops = { .ndo_set_features = gfar_set_features, .ndo_set_rx_mode = gfar_set_multi, .ndo_tx_timeout = gfar_timeout, - .ndo_do_ioctl = gfar_ioctl, + .ndo_eth_ioctl = gfar_ioctl, .ndo_get_stats64 = gfar_get_stats64, .ndo_change_carrier = fixed_phy_change_carrier, .ndo_set_mac_address = gfar_set_mac_addr, diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index cc7d4f93da54..7b32ed29bf4c 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -243,7 +243,9 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, /* Get the coalescing parameters, and put them in the cvals * structure. */ static int gfar_gcoalesce(struct net_device *dev, - struct ethtool_coalesce *cvals) + struct ethtool_coalesce *cvals, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct gfar_private *priv = netdev_priv(dev); struct gfar_priv_rx_q *rx_queue = NULL; @@ -280,7 +282,9 @@ static int gfar_gcoalesce(struct net_device *dev, * in order for coalescing to be active */ static int gfar_scoalesce(struct net_device *dev, - struct ethtool_coalesce *cvals) + struct ethtool_coalesce *cvals, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct gfar_private *priv = netdev_priv(dev); int i, err = 0; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 0acfafb73db1..3eb288d10b0c 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3516,7 +3516,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = { .ndo_set_mac_address = ucc_geth_set_mac_addr, .ndo_set_rx_mode = ucc_geth_set_multi, .ndo_tx_timeout = ucc_geth_timeout, - .ndo_do_ioctl = ucc_geth_ioctl, + .ndo_eth_ioctl = ucc_geth_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ucc_netpoll, #endif diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 5bb56b454541..f089d33dd48e 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -322,7 +322,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, tail = ioread32be(&priv->reg_bar0->adminq_event_counter); // Check if next command will overflow the buffer. - if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) { + if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == + (tail & priv->adminq_mask)) { int err; // Flush existing commands to make room. @@ -332,7 +333,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, // Retry. tail = ioread32be(&priv->reg_bar0->adminq_event_counter); - if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) { + if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == + (tail & priv->adminq_mask)) { // This should never happen. We just flushed the // command queue so there should be enough space. 
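// Editor's note on the gve fix above: adminq_prod_cnt and the event
// counter are free-running, so once they exceed adminq_mask the old,
// unmasked comparison could never flag a full ring again. A worked
// example with a hypothetical 64-entry ring (adminq_mask = 63):
//
//   prod_cnt = 163, tail = 100  ->  63 commands outstanding (ring full)
//   before: ((163 + 1) & 63) == 100        ->  36 == 100, false, overflow
//   after:  ((163 + 1) & 63) == (100 & 63) ->  36 == 36,  true, flush first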
return -ENOMEM; diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index bb062b02fb85..3312e1d93c3b 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -90,6 +90,8 @@ config HNS_ENET config HNS3 tristate "Hisilicon Network Subsystem Support HNS3 (Framework)" depends on PCI + select NET_DEVLINK + select PAGE_POOL help This selects the framework support for Hisilicon Network Subsystem 3. This layer facilitates clients like ENET, RoCE and user-space ethernet @@ -102,7 +104,7 @@ config HNS3_HCLGE tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support" default m depends on PCI_MSI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL help This selects the HNS3_HCLGE network acceleration engine & its hardware compatibility layer. The engine would be used in Hisilicon hip08 family of diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index e53512f6878a..37b605fed32c 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -796,7 +796,9 @@ static void hip04_tx_timeout_task(struct work_struct *work) } static int hip04_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct hip04_priv *priv = netdev_priv(netdev); @@ -807,7 +809,9 @@ static int hip04_get_coalesce(struct net_device *netdev, } static int hip04_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct hip04_priv *priv = netdev_priv(netdev); diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 3c4db4a6b431..22bf914f2dbd 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -685,7 +685,7 @@ static const struct net_device_ops hisi_femac_netdev_ops = { .ndo_open = hisi_femac_net_open, .ndo_stop = hisi_femac_net_close, .ndo_start_xmit = hisi_femac_net_xmit, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_mac_address = hisi_femac_set_mac_address, .ndo_set_rx_mode = hisi_femac_net_set_rx_mode, }; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index ad534f9e41ab..343c605c4be8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1945,7 +1945,7 @@ static const struct net_device_ops hns_nic_netdev_ops = { .ndo_tx_timeout = hns_nic_net_timeout, .ndo_set_mac_address = hns_nic_net_set_mac_address, .ndo_change_mtu = hns_nic_change_mtu, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_features = hns_nic_set_features, .ndo_fix_features = hns_nic_fix_features, .ndo_get_stats64 = hns_nic_get_stats64, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 7e62dcff2426..ab7390225942 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -730,11 +730,15 @@ static int hns_set_pauseparam(struct net_device *net_dev, * hns_get_coalesce - get coalesce info. * @net_dev: net device * @ec: coalesce info. 
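These kernel-doc additions track an API change that recurs throughout this section: every ethtool get/set_coalesce implementation gains a struct kernel_ethtool_coalesce (for attributes with no slot in the legacy uAPI struct, such as CQE mode) and a struct netlink_ext_ack for error reporting. A minimal sketch of the converted callback shape, for a hypothetical driver that only uses the legacy fields:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static int foo_get_coalesce(struct net_device *ndev,
                                struct ethtool_coalesce *ec,
                                struct kernel_ethtool_coalesce *kernel_coal,
                                struct netlink_ext_ack *extack)
    {
            /* Most drivers in this series only touch 'ec'; kernel_coal
             * and extack may simply be ignored by implementations that
             * have nothing extra to report.
             */
            ec->rx_coalesce_usecs = 50;
            ec->tx_max_coalesced_frames = 16;
            return 0;
    }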
+ * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Return 0 on success, negative on failure. */ static int hns_get_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; @@ -774,11 +778,15 @@ static int hns_get_coalesce(struct net_device *net_dev, * hns_set_coalesce - set coalesce info. * @net_dev: net device * @ec: coalesce info. + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Return 0 on success, negative on failure. */ static int hns_set_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index aa86a81c8f4a..c2bd2584201f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -9,7 +9,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */ - HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset*/ + HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset */ HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */ HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */ HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index e0b7c3c44e7b..546a60530384 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -65,7 +65,7 @@ #define HNAE3_UNIC_CLIENT_INITED_B 0x4 #define HNAE3_ROCE_CLIENT_INITED_B 0x5 -#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ +#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) | \ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) #define hnae3_dev_roce_supported(hdev) \ @@ -718,6 +718,8 @@ struct hnae3_ae_ops { u32 nsec, u32 sec); int (*get_ts_info)(struct hnae3_handle *handle, struct ethtool_ts_info *info); + int (*get_link_diagnosis_info)(struct hnae3_handle *handle, + u32 *status_code); }; struct hnae3_dcb_ops { @@ -772,6 +774,7 @@ struct hnae3_knic_private_info { u16 int_rl_setting; enum pkt_hash_types rss_type; + void __iomem *io_base; }; struct hnae3_roce_private_info { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 80461ab0ce9e..2b66c59f5eaf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -38,9 +38,8 @@ static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = { }, }; -static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, unsigned int cmd); -static int hns3_dbg_common_file_init(struct hnae3_handle *handle, - unsigned int cmd); +static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd); +static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd); static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = { { @@ -696,7 +695,7 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h, char *buf, int len) sprintf(result[j++], "%u", i); sprintf(result[j++], "%u", h->ae_algo->ops->get_global_queue_id(h, i)); - 
sprintf(result[j++], "%u", + sprintf(result[j++], "%d", priv->ring[i].tqp_vector->vector_irq); hns3_dbg_fill_content(content, sizeof(content), queue_map_items, (const char **)result, @@ -798,10 +797,10 @@ static const struct hns3_dbg_item tx_bd_info_items[] = { { "T_CS_VLAN_TSO", 2 }, { "OT_VLAN_TAG", 3 }, { "TV", 2 }, - { "OLT_VLAN_LEN", 2}, - { "PAYLEN_OL4CS", 2}, - { "BD_FE_SC_VLD", 2}, - { "MSS_HW_CSUM", 0}, + { "OLT_VLAN_LEN", 2 }, + { "PAYLEN_OL4CS", 2 }, + { "BD_FE_SC_VLD", 2 }, + { "MSS_HW_CSUM", 0 }, }; static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv, @@ -868,7 +867,7 @@ static void hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); - static const char * const str[] = {"no", "yes"}; + const char * const str[] = {"no", "yes"}; unsigned long *caps = ae_dev->caps; u32 i, state; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index cdb5f14fb6bc..22af3d6ce178 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -63,7 +63,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt #define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \ sizeof(struct sg_table)) -#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM),\ +#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \ dma_get_cache_alignment()) #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ @@ -100,7 +100,7 @@ static const struct pci_device_id hns3_pci_tbl[] = { {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, /* required last entry */ - {0, } + {0,} }; MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); @@ -971,8 +971,7 @@ static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring) /* The free tx buffer is divided into two part, so pick the * larger one. */ - return (ntc > (tx_spare->len - ntu) ? 
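/* Both the old ternary here and the max() replacing it compute the same
 * thing: ntu..ntc wraps, so the free TX spare space consists of two
 * chunks, [ntu, len) and [0, ntc); pick the larger contiguous one. The
 * trailing -1 looks like the classic ring-buffer trick of keeping a
 * completely full buffer distinguishable from an empty one.
 */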
ntc : - (tx_spare->len - ntu)) - 1; + return max(ntc, tx_spare->len - ntu) - 1; } static void hns3_tx_spare_update(struct hns3_enet_ring *ring) @@ -2852,7 +2851,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_start_xmit = hns3_nic_net_xmit, .ndo_tx_timeout = hns3_nic_net_timeout, .ndo_set_mac_address = hns3_nic_net_set_mac_address, - .ndo_do_ioctl = hns3_nic_do_ioctl, + .ndo_eth_ioctl = hns3_nic_do_ioctl, .ndo_change_mtu = hns3_nic_change_mtu, .ndo_set_features = hns3_nic_set_features, .ndo_features_check = hns3_features_check, @@ -3127,11 +3126,6 @@ static void hns3_set_default_feature(struct net_device *netdev) netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->hw_enc_features |= NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST; - netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | @@ -3141,62 +3135,37 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; - netdev->vlan_features |= NETIF_F_RXCSUM | - NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | - NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; - - netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { - netdev->hw_features |= NETIF_F_GRO_HW; netdev->features |= NETIF_F_GRO_HW; - if (!(h->flags & HNAE3_SUPPORT_VF)) { - netdev->hw_features |= NETIF_F_NTUPLE; + if (!(h->flags & HNAE3_SUPPORT_VF)) netdev->features |= NETIF_F_NTUPLE; - } } - if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) { - netdev->hw_features |= NETIF_F_GSO_UDP_L4; + if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) netdev->features |= NETIF_F_GSO_UDP_L4; - netdev->vlan_features |= NETIF_F_GSO_UDP_L4; - netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4; - } - if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) { - netdev->hw_features |= NETIF_F_HW_CSUM; + if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) netdev->features |= NETIF_F_HW_CSUM; - netdev->vlan_features |= NETIF_F_HW_CSUM; - netdev->hw_enc_features |= NETIF_F_HW_CSUM; - } else { - netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + else netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - } - if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) { - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; - netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; - netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; - } - if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { - netdev->hw_features |= NETIF_F_HW_TC; + if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) netdev->features |= NETIF_F_HW_TC; - } - if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) - netdev->hw_features |= 
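/* The block below replaces five hand-maintained feature lists with a
 * derivation from one source of truth: hw_features becomes a copy of
 * features (dropping NETIF_F_HW_VLAN_CTAG_FILTER when the hardware
 * cannot modify VLAN filtering at runtime), vlan_features masks out the
 * VLAN-offload, GRO_HW, NTUPLE and TC bits that make no sense on a VLAN
 * device, and hw_enc_features is vlan_features plus TSO_MANGLEID.
 */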
NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->hw_features |= netdev->features; + if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) + netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->vlan_features |= netdev->features & + ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GRO_HW | NETIF_F_NTUPLE | + NETIF_F_HW_TC); + + netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID; } static int hns3_alloc_buffer(struct hns3_enet_ring *ring, @@ -3205,6 +3174,21 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, unsigned int order = hns3_page_order(ring); struct page *p; + if (ring->page_pool) { + p = page_pool_dev_alloc_frag(ring->page_pool, + &cb->page_offset, + hns3_buf_size(ring)); + if (unlikely(!p)) + return -ENOMEM; + + cb->priv = p; + cb->buf = page_address(p); + cb->dma = page_pool_get_dma_addr(p); + cb->type = DESC_TYPE_PP_FRAG; + cb->reuse_flag = 0; + return 0; + } + p = dev_alloc_pages(order); if (!p) return -ENOMEM; @@ -3227,8 +3211,13 @@ static void hns3_free_buffer(struct hns3_enet_ring *ring, if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB)) napi_consume_skb(cb->priv, budget); - else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias) - __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); + else if (!HNAE3_IS_TX_RING(ring)) { + if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias) + __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); + else if (cb->type & DESC_TYPE_PP_FRAG) + page_pool_put_full_page(ring->page_pool, cb->priv, + false); + } memset(cb, 0, sizeof(*cb)); } @@ -3315,7 +3304,7 @@ static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, int ret; ret = hns3_alloc_buffer(ring, cb); - if (ret) + if (ret || ring->page_pool) goto out; ret = hns3_map_buffer(ring, cb); @@ -3337,7 +3326,8 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) if (ret) return ret; - ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); return 0; } @@ -3367,7 +3357,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, { hns3_unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; - ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); ring->desc[i].rx.bd_base_info = 0; } @@ -3539,6 +3530,12 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, u32 frag_size = size - pull_len; bool reused; + if (ring->page_pool) { + skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, + frag_size, truesize); + return; + } + /* Avoid re-using remote or pfmem page */ if (unlikely(!dev_page_is_reusable(desc_cb->priv))) goto out; @@ -3856,6 +3853,9 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, /* We can reuse buffer as-is, just make sure it is reusable */ if (dev_page_is_reusable(desc_cb->priv)) desc_cb->reuse_flag = 1; + else if (desc_cb->type & DESC_TYPE_PP_FRAG) + page_pool_put_full_page(ring->page_pool, desc_cb->priv, + false); else /* This page cannot be reused so discard it */ __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); @@ -3863,6 +3863,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, hns3_rx_ring_move_fw(ring); return 0; } + + if (ring->page_pool) + skb_mark_for_recycle(skb); + u64_stats_update_begin(&ring->syncp); ring->stats.seg_pkt_cnt++; 
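/* A minimal sketch of the page-pool-backed RX buffering this patch
 * adopts (the rx_demo_* names are hypothetical, not driver code): one
 * pool per RX ring, PP_FLAG_PAGE_FRAG so several descriptors can share
 * one mapped page, and pool recycling in place of the old
 * __page_frag_cache_drain() bookkeeping. skb_mark_for_recycle(), used in
 * the hunks around here, is what lets the stack hand the frags back to
 * the pool when the skb is freed.
 */
#include <linux/device.h>
#include <net/page_pool.h>

static struct page_pool *rx_demo_pool_create(struct device *dev,
					     u32 desc_num, u32 buf_size)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
				  PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= desc_num * buf_size / PAGE_SIZE,
		.nid		= dev_to_node(dev),
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= 0,
		.max_len	= PAGE_SIZE,
	};

	return page_pool_create(&pp);	/* ERR_PTR() on failure */
}

/* Fill one RX descriptor: the frag comes back already DMA-mapped, so the
 * descriptor address is just the page's DMA address plus the frag offset.
 */
static int rx_demo_fill_one(struct page_pool *pool, u32 buf_size,
			    dma_addr_t *dma, void **va)
{
	unsigned int offset;
	struct page *p = page_pool_dev_alloc_frag(pool, &offset, buf_size);

	if (unlikely(!p))
		return -ENOMEM;

	*dma = page_pool_get_dma_addr(p) + offset;
	*va = page_address(p) + offset;
	return 0;
}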
u64_stats_update_end(&ring->syncp); @@ -3901,6 +3905,10 @@ static int hns3_add_frag(struct hns3_enet_ring *ring) "alloc rx fraglist skb fail\n"); return -ENXIO; } + + if (ring->page_pool) + skb_mark_for_recycle(new_skb); + ring->frag_num = 0; if (ring->tail_skb) { @@ -4434,9 +4442,7 @@ static void hns3_tx_dim_work(struct work_struct *work) static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector) { INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work); - tqp_vector->rx_group.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work); - tqp_vector->tx_group.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; } static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) @@ -4705,6 +4711,29 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv) priv->ring = NULL; } +static void hns3_alloc_page_pool(struct hns3_enet_ring *ring) +{ + struct page_pool_params pp_params = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG | + PP_FLAG_DMA_SYNC_DEV, + .order = hns3_page_order(ring), + .pool_size = ring->desc_num * hns3_buf_size(ring) / + (PAGE_SIZE << hns3_page_order(ring)), + .nid = dev_to_node(ring_to_dev(ring)), + .dev = ring_to_dev(ring), + .dma_dir = DMA_FROM_DEVICE, + .offset = 0, + .max_len = PAGE_SIZE << hns3_page_order(ring), + }; + + ring->page_pool = page_pool_create(&pp_params); + if (IS_ERR(ring->page_pool)) { + dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n", + PTR_ERR(ring->page_pool)); + ring->page_pool = NULL; + } +} + static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) { int ret; @@ -4724,6 +4753,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) goto out_with_desc_cb; if (!HNAE3_IS_TX_RING(ring)) { + hns3_alloc_page_pool(ring); + ret = hns3_alloc_ring_buffers(ring); if (ret) goto out_with_desc; @@ -4764,6 +4795,11 @@ void hns3_fini_ring(struct hns3_enet_ring *ring) devm_kfree(ring_to_dev(ring), tx_spare); ring->tx_spare = NULL; } + + if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) { + page_pool_destroy(ring->page_pool); + ring->page_pool = NULL; + } } static int hns3_buf_size2type(u32 buf_size) @@ -4954,6 +4990,66 @@ static void hns3_info_show(struct hns3_nic_priv *priv) dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); } +static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv, + enum dim_cq_period_mode mode, bool is_tx) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); + struct hnae3_handle *handle = priv->ae_handle; + int i; + + if (is_tx) { + priv->tx_cqe_mode = mode; + + for (i = 0; i < priv->vector_num; i++) + priv->tqp_vector[i].tx_group.dim.mode = mode; + } else { + priv->rx_cqe_mode = mode; + + for (i = 0; i < priv->vector_num; i++) + priv->tqp_vector[i].rx_group.dim.mode = mode; + } + + /* only device version above V3(include V3), GL can switch CQ/EQ + * period mode. + */ + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { + u32 new_mode; + u64 reg; + + new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ? + HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE; + reg = is_tx ? 
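/* DIM period modes, roughly: with DIM_CQ_PERIOD_MODE_START_FROM_EQE the
 * coalescing timer runs from the event-queue entry, while with
 * ..._START_FROM_CQE it restarts at each new completion, so a steady
 * stream of completions keeps stretching the quiet period. Only
 * HNAE3_DEVICE_VERSION_V3 and newer expose the GL mode registers written
 * below; older silicon simply stays in its default EQE behaviour.
 */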
HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG; + + writel(new_mode, handle->kinfo.io_base + reg); + } +} + +void hns3_cq_period_mode_init(struct hns3_nic_priv *priv, + enum dim_cq_period_mode tx_mode, + enum dim_cq_period_mode rx_mode) +{ + hns3_set_cq_period_mode(priv, tx_mode, true); + hns3_set_cq_period_mode(priv, rx_mode, false); +} + +static void hns3_state_init(struct hnae3_handle *handle) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + + set_bit(HNS3_NIC_STATE_INITED, &priv->state); + + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); + + if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) + set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); + + if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev)) + set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); +} + static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; @@ -5021,6 +5117,9 @@ static int hns3_client_init(struct hnae3_handle *handle) goto out_init_ring; } + hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE, + DIM_CQ_PERIOD_MODE_START_FROM_EQE); + ret = hns3_init_phy(netdev); if (ret) goto out_init_phy; @@ -5054,16 +5153,7 @@ static int hns3_client_init(struct hnae3_handle *handle) netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size); - if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) - set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); - - if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev)) - set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); - - set_bit(HNS3_NIC_STATE_INITED, &priv->state); - - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) - set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); + hns3_state_init(handle); ret = register_netdev(netdev); if (ret) { @@ -5353,6 +5443,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) if (ret) goto err_uninit_vector; + hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode); + /* the device can work without cpu rmap, only aRFS needs it */ ret = hns3_set_rx_cpu_rmap(netdev); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 15af3d93857b..6162d9f88e37 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -6,6 +6,7 @@ #include <linux/dim.h> #include <linux/if_vlan.h> +#include <net/page_pool.h> #include "hnae3.h" @@ -201,6 +202,12 @@ enum hns3_nic_state { #define HNS3_RING_EN_B 0 +#define HNS3_GL0_CQ_MODE_REG 0x20d00 +#define HNS3_GL1_CQ_MODE_REG 0x20d04 +#define HNS3_GL2_CQ_MODE_REG 0x20d08 +#define HNS3_CQ_MODE_EQE 1U +#define HNS3_CQ_MODE_CQE 0U + enum hns3_pkt_l2t_type { HNS3_L2_TYPE_UNICAST, HNS3_L2_TYPE_MULTICAST, @@ -307,6 +314,7 @@ enum hns3_desc_type { DESC_TYPE_BOUNCE_ALL = 1 << 3, DESC_TYPE_BOUNCE_HEAD = 1 << 4, DESC_TYPE_SGL_SKB = 1 << 5, + DESC_TYPE_PP_FRAG = 1 << 6, }; struct hns3_desc_cb { @@ -340,7 +348,7 @@ enum hns3_pkt_l3type { HNS3_L3_TYPE_LLDP, HNS3_L3_TYPE_BPDU, HNS3_L3_TYPE_MAC_PAUSE, - HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/ + HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */ /* reserved for 0xA~0xB */ @@ -384,11 +392,11 @@ enum hns3_pkt_ol4type { }; struct hns3_rx_ptype { - u32 ptype:8; - u32 csum_level:2; - u32 ip_summed:2; - u32 l3_type:4; - u32 valid:1; + u32 ptype : 8; + u32 csum_level : 2; + u32 
ip_summed : 2; + u32 l3_type : 4; + u32 valid : 1; }; struct ring_stats { @@ -451,6 +459,7 @@ struct hns3_enet_ring { struct hnae3_queue *tqp; int queue_index; struct device *dev; /* will be used for DMA mapping of descriptors */ + struct page_pool *page_pool; /* statistic */ struct ring_stats stats; @@ -513,9 +522,9 @@ struct hns3_enet_coalesce { u16 int_gl; u16 int_ql; u16 int_ql_max; - u8 adapt_enable:1; - u8 ql_enable:1; - u8 unit_1us:1; + u8 adapt_enable : 1; + u8 ql_enable : 1; + u8 unit_1us : 1; enum hns3_flow_level_range flow_level; }; @@ -569,6 +578,8 @@ struct hns3_nic_priv { unsigned long state; + enum dim_cq_period_mode tx_cqe_mode; + enum dim_cq_period_mode rx_cqe_mode; struct hns3_enet_coalesce tx_coal; struct hns3_enet_coalesce rx_coal; u32 tx_copybreak; @@ -593,6 +604,11 @@ struct hns3_hw_error_info { const char *msg; }; +struct hns3_reset_type_map { + enum ethtool_reset_flags rst_flags; + enum hnae3_reset_type rst_type; +}; + static inline int ring_space(struct hns3_enet_ring *ring) { /* This smp_load_acquire() pairs with smp_store_release() in @@ -702,4 +718,7 @@ void hns3_dbg_register_debugfs(const char *debugfs_dir_name); void hns3_dbg_unregister_debugfs(void); void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size); u16 hns3_get_max_available_channels(struct hnae3_handle *h); +void hns3_cq_period_mode_init(struct hns3_nic_priv *priv, + enum dim_cq_period_mode tx_mode, + enum dim_cq_period_mode rx_mode); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 82061ab6930f..7ea511d59e91 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -7,21 +7,7 @@ #include <linux/sfp.h> #include "hns3_enet.h" - -struct hns3_stats { - char stats_string[ETH_GSTRING_LEN]; - int stats_offset; -}; - -struct hns3_sfp_type { - u8 type; - u8 ext_type; -}; - -struct hns3_pflag_desc { - char name[ETH_GSTRING_LEN]; - void (*handler)(struct net_device *netdev, bool enable); -}; +#include "hns3_ethtool.h" /* tqp related stats */ #define HNS3_TQP_STAT(_string, _member) { \ @@ -312,33 +298,8 @@ out: return ret_val; } -/** - * hns3_self_test - self test - * @ndev: net device - * @eth_test: test cmd - * @data: test result - */ -static void hns3_self_test(struct net_device *ndev, - struct ethtool_test *eth_test, u64 *data) +static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2]) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; - int st_param[HNS3_SELF_TEST_TYPE_NUM][2]; - bool if_running = netif_running(ndev); - int test_index = 0; - u32 i; - - if (hns3_nic_resetting(ndev)) { - netdev_err(ndev, "dev resetting!"); - return; - } - - /* Only do offline selftest, or pass by default */ - if (eth_test->flags != ETH_TEST_FL_OFFLINE) - return; - - netif_dbg(h, drv, ndev, "self test start"); - st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP; st_param[HNAE3_LOOP_APP][1] = h->flags & HNAE3_SUPPORT_APP_LOOPBACK; @@ -355,6 +316,18 @@ static void hns3_self_test(struct net_device *ndev, st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY; st_param[HNAE3_LOOP_PHY][1] = h->flags & HNAE3_SUPPORT_PHY_LOOPBACK; +} + +static void hns3_selftest_prepare(struct net_device *ndev, + bool if_running, int (*st_param)[2]) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + if (netif_msg_ifdown(h)) + netdev_info(ndev, "self test start\n"); + + 
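/* The monolithic hns3_self_test() is being split three ways:
 * hns3_selftest_prepare() quiesces the device (stop the netdev, disable
 * the VLAN filter, halt autoneg), hns3_do_selftest() runs the loopback
 * cases and fills eth_test/data, and hns3_selftest_restore() undoes the
 * preparation in reverse. Externally visible ethtool behaviour stays
 * essentially unchanged.
 */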
hns3_set_selftest_param(h, st_param); if (if_running) ndev->netdev_ops->ndo_stop(ndev); @@ -373,6 +346,35 @@ static void hns3_self_test(struct net_device *ndev, h->ae_algo->ops->halt_autoneg(h, true); set_bit(HNS3_NIC_STATE_TESTING, &priv->state); +} + +static void hns3_selftest_restore(struct net_device *ndev, bool if_running) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); + + if (h->ae_algo->ops->halt_autoneg) + h->ae_algo->ops->halt_autoneg(h, false); + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + if (h->ae_algo->ops->enable_vlan_filter) + h->ae_algo->ops->enable_vlan_filter(h, true); +#endif + + if (if_running) + ndev->netdev_ops->ndo_open(ndev); + + if (netif_msg_ifdown(h)) + netdev_info(ndev, "self test end\n"); +} + +static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2], + struct ethtool_test *eth_test, u64 *data) +{ + int test_index = 0; + u32 i; for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) { enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0]; @@ -391,21 +393,32 @@ static void hns3_self_test(struct net_device *ndev, test_index++; } +} - clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); - - if (h->ae_algo->ops->halt_autoneg) - h->ae_algo->ops->halt_autoneg(h, false); +/** + * hns3_self_test - self test + * @ndev: net device + * @eth_test: test cmd + * @data: test result + */ +static void hns3_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + int st_param[HNS3_SELF_TEST_TYPE_NUM][2]; + bool if_running = netif_running(ndev); -#if IS_ENABLED(CONFIG_VLAN_8021Q) - if (h->ae_algo->ops->enable_vlan_filter) - h->ae_algo->ops->enable_vlan_filter(h, true); -#endif + if (hns3_nic_resetting(ndev)) { + netdev_err(ndev, "dev resetting!"); + return; + } - if (if_running) - ndev->netdev_ops->ndo_open(ndev); + /* Only do offline selftest, or pass by default */ + if (eth_test->flags != ETH_TEST_FL_OFFLINE) + return; - netif_dbg(h, drv, ndev, "self test end\n"); + hns3_selftest_prepare(ndev, if_running, st_param); + hns3_do_selftest(ndev, st_param, eth_test, data); + hns3_selftest_restore(ndev, if_running); } static void hns3_update_limit_promisc_mode(struct net_device *netdev, @@ -953,6 +966,60 @@ static int hns3_get_rxnfc(struct net_device *netdev, } } +static const struct hns3_reset_type_map hns3_reset_type[] = { + {ETH_RESET_MGMT, HNAE3_IMP_RESET}, + {ETH_RESET_ALL, HNAE3_GLOBAL_RESET}, + {ETH_RESET_DEDICATED, HNAE3_FUNC_RESET}, +}; + +static const struct hns3_reset_type_map hns3vf_reset_type[] = { + {ETH_RESET_DEDICATED, HNAE3_VF_FUNC_RESET}, +}; + +static int hns3_set_reset(struct net_device *netdev, u32 *flags) +{ + enum hnae3_reset_type rst_type = HNAE3_NONE_RESET; + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + const struct hns3_reset_type_map *rst_type_map; + u32 i, size; + + if (ops->ae_dev_resetting && ops->ae_dev_resetting(h)) + return -EBUSY; + + if (!ops->set_default_reset_request || !ops->reset_event) + return -EOPNOTSUPP; + + if (h->flags & HNAE3_SUPPORT_VF) { + rst_type_map = hns3vf_reset_type; + size = ARRAY_SIZE(hns3vf_reset_type); + } else { + rst_type_map = hns3_reset_type; + size = ARRAY_SIZE(hns3_reset_type); + } + + for (i = 0; i < size; i++) { + if (rst_type_map[i].rst_flags == *flags) { + rst_type = rst_type_map[i].rst_type; + break; + } + } + + if (rst_type == HNAE3_NONE_RESET || + (rst_type == 
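/* Reachable as `ethtool --reset <dev> <flag>`: mgmt requests an IMP
 * (management firmware) reset, all a global reset, dedicated a function
 * reset; a VF only honours dedicated. The version check just below
 * additionally refuses IMP reset on pre-V3 devices.
 */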
HNAE3_IMP_RESET && + ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)) + return -EOPNOTSUPP; + + netdev_info(netdev, "Setting reset type %d\n", rst_type); + + ops->set_default_reset_request(ae_dev, rst_type); + + ops->reset_event(h->pdev, h); + + return 0; +} + static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, u32 tx_desc_num, u32 rx_desc_num) { @@ -1139,7 +1206,9 @@ static void hns3_get_channels(struct net_device *netdev, } static int hns3_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *cmd) + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; @@ -1161,6 +1230,11 @@ static int hns3_get_coalesce(struct net_device *netdev, cmd->tx_max_coalesced_frames = tx_coal->int_ql; cmd->rx_max_coalesced_frames = rx_coal->int_ql; + kernel_coal->use_cqe_mode_tx = (priv->tx_cqe_mode == + DIM_CQ_PERIOD_MODE_START_FROM_CQE); + kernel_coal->use_cqe_mode_rx = (priv->rx_cqe_mode == + DIM_CQ_PERIOD_MODE_START_FROM_CQE); + return 0; } @@ -1321,13 +1395,17 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev, } static int hns3_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *cmd) + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct hnae3_handle *h = hns3_get_handle(netdev); struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; struct hns3_enet_coalesce *rx_coal = &priv->rx_coal; u16 queue_num = h->kinfo.num_tqps; + enum dim_cq_period_mode tx_mode; + enum dim_cq_period_mode rx_mode; int ret; int i; @@ -1353,6 +1431,14 @@ static int hns3_set_coalesce(struct net_device *netdev, for (i = 0; i < queue_num; i++) hns3_set_coalesce_per_queue(netdev, cmd, i); + tx_mode = kernel_coal->use_cqe_mode_tx ? + DIM_CQ_PERIOD_MODE_START_FROM_CQE : + DIM_CQ_PERIOD_MODE_START_FROM_EQE; + rx_mode = kernel_coal->use_cqe_mode_rx ? 
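/* Assuming a netlink-aware ethtool binary, this path is exercised by
 * something like `ethtool -C eth0 cqe-mode-tx on cqe-mode-rx on` (option
 * names inferred from the ETHTOOL_A_COALESCE_USE_CQE_MODE_* attributes);
 * the legacy ioctl interface has no way to carry these bits.
 */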
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE : + DIM_CQ_PERIOD_MODE_START_FROM_EQE; + hns3_cq_period_mode_init(priv, tx_mode, rx_mode); + return 0; } @@ -1658,7 +1744,8 @@ static int hns3_set_tunable(struct net_device *netdev, ETHTOOL_COALESCE_USE_ADAPTIVE | \ ETHTOOL_COALESCE_RX_USECS_HIGH | \ ETHTOOL_COALESCE_TX_USECS_HIGH | \ - ETHTOOL_COALESCE_MAX_FRAMES) + ETHTOOL_COALESCE_MAX_FRAMES | \ + ETHTOOL_COALESCE_USE_CQE) static int hns3_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) @@ -1671,6 +1758,71 @@ static int hns3_get_ts_info(struct net_device *netdev, return ethtool_op_get_ts_info(netdev, info); } +static const struct hns3_ethtool_link_ext_state_mapping +hns3_link_ext_state_map[] = { + {1, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD}, + {2, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED}, + + {256, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT}, + {257, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY}, + {512, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT}, + + {513, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK}, + {514, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED}, + {515, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED}, + + {768, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS}, + {769, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST}, + {770, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS}, + + {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0}, + {1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + + {1026, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0}, +}; + +static int hns3_get_link_ext_state(struct net_device *netdev, + struct ethtool_link_ext_state_info *info) +{ + const struct hns3_ethtool_link_ext_state_mapping *map; + struct hnae3_handle *h = hns3_get_handle(netdev); + u32 status_code, i; + int ret; + + if (netif_carrier_ok(netdev)) + return -ENODATA; + + if (!h->ae_algo->ops->get_link_diagnosis_info) + return -EOPNOTSUPP; + + ret = h->ae_algo->ops->get_link_diagnosis_info(h, &status_code); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(hns3_link_ext_state_map); i++) { + map = &hns3_link_ext_state_map[i]; + if (map->status_code == status_code) { + info->link_ext_state = map->link_ext_state; + info->__link_ext_substate = map->link_ext_substate; + return 0; + } + } + + return -ENODATA; +} + static const struct ethtool_ops hns3vf_ethtool_ops = { .supported_coalesce_params = HNS3_ETHTOOL_COALESCE, .get_drvinfo = hns3_get_drvinfo, @@ -1699,6 +1851,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .set_priv_flags = hns3_set_priv_flags, .get_tunable = hns3_get_tunable, .set_tunable = hns3_set_tunable, + .reset = hns3_set_reset, }; static const struct ethtool_ops hns3_ethtool_ops = { @@ -1740,6 +1893,8 @@ static const struct ethtool_ops hns3_ethtool_ops = { .get_ts_info = hns3_get_ts_info, .get_tunable = hns3_get_tunable, .set_tunable = hns3_set_tunable, + .reset = hns3_set_reset, + .get_link_ext_state = hns3_get_link_ext_state, }; void hns3_ethtool_set_ops(struct 
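/* The numeric keys in hns3_link_ext_state_map are status codes returned
 * by the firmware link-diagnosis query (HCLGE_OPC_QUERY_LINK_DIAGNOSIS,
 * 0x702A, added further down); they translate into the generic ethtool
 * extended link states so userspace can report *why* a link is down.
 * Hence the early -ENODATA while the carrier is up or when the code is
 * not in the table.
 */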
net_device *netdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h new file mode 100644 index 000000000000..822d6fcbc73b --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2021 Hisilicon Limited. + +#ifndef __HNS3_ETHTOOL_H +#define __HNS3_ETHTOOL_H + +#include <linux/ethtool.h> +#include <linux/netdevice.h> + +struct hns3_stats { + char stats_string[ETH_GSTRING_LEN]; + int stats_offset; +}; + +struct hns3_sfp_type { + u8 type; + u8 ext_type; +}; + +struct hns3_pflag_desc { + char name[ETH_GSTRING_LEN]; + void (*handler)(struct net_device *netdev, bool enable); +}; + +struct hns3_ethtool_link_ext_state_mapping { + u32 status_code; + enum ethtool_link_ext_state link_ext_state; + u8 link_ext_substate; +}; + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index a685392dbfe9..d1bf5c4c0abb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -7,6 +7,6 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 ccflags-y += -I $(srctree)/$(src) obj-$(CONFIG_HNS3_HCLGE) += hclge.o -hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o +hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o hclge_devlink.o hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index eb748aa35952..474c6d1664e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -169,17 +169,19 @@ static bool hclge_is_special_opcode(u16 opcode) /* these commands have several descriptors, * and use the first one to save opcode and return value */ - u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT, - HCLGE_OPC_STATS_32_BIT, - HCLGE_OPC_STATS_MAC, - HCLGE_OPC_STATS_MAC_ALL, - HCLGE_OPC_QUERY_32_BIT_REG, - HCLGE_OPC_QUERY_64_BIT_REG, - HCLGE_QUERY_CLEAR_MPF_RAS_INT, - HCLGE_QUERY_CLEAR_PF_RAS_INT, - HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, - HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, - HCLGE_QUERY_ALL_ERR_INFO}; + static const u16 spec_opcode[] = { + HCLGE_OPC_STATS_64_BIT, + HCLGE_OPC_STATS_32_BIT, + HCLGE_OPC_STATS_MAC, + HCLGE_OPC_STATS_MAC_ALL, + HCLGE_OPC_QUERY_32_BIT_REG, + HCLGE_OPC_QUERY_64_BIT_REG, + HCLGE_QUERY_CLEAR_MPF_RAS_INT, + HCLGE_QUERY_CLEAR_PF_RAS_INT, + HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, + HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, + HCLGE_QUERY_ALL_ERR_INFO + }; int i; for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { @@ -360,41 +362,34 @@ static void hclge_set_default_capability(struct hclge_dev *hdev) } } +const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = { + {HCLGE_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B}, + {HCLGE_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B}, + {HCLGE_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B}, + {HCLGE_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B}, + {HCLGE_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B}, + {HCLGE_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B}, + {HCLGE_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B}, + {HCLGE_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B}, + {HCLGE_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B}, + {HCLGE_CAP_PHY_IMP_B, 
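/* Table-driven replacement for the if/set_bit chain deleted below. The
 * two HCLGE_CAP_PORT_VLAN_BYPASS_B rows are deliberate, not a copy-paste
 * slip: that one firmware bit fans out to two driver capability bits
 * (PORT_VLAN_BYPASS and VLAN_FLTR_MDF), exactly as the removed branch
 * set both.
 */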
HNAE3_DEV_SUPPORT_PHY_IMP_B}, + {HCLGE_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B}, + {HCLGE_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, + {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B}, + {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B}, +}; + static void hclge_parse_capability(struct hclge_dev *hdev, struct hclge_query_version_cmd *cmd) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - u32 caps; + u32 caps, i; caps = __le32_to_cpu(cmd->caps[0]); - if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B)) - set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B)) - set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B)) - set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B)) - set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_HW_TX_CSUM_B)) - set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B)) - set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B)) - set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_FEC_B)) - set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_PAUSE_B)) - set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_PHY_IMP_B)) - set_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_RAS_IMP_B)) - set_bit(HNAE3_DEV_SUPPORT_RAS_IMP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_RXD_ADV_LAYOUT_B)) - set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_PORT_VLAN_BYPASS_B)) { - set_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps); - set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); - } + for (i = 0; i < ARRAY_SIZE(hclge_cmd_caps_bit_map0); i++) + if (hnae3_get_bit(caps, hclge_cmd_caps_bit_map0[i].imp_bit)) + set_bit(hclge_cmd_caps_bit_map0[i].local_bit, + ae_dev->caps); } static __le32 hclge_build_api_caps(void) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index ac70d49e205d..33244472e0d0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -320,6 +320,9 @@ enum hclge_opcode_type { /* PHY command */ HCLGE_OPC_PHY_LINK_KSETTING = 0x7025, HCLGE_OPC_PHY_REG = 0x7026, + + /* Query link diagnosis info command */ + HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A, }; #define HCLGE_TQP_REG_OFFSET 0x80000 @@ -450,7 +453,7 @@ struct hclge_tc_thrd { }; struct hclge_priv_buf { - struct hclge_waterline wl; /* Waterline for low and high*/ + struct hclge_waterline wl; /* Waterline for low and high */ u32 buf_size; /* TC private buffer size */ u32 tx_buf_size; u32 enable; /* Enable TC private buffer or not */ @@ -1014,16 +1017,6 @@ struct hclge_common_lb_cmd { #define HCLGE_TYPE_CRQ 0 #define HCLGE_TYPE_CSQ 1 -#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000 -#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004 -#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008 -#define HCLGE_NIC_CSQ_TAIL_REG 0x27010 -#define HCLGE_NIC_CSQ_HEAD_REG 0x27014 -#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018 -#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701c -#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020 -#define HCLGE_NIC_CRQ_TAIL_REG 0x27024 -#define 
HCLGE_NIC_CRQ_HEAD_REG 0x27028 /* this bit indicates that the driver is ready for hardware reset */ #define HCLGE_NIC_SW_RST_RDY_B 16 @@ -1198,6 +1191,19 @@ struct hclge_dev_specs_1_cmd { u8 rsv1[18]; }; +/* mac speed type defined in firmware command */ +enum HCLGE_FIRMWARE_MAC_SPEED { + HCLGE_FW_MAC_SPEED_1G, + HCLGE_FW_MAC_SPEED_10G, + HCLGE_FW_MAC_SPEED_25G, + HCLGE_FW_MAC_SPEED_40G, + HCLGE_FW_MAC_SPEED_50G, + HCLGE_FW_MAC_SPEED_100G, + HCLGE_FW_MAC_SPEED_10M, + HCLGE_FW_MAC_SPEED_100M, + HCLGE_FW_MAC_SPEED_200G, +}; + #define HCLGE_PHY_LINK_SETTING_BD_NUM 2 struct hclge_phy_link_ksetting_0_cmd { @@ -1228,6 +1234,12 @@ struct hclge_phy_reg_cmd { u8 rsv1[18]; }; +/* capability bit mapping between the IMP firmware and the local driver */ +struct hclge_caps_bit_map { + u16 imp_bit; + u16 local_bit; +}; + int hclge_cmd_init(struct hclge_dev *hdev); static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 39f56f245d84..4a619e5d3f35 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -104,26 +104,30 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc, return 0; } -static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, - u8 *tc, bool *changed) +static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets, + bool *changed) { - bool has_ets_tc = false; - u32 total_ets_bw = 0; - u8 max_tc = 0; - int ret; + u8 max_tc_id = 0; u8 i; for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i]) *changed = true; - if (ets->prio_tc[i] > max_tc) - max_tc = ets->prio_tc[i]; + if (ets->prio_tc[i] > max_tc_id) + max_tc_id = ets->prio_tc[i]; } - ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc); - if (ret) - return ret; + /* the return value is the max TC count: the highest TC id plus 1 */ + return max_tc_id + 1; +} + +static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev, + struct ieee_ets *ets, bool *changed) +{ + bool has_ets_tc = false; + u32 total_ets_bw = 0; + u8 i; for (i = 0; i < hdev->tc_max; i++) { switch (ets->tc_tsa[i]) { @@ -148,7 +152,26 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, if (has_ets_tc && total_ets_bw != BW_PERCENT) return -EINVAL; - *tc = max_tc + 1; + return 0; +} + +static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, + u8 *tc, bool *changed) +{ + u8 tc_num; + int ret; + + tc_num = hclge_ets_tc_changed(hdev, ets, changed); + + ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc); + if (ret) + return ret; + + ret = hclge_ets_sch_mode_validate(hdev, ets, changed); + if (ret) + return ret; + + *tc = tc_num; if (*tc != hdev->tm_info.num_tc) *changed = true; @@ -234,9 +257,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) if (ret) goto err_out; - ret = hclge_notify_init_up(hdev); - if (ret) - return ret; + return hclge_notify_init_up(hdev); } return hclge_tm_dwrr_cfg(hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 288788186ecc..68ed1715ac52 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -926,26 +926,45 @@ static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len) return 0; 
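/* A self-contained sketch of the table-driven dump pattern used just
 * below (struct dbg_item and dbg_fill_row are illustrative stand-ins for
 * the driver's hclge_dbg_item/hclge_dbg_fill_content, whose exact
 * implementation is not shown in this diff): one items table names the
 * columns and their padding, and a single formatter renders either the
 * header row (vals == NULL) or a data row, so adding a column is a
 * one-line table edit instead of juggling several scnprintf() calls.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

struct dbg_item {
	const char *name;
	u16 pad;		/* blanks appended after the column */
};

static void dbg_fill_row(char *buf, size_t len, const struct dbg_item *items,
			 const char **vals, size_t n)
{
	size_t pos = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		int width = (int)strlen(items[i].name) + items[i].pad;

		/* header when vals is NULL, data row otherwise */
		pos += scnprintf(buf + pos, len - pos, "%-*s",
				 width, vals ? vals[i] : items[i].name);
	}
	scnprintf(buf + pos, len - pos, "\n");
}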
} +static const struct hclge_dbg_item tm_pri_items[] = { + { "ID", 4 }, + { "MODE", 2 }, + { "DWRR", 2 }, + { "C_IR_B", 2 }, + { "C_IR_U", 2 }, + { "C_IR_S", 2 }, + { "C_BS_B", 2 }, + { "C_BS_S", 2 }, + { "C_FLAG", 2 }, + { "C_RATE(Mbps)", 2 }, + { "P_IR_B", 2 }, + { "P_IR_U", 2 }, + { "P_IR_S", 2 }, + { "P_BS_B", 2 }, + { "P_BS_S", 2 }, + { "P_FLAG", 2 }, + { "P_RATE(Mbps)", 0 } +}; + static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len) { - struct hclge_tm_shaper_para c_shaper_para; - struct hclge_tm_shaper_para p_shaper_para; - u8 pri_num, sch_mode, weight; - char *sch_mode_str; - int pos = 0; - int ret; - u8 i; + char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN]; + struct hclge_tm_shaper_para c_shaper_para, p_shaper_para; + char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str; + char content[HCLGE_DBG_TM_INFO_LEN]; + u8 pri_num, sch_mode, weight, i, j; + int pos, ret; ret = hclge_tm_get_pri_num(hdev, &pri_num); if (ret) return ret; - pos += scnprintf(buf + pos, len - pos, - "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B "); - pos += scnprintf(buf + pos, len - pos, - "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U "); - pos += scnprintf(buf + pos, len - pos, - "P_IR_S P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n"); + for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++) + result[i] = &data_str[i][0]; + + hclge_dbg_fill_content(content, sizeof(content), tm_pri_items, + NULL, ARRAY_SIZE(tm_pri_items)); + pos = scnprintf(buf, len, "%s", content); for (i = 0; i < pri_num; i++) { ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode); @@ -971,21 +990,16 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len) sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" : "sp"; - pos += scnprintf(buf + pos, len - pos, - "%04u %4s %3u %3u %3u %3u ", - i, sch_mode_str, weight, c_shaper_para.ir_b, - c_shaper_para.ir_u, c_shaper_para.ir_s); - pos += scnprintf(buf + pos, len - pos, - "%3u %3u %1u %6u ", - c_shaper_para.bs_b, c_shaper_para.bs_s, - c_shaper_para.flag, c_shaper_para.rate); - pos += scnprintf(buf + pos, len - pos, - "%3u %3u %3u %3u %3u ", - p_shaper_para.ir_b, p_shaper_para.ir_u, - p_shaper_para.ir_s, p_shaper_para.bs_b, - p_shaper_para.bs_s); - pos += scnprintf(buf + pos, len - pos, "%1u %6u\n", - p_shaper_para.flag, p_shaper_para.rate); + j = 0; + sprintf(result[j++], "%04u", i); + sprintf(result[j++], "%4s", sch_mode_str); + sprintf(result[j++], "%3u", weight); + hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j); + hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j); + hclge_dbg_fill_content(content, sizeof(content), tm_pri_items, + (const char **)result, + ARRAY_SIZE(tm_pri_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c new file mode 100644 index 000000000000..e4aad695abcc --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2021 Hisilicon Limited. 
*/ + +#include <net/devlink.h> + +#include "hclge_devlink.h" + +static int hclge_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ +#define HCLGE_DEVLINK_FW_STRING_LEN 32 + struct hclge_devlink_priv *priv = devlink_priv(devlink); + char version_str[HCLGE_DEVLINK_FW_STRING_LEN]; + struct hclge_dev *hdev = priv->hdev; + int ret; + + ret = devlink_info_driver_name_put(req, KBUILD_MODNAME); + if (ret) + return ret; + + snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu", + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + + return devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + version_str); +} + +static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) +{ + struct hclge_devlink_priv *priv = devlink_priv(devlink); + struct hclge_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->vport->nic; + struct pci_dev *pdev = hdev->pdev; + int ret; + + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { + dev_err(&pdev->dev, "reset is handling\n"); + return -EBUSY; + } + + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, + HNAE3_UNINIT_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static int hclge_devlink_reload_up(struct devlink *devlink, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct hclge_devlink_priv *priv = devlink_priv(devlink); + struct hclge_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->vport->nic; + int ret; + + *actions_performed = BIT(action); + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static const struct devlink_ops hclge_devlink_ops = { + .info_get = hclge_devlink_info_get, + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT), + .reload_down = hclge_devlink_reload_down, + .reload_up = hclge_devlink_reload_up, +}; + +int hclge_devlink_init(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_devlink_priv *priv; + struct devlink *devlink; + int ret; + + devlink = devlink_alloc(&hclge_devlink_ops, + sizeof(struct hclge_devlink_priv), &pdev->dev); + if (!devlink) + return -ENOMEM; + + priv = devlink_priv(devlink); + priv->hdev = hdev; + hdev->devlink = devlink; + + ret = devlink_register(devlink); + if (ret) { + dev_err(&pdev->dev, "failed to register devlink, ret = %d\n", + ret); + goto out_reg_fail; + } + + devlink_reload_enable(devlink); + + return 0; + +out_reg_fail: + devlink_free(devlink); + return ret; +} + +void hclge_devlink_uninit(struct hclge_dev *hdev) +{ + 
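/* Once registered, the standard iproute2 devlink flows work against the
 * PF (the PCI address is only an example):
 *   devlink dev info pci/0000:bd:00.0                  # running fw version
 *   devlink dev reload pci/0000:bd:00.0 action driver_reinit
 * reload_down/up reuse the existing HNAE3 client notify path, and reload
 * is refused with -EBUSY while a reset is already in flight.
 */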
struct devlink *devlink = hdev->devlink; + + devlink_reload_disable(devlink); + + devlink_unregister(devlink); + + devlink_free(devlink); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h new file mode 100644 index 000000000000..918be04507a5 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2021 Hisilicon Limited. */ + +#ifndef __HCLGE_DEVLINK_H +#define __HCLGE_DEVLINK_H + +#include "hclge_main.h" + +struct hclge_devlink_priv { + struct hclge_dev *hdev; +}; + +int hclge_devlink_init(struct hclge_dev *hdev); +void hclge_devlink_uninit(struct hclge_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index ec9a7f8bc3fe..718c16d686fa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -4,468 +4,895 @@ #include "hclge_err.h" static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { - { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "imp_itcm0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "imp_itcm1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "imp_itcm2_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "imp_itcm3_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "imp_dtcm0_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "imp_dtcm0_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(13), + .msg = "imp_dtcm1_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(15), + .msg = "imp_dtcm1_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(17), + .msg = "imp_itcm4_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { - { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = 
BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "cmdq_nic_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "cmdq_nic_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "cmdq_nic_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "cmdq_nic_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "cmdq_nic_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "cmdq_nic_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(13), + .msg = "cmdq_nic_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(15), + .msg = "cmdq_nic_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(17), + .msg = "cmdq_rocee_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(19), + .msg = "cmdq_rocee_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(21), + .msg = "cmdq_rocee_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(23), + .msg = "cmdq_rocee_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(25), + .msg = "cmdq_rocee_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(27), + .msg = "cmdq_rocee_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(29), + .msg = "cmdq_rocee_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(31), + .msg = "cmdq_rocee_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { - { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + 
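/* Layout of all these hclge_hw_error tables: each row pairs an
 * interrupt-status bit with its log message and the reset level the
 * error escalates to (HNAE3_NONE_RESET only logs, HNAE3_GLOBAL_RESET
 * forces a full device reset), and the empty sentinel entry terminates
 * the walk. The reflow from packed two-line entries to one field per
 * line changes no values.
 */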
.int_msk = BIT(6), + .msg = "tqp_int_cfg_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "tqp_int_cfg_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(8), + .msg = "tqp_int_ctrl_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "tqp_int_ctrl_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(10), + .msg = "tx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "rx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = { - { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "msix_nic_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "msix_rocee_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_igu_int[] = { - { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "igu_rx_buf0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "igu_rx_buf1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { - { .int_msk = BIT(0), .msg = "rx_buf_overflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "tx_buf_overflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "tx_buf_underrun", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "rx_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "rx_stp_fifo_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "rx_stp_fifo_underflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "tx_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tx_buf_underrun", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "rx_stp_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ncsi_err_int[] = { - { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "ncsi_tx_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { - { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), 
.msg = "umv_key_mem1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "vf_vlan_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "umv_mcast_group_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "umv_key_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "umv_key_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "umv_key_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "umv_key_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "umv_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + 
.int_msk = BIT(7), + .msg = "rss_tc_mode_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "rss_idt_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "rss_idt_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "rss_idt_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "rss_idt_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "rss_idt_mem4_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "rss_idt_mem5_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "rss_idt_mem6_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "rss_idt_mem7_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "rss_idt_mem8_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "rss_idt_mem9_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "rss_idt_mem10_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "rss_idt_mem11_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "rss_idt_mem12_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "rss_idt_mem13_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "rss_idt_mem14_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "rss_idt_mem15_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "port_vlan_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "mcast_linear_table_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "mcast_result_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "flow_director_ad_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = "flow_director_ad_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "rx_vlan_tag_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(30), + .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = { - { .int_msk = BIT(0), .msg = "tx_vlan_tag_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "tx_vlan_tag_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "rss_list_tc_unassigned_queue_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { - { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), 
.msg = "FD_CN1_memory_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "hfs_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "rslt_descr_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "tx_vlan_tag_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "FD_CN0_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "FD_CN1_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "GRO_AD_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_tm_sch_rint[] = { - { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(26), .msg = 
"tm_sch_roce_up_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "tm_sch_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "tm_sch_port_shap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "tm_sch_port_shap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "tm_sch_port_shap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "tm_sch_port_shap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "tm_sch_rq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "tm_sch_rq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "tm_sch_nq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "tm_sch_nq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "tm_sch_roce_up_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "tm_sch_roce_up_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = 
"tm_sch_rcb_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "tm_sch_rcb_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(30), + .msg = "tm_sch_ssu_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(31), + .msg = "tm_sch_ssu_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_qcn_fifo_rint[] = { - { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "qcn_shap_gp0_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "qcn_shap_gp0_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "qcn_shap_gp1_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "qcn_shap_gp1_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "qcn_shap_gp2_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "qcn_shap_gp2_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "qcn_shap_gp3_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "qcn_shap_gp3_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "qcn_shap_gp0_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qcn_shap_gp0_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "qcn_shap_gp1_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = 
"qcn_shap_gp1_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "qcn_shap_gp2_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "qcn_shap_gp2_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "qcn_shap_gp3_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "qcn_shap_gp3_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "qcn_byte_info_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "qcn_byte_info_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_qcn_ecc_rint[] = { - { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "qcn_byte_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "qcn_time_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "qcn_fb_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "qcn_link_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qcn_rate_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "qcn_tmplt_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "qcn_shap_cfg_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "qcn_gp3_barral_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { - { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err", - .reset_level = 
HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "egu_cge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "egu_cge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "egu_lge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "egu_lge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "cge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "cge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "lge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "lge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "cge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "lge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "egu_cge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "egu_lge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "egu_ge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "ge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = { - { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err", - .reset_level = 
HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(26), .msg = "rd_bus_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(27), .msg = "wr_bus_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(28), .msg = "reg_search_miss", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(29), .msg = "rx_q_search_miss", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(13), + .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "rcb_tx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "rcb_rx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "rcb_tx_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "rcb_rx_ebd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "rcb_tso_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "rcb_tx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "rcb_rx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "tpu_tx_pkt_0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "tpu_tx_pkt_1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "rd_bus_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "wr_bus_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = "reg_search_miss", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "rx_q_search_miss", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(30), + .msg = "ooo_ecc_err_detect", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(31), + .msg = "ooo_ecc_err_multpl", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = { - { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(4), + .msg = "gro_bd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "gro_context_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "rx_stash_cfg_ecc_mbit_err", + .reset_level 
= HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "axi_rd_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = { - { .int_msk = BIT(0), .msg = "over_8bd_no_fe", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(5), .msg = "buf_wait_timeout", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "over_8bd_no_fe", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(1), + .msg = "tso_mss_cmp_min_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(2), + .msg = "tso_mss_cmp_max_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "tx_rd_fbd_poison", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(4), + .msg = "rx_rd_ebd_poison", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(5), + .msg = "buf_wait_timeout", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ssu_com_err_int[] = { - { .int_msk = BIT(0), .msg = "buf_sum_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(1), .msg = "ppp_mb_num_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(2), .msg = "ppp_mbid_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "ppp_rlt_host_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "cks_edit_position_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "cks_edit_condition_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "vlan_edit_condition_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "vlan_num_ot_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "vlan_num_in_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "buf_sum_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "ppp_mb_num_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(2), + .msg = "ppp_mbid_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ppp_rlt_mac_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "ppp_rlt_host_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "cks_edit_position_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "cks_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "vlan_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "vlan_num_ot_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "vlan_num_in_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; #define HCLGE_SSU_MEM_ECC_ERR(x) \ - { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \ - .reset_level = HNAE3_GLOBAL_RESET } +{ \ + .int_msk = BIT(x), \ + .msg = "ssu_mem" #x "_ecc_mbit_err", \ + .reset_level = HNAE3_GLOBAL_RESET \ +} static const struct 
hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { HCLGE_SSU_MEM_ECC_ERR(0), @@ -504,131 +931,269 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { }; static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { - { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(1), + .msg = "tpu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "igu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "roc_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tpu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "igu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "roc_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tpu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "igu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "ets_rd_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "ets_wr_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "ets_rd_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "ets_wr_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = { - { .int_msk = BIT(0), .msg = "ig_mac_inf_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "ig_host_inf_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "ig_roc_buf_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int", - .reset_level = 
HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "qm_eof_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "host_cmd_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "ig_mac_inf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "ig_host_inf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "ig_roc_buf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ig_host_data_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "ig_host_key_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "tx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "rx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "rx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qm_eof_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "mb_rlt_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "dup_uncopy_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "dup_cnt_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "dup_cnt_drop_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "dup_cnt_wrb_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "host_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "mac_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "host_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "mac_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "dup_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "out_queue_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET 
+ }, { + .int_msk = BIT(21), + .msg = "bank2_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "bank1_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "bank0_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = { - { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "ets_rd_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "ets_wr_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "ets_rd_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ets_wr_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = { - { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(9), .msg = "low_water_line_err_port", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(10), .msg = "hi_water_line_err_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(9), + .msg = "low_water_line_err_port", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(10), + .msg = "hi_water_line_err_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { - { .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" }, - { .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" }, - { .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" }, - { .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" }, - { .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" }, - { .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" }, - { .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" }, - { .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" }, - { .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" }, - { .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" }, - { .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" }, - { .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" }, - { .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" }, - { .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" }, - { .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" }, - { .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" }, - { .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" }, - { .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" }, - { .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" }, - { .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" }, - { /* sentinel */ } + { + .int_msk = 0, + .msg = "rocee qmm ovf: sgid invalid err" + }, { + .int_msk = 0x4, + .msg = "rocee qmm ovf: sgid ovf err" + }, { + .int_msk = 0x8, + .msg = "rocee qmm ovf: smac invalid err" + }, { + .int_msk = 0xC, + .msg = "rocee qmm ovf: smac ovf err" + }, { + .int_msk = 0x10, + .msg = "rocee qmm ovf: cqc invalid err" + }, { + .int_msk = 
0x11, + .msg = "rocee qmm ovf: cqc ovf err" + }, { + .int_msk = 0x12, + .msg = "rocee qmm ovf: cqc hopnum err" + }, { + .int_msk = 0x13, + .msg = "rocee qmm ovf: cqc ba0 err" + }, { + .int_msk = 0x14, + .msg = "rocee qmm ovf: srqc invalid err" + }, { + .int_msk = 0x15, + .msg = "rocee qmm ovf: srqc ovf err" + }, { + .int_msk = 0x16, + .msg = "rocee qmm ovf: srqc hopnum err" + }, { + .int_msk = 0x17, + .msg = "rocee qmm ovf: srqc ba0 err" + }, { + .int_msk = 0x18, + .msg = "rocee qmm ovf: mpt invalid err" + }, { + .int_msk = 0x19, + .msg = "rocee qmm ovf: mpt ovf err" + }, { + .int_msk = 0x1A, + .msg = "rocee qmm ovf: mpt hopnum err" + }, { + .int_msk = 0x1B, + .msg = "rocee qmm ovf: mpt ba0 err" + }, { + .int_msk = 0x1C, + .msg = "rocee qmm ovf: qpc invalid err" + }, { + .int_msk = 0x1D, + .msg = "rocee qmm ovf: qpc ovf err" + }, { + .int_msk = 0x1E, + .msg = "rocee qmm ovf: qpc hopnum err" + }, { + .int_msk = 0x1F, + .msg = "rocee qmm ovf: qpc ba0 err" + }, { + /* sentinel */ + } }; static const struct hclge_hw_module_id hclge_hw_module_id_st[] = { @@ -1709,34 +2274,36 @@ static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) static const struct hclge_hw_blk hw_blk[] = { { - .msk = BIT(0), .name = "IGU_EGU", - .config_err_int = hclge_config_igu_egu_hw_err_int, - }, - { - .msk = BIT(1), .name = "PPP", - .config_err_int = hclge_config_ppp_hw_err_int, - }, - { - .msk = BIT(2), .name = "SSU", - .config_err_int = hclge_config_ssu_hw_err_int, - }, - { - .msk = BIT(3), .name = "PPU", - .config_err_int = hclge_config_ppu_hw_err_int, - }, - { - .msk = BIT(4), .name = "TM", - .config_err_int = hclge_config_tm_hw_err_int, - }, - { - .msk = BIT(5), .name = "COMMON", - .config_err_int = hclge_config_common_hw_err_int, - }, - { - .msk = BIT(8), .name = "MAC", - .config_err_int = hclge_config_mac_err_int, - }, - { /* sentinel */ } + .msk = BIT(0), + .name = "IGU_EGU", + .config_err_int = hclge_config_igu_egu_hw_err_int, + }, { + .msk = BIT(1), + .name = "PPP", + .config_err_int = hclge_config_ppp_hw_err_int, + }, { + .msk = BIT(2), + .name = "SSU", + .config_err_int = hclge_config_ssu_hw_err_int, + }, { + .msk = BIT(3), + .name = "PPU", + .config_err_int = hclge_config_ppu_hw_err_int, + }, { + .msk = BIT(4), + .name = "TM", + .config_err_int = hclge_config_tm_hw_err_int, + }, { + .msk = BIT(5), + .name = "COMMON", + .config_err_int = hclge_config_common_hw_err_int, + }, { + .msk = BIT(8), + .name = "MAC", + .config_err_int = hclge_config_mac_err_int, + }, { + /* sentinel */ + } }; static void hclge_config_all_msix_error(struct hclge_dev *hdev, bool enable) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 03ae122f1c9a..e55ba2e511b1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -23,6 +23,7 @@ #include "hclge_tm.h" #include "hclge_err.h" #include "hnae3.h" +#include "hclge_devlink.h" #define HCLGE_NAME "hclge" #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset))) @@ -91,23 +92,23 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); -static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG, - HCLGE_CMDQ_TX_ADDR_H_REG, - HCLGE_CMDQ_TX_DEPTH_REG, - HCLGE_CMDQ_TX_TAIL_REG, - HCLGE_CMDQ_TX_HEAD_REG, - HCLGE_CMDQ_RX_ADDR_L_REG, - HCLGE_CMDQ_RX_ADDR_H_REG, - HCLGE_CMDQ_RX_DEPTH_REG, - HCLGE_CMDQ_RX_TAIL_REG, - HCLGE_CMDQ_RX_HEAD_REG, +static const u32 
cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG, + HCLGE_NIC_CSQ_BASEADDR_H_REG, + HCLGE_NIC_CSQ_DEPTH_REG, + HCLGE_NIC_CSQ_TAIL_REG, + HCLGE_NIC_CSQ_HEAD_REG, + HCLGE_NIC_CRQ_BASEADDR_L_REG, + HCLGE_NIC_CRQ_BASEADDR_H_REG, + HCLGE_NIC_CRQ_DEPTH_REG, + HCLGE_NIC_CRQ_TAIL_REG, + HCLGE_NIC_CRQ_HEAD_REG, HCLGE_VECTOR0_CMDQ_SRC_REG, HCLGE_CMDQ_INTR_STS_REG, HCLGE_CMDQ_INTR_EN_REG, HCLGE_CMDQ_INTR_GEN_REG}; static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, - HCLGE_VECTOR0_OTER_EN_REG, + HCLGE_PF_OTHER_INT_REG, HCLGE_MISC_RESET_STS_REG, HCLGE_MISC_VECTOR_INT_STS, HCLGE_GLOBAL_RESET_REG, @@ -374,14 +375,14 @@ static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = { }; static const struct key_info meta_data_key_info[] = { - { PACKET_TYPE_ID, 6}, - { IP_FRAGEMENT, 1}, - { ROCE_TYPE, 1}, - { NEXT_KEY, 5}, - { VLAN_NUMBER, 2}, - { SRC_VPORT, 12}, - { DST_VPORT, 12}, - { TUNNEL_PACKET, 1}, + { PACKET_TYPE_ID, 6 }, + { IP_FRAGEMENT, 1 }, + { ROCE_TYPE, 1 }, + { NEXT_KEY, 5 }, + { VLAN_NUMBER, 2 }, + { SRC_VPORT, 12 }, + { DST_VPORT, 12 }, + { TUNNEL_PACKET, 1 }, }; static const struct key_info tuple_key_info[] = { @@ -748,9 +749,9 @@ static void hclge_update_stats(struct hnae3_handle *handle, static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) { -#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\ - HNAE3_SUPPORT_PHY_LOOPBACK |\ - HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\ +#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \ + HNAE3_SUPPORT_PHY_LOOPBACK | \ + HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) struct hclge_vport *vport = hclge_get_vport(handle); @@ -958,31 +959,31 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) static int hclge_parse_speed(u8 speed_cmd, u32 *speed) { switch (speed_cmd) { - case 6: + case HCLGE_FW_MAC_SPEED_10M: *speed = HCLGE_MAC_SPEED_10M; break; - case 7: + case HCLGE_FW_MAC_SPEED_100M: *speed = HCLGE_MAC_SPEED_100M; break; - case 0: + case HCLGE_FW_MAC_SPEED_1G: *speed = HCLGE_MAC_SPEED_1G; break; - case 1: + case HCLGE_FW_MAC_SPEED_10G: *speed = HCLGE_MAC_SPEED_10G; break; - case 2: + case HCLGE_FW_MAC_SPEED_25G: *speed = HCLGE_MAC_SPEED_25G; break; - case 3: + case HCLGE_FW_MAC_SPEED_40G: *speed = HCLGE_MAC_SPEED_40G; break; - case 4: + case HCLGE_FW_MAC_SPEED_50G: *speed = HCLGE_MAC_SPEED_50G; break; - case 5: + case HCLGE_FW_MAC_SPEED_100G: *speed = HCLGE_MAC_SPEED_100G; break; - case 8: + case HCLGE_FW_MAC_SPEED_200G: *speed = HCLGE_MAC_SPEED_200G; break; default: @@ -992,44 +993,43 @@ static int hclge_parse_speed(u8 speed_cmd, u32 *speed) return 0; } +static const struct hclge_speed_bit_map speed_bit_map[] = { + {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT}, + {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT}, + {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT}, + {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT}, + {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT}, + {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT}, + {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT}, + {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT}, + {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT}, +}; + +static int hclge_get_speed_bit(u32 speed, u32 *speed_bit) +{ + u16 i; + + for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) { + if (speed == speed_bit_map[i].speed) { + *speed_bit = speed_bit_map[i].speed_bit; + return 0; + } + } + + return -EINVAL; +} + static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed) { struct hclge_vport *vport = hclge_get_vport(handle); struct 
hclge_dev *hdev = vport->back; u32 speed_ability = hdev->hw.mac.speed_ability; u32 speed_bit = 0; + int ret; - switch (speed) { - case HCLGE_MAC_SPEED_10M: - speed_bit = HCLGE_SUPPORT_10M_BIT; - break; - case HCLGE_MAC_SPEED_100M: - speed_bit = HCLGE_SUPPORT_100M_BIT; - break; - case HCLGE_MAC_SPEED_1G: - speed_bit = HCLGE_SUPPORT_1G_BIT; - break; - case HCLGE_MAC_SPEED_10G: - speed_bit = HCLGE_SUPPORT_10G_BIT; - break; - case HCLGE_MAC_SPEED_25G: - speed_bit = HCLGE_SUPPORT_25G_BIT; - break; - case HCLGE_MAC_SPEED_40G: - speed_bit = HCLGE_SUPPORT_40G_BIT; - break; - case HCLGE_MAC_SPEED_50G: - speed_bit = HCLGE_SUPPORT_50G_BIT; - break; - case HCLGE_MAC_SPEED_100G: - speed_bit = HCLGE_SUPPORT_100G_BIT; - break; - case HCLGE_MAC_SPEED_200G: - speed_bit = HCLGE_SUPPORT_200G_BIT; - break; - default: - return -EINVAL; - } + ret = hclge_get_speed_bit(speed, &speed_bit); + if (ret) + return ret; if (speed_bit & speed_ability) return 0; @@ -1814,6 +1814,7 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) nic->pdev = hdev->pdev; nic->ae_algo = &ae_algo; nic->numa_node_mask = hdev->numa_node_mask; + nic->kinfo.io_base = hdev->hw.io_base; ret = hclge_knic_setup(vport, num_tqps, hdev->num_tx_desc, hdev->num_rx_desc); @@ -2580,39 +2581,39 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, switch (speed) { case HCLGE_MAC_SPEED_10M: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 6); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M); break; case HCLGE_MAC_SPEED_100M: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 7); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M); break; case HCLGE_MAC_SPEED_1G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 0); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G); break; case HCLGE_MAC_SPEED_10G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 1); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G); break; case HCLGE_MAC_SPEED_25G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 2); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G); break; case HCLGE_MAC_SPEED_40G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 3); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G); break; case HCLGE_MAC_SPEED_50G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 4); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G); break; case HCLGE_MAC_SPEED_100G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 5); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G); break; case HCLGE_MAC_SPEED_200G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 8); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G); break; default: dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); @@ -3420,7 +3421,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) hclge_enable_vector(&hdev->misc_vector, false); event_cause = hclge_check_event_cause(hdev, &clearval); - /* vector 0 interrupt is shared with reset and mailbox source events.*/ + /* vector 0 interrupt is shared with reset and mailbox source events. 
*/ switch (event_cause) { case HCLGE_VECTOR0_EVENT_ERR: hclge_errhand_task_schedule(hdev); @@ -3789,6 +3790,12 @@ static void hclge_do_reset(struct hclge_dev *hdev) } switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + dev_info(&pdev->dev, "IMP reset requested\n"); + val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); + break; case HNAE3_GLOBAL_RESET: dev_info(&pdev->dev, "global reset requested\n"); val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); @@ -5937,7 +5944,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage, cur_key_x = key_x; cur_key_y = key_y; - for (i = 0 ; i < MAX_TUPLE; i++) { + for (i = 0; i < MAX_TUPLE; i++) { bool tuple_valid; tuple_size = tuple_key_info[i].key_length / 8; @@ -11509,10 +11516,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) if (ret) goto out; + ret = hclge_devlink_init(hdev); + if (ret) + goto err_pci_uninit; + /* Firmware command queue initialize */ ret = hclge_cmd_queue_init(hdev); if (ret) - goto err_pci_uninit; + goto err_devlink_uninit; /* Firmware command initialize */ ret = hclge_cmd_init(hdev); @@ -11689,6 +11700,8 @@ err_msi_uninit: pci_free_irq_vectors(pdev); err_cmd_uninit: hclge_cmd_uninit(hdev); +err_devlink_uninit: + hclge_devlink_uninit(hdev); err_pci_uninit: pcim_iounmap(pdev, hdev->hw.io_base); pci_clear_master(pdev); @@ -12079,6 +12092,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_cmd_uninit(hdev); hclge_misc_irq_uninit(hdev); + hclge_devlink_uninit(hdev); hclge_pci_uninit(hdev); mutex_destroy(&hdev->vport_lock); hclge_uninit_vport_vlan_table(hdev); @@ -12867,6 +12881,29 @@ static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset, return 0; } +static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle, + u32 *status_code) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) + return -EOPNOTSUPP; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to query link diagnosis info, ret = %d\n", ret); + return ret; + } + + *status_code = le32_to_cpu(desc.data[0]); + return 0; +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@ -12967,6 +13004,7 @@ static const struct hnae3_ae_ops hclge_ops = { .set_tx_hwts_info = hclge_ptp_set_tx_info, .get_rx_hwts = hclge_ptp_get_rx_hwts, .get_ts_info = hclge_ptp_get_ts_info, + .get_link_diagnosis_info = hclge_get_link_diagnosis_info, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index e446b839a371..de6afbcbfbac 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -8,6 +8,7 @@ #include <linux/phy.h> #include <linux/if_vlan.h> #include <linux/kfifo.h> +#include <net/devlink.h> #include "hclge_cmd.h" #include "hclge_ptp.h" @@ -37,22 +38,22 @@ #define HCLGE_VECTOR_REG_OFFSET_H 0x1000 #define HCLGE_VECTOR_VF_OFFSET 0x100000 -#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000 -#define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004 -#define HCLGE_CMDQ_TX_DEPTH_REG 0x27008 -#define 
HCLGE_CMDQ_TX_TAIL_REG 0x27010 -#define HCLGE_CMDQ_TX_HEAD_REG 0x27014 -#define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018 -#define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C -#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020 -#define HCLGE_CMDQ_RX_TAIL_REG 0x27024 -#define HCLGE_CMDQ_RX_HEAD_REG 0x27028 +#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000 +#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004 +#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008 +#define HCLGE_NIC_CSQ_TAIL_REG 0x27010 +#define HCLGE_NIC_CSQ_HEAD_REG 0x27014 +#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018 +#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701C +#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020 +#define HCLGE_NIC_CRQ_TAIL_REG 0x27024 +#define HCLGE_NIC_CRQ_HEAD_REG 0x27028 + #define HCLGE_CMDQ_INTR_STS_REG 0x27104 #define HCLGE_CMDQ_INTR_EN_REG 0x27108 #define HCLGE_CMDQ_INTR_GEN_REG 0x2710C /* bar registers for common func */ -#define HCLGE_VECTOR0_OTER_EN_REG 0x20600 #define HCLGE_GRO_EN_REG 0x28000 #define HCLGE_RXD_ADV_LAYOUT_EN_REG 0x28008 @@ -193,6 +194,7 @@ enum HLCGE_PORT_TYPE { #define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U #define HCLGE_VECTOR0_IMP_RD_POISON_B 5U #define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U +#define HCLGE_TRIGGER_IMP_RESET_B 7U #define HCLGE_MAC_DEFAULT_FRAME \ (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN) @@ -944,6 +946,7 @@ struct hclge_dev { cpumask_t affinity_mask; struct irq_affinity_notify affinity_notify; struct hclge_ptp *ptp; + struct devlink *devlink; }; /* VPort level vlan tag configuration for TX direction */ @@ -1055,6 +1058,11 @@ struct hclge_vport { struct list_head vlan_list; /* Store VF vlan table */ }; +struct hclge_speed_bit_map { + u32 speed; + u32 speed_bit; +}; + int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, bool en_mc_pmc, bool en_bc_pmc); int hclge_add_uc_addr_common(struct hclge_vport *vport, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index c0a478ae9583..2ce5302c5956 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -10,7 +10,14 @@ static u16 hclge_errno_to_resp(int errno) { - return abs(errno); + int resp = abs(errno); + + /* The status for pf to vf msg cmd is u16, constrained by HW. + * We need to keep the same type with it. + * The input errno is a standard error code, so it is safe to + * use a u16 to store the abs(errno).
+ */ + return (u16)resp; } /* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF @@ -66,6 +73,8 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data, resp_msg->len); + trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); + status = hclge_cmd_send(&hdev->hw, &desc, 1); if (status) dev_err(&hdev->pdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h index dbf5f4c08019..7a9b77de632a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h @@ -127,7 +127,7 @@ static inline struct hclge_dev *hclge_ptp_get_hdev(struct ptp_clock_info *info) } bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb); -void hclge_ptp_clean_tx_hwts(struct hclge_dev *dev); +void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev); void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb, u32 nsec, u32 sec); int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile index 2c26ea607a53..51ff7d86ee90 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile @@ -7,4 +7,4 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 ccflags-y += -I $(srctree)/$(src) obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o -hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o +hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o hclgevf_devlink.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index d9ddb0a243d4..59772b0e9531 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -71,7 +71,7 @@ static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw) static bool hclgevf_is_special_opcode(u16 opcode) { - static const u16 spec_opcode[] = {0x30, 0x31, 0x32}; + const u16 spec_opcode[] = {0x30, 0x31, 0x32}; int i; for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { @@ -342,25 +342,26 @@ static void hclgevf_set_default_capability(struct hclgevf_dev *hdev) set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); } +const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = { + {HCLGEVF_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B}, + {HCLGEVF_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B}, + {HCLGEVF_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B}, + {HCLGEVF_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B}, + {HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B}, + {HCLGEVF_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, +}; + static void hclgevf_parse_capability(struct hclgevf_dev *hdev, struct hclgevf_query_version_cmd *cmd) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - u32 caps; + u32 caps, i; caps = __le32_to_cpu(cmd->caps[0]); - if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_GSO_B)) - set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGEVF_CAP_INT_QL_B)) - set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B)) - set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGEVF_CAP_HW_TX_CSUM_B)) - set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps); - if (hnae3_get_bit(caps, 
HCLGEVF_CAP_UDP_TUNNEL_CSUM_B)) - set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGEVF_CAP_RXD_ADV_LAYOUT_B)) - set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps); + for (i = 0; i < ARRAY_SIZE(hclgevf_cmd_caps_bit_map0); i++) + if (hnae3_get_bit(caps, hclgevf_cmd_caps_bit_map0[i].imp_bit)) + set_bit(hclgevf_cmd_caps_bit_map0[i].local_bit, + ae_dev->caps); } static __le32 hclgevf_build_api_caps(void) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 5b82177f98b4..39d0b589c720 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -266,16 +266,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd { #define HCLGEVF_TYPE_CRQ 0 #define HCLGEVF_TYPE_CSQ 1 -#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000 -#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004 -#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008 -#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010 -#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014 -#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018 -#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701c -#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020 -#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024 -#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028 /* this bit indicates that the driver is ready for hardware reset */ #define HCLGEVF_NIC_SW_RST_RDY_B 16 @@ -306,6 +296,12 @@ struct hclgevf_dev_specs_1_cmd { u8 rsv1[18]; }; +/* capabilities bits map between imp firmware and local driver */ +struct hclgevf_caps_bit_map { + u16 imp_bit; + u16 local_bit; +}; + static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value) { writel(value, base + reg); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c new file mode 100644 index 000000000000..f478770299c6 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2021 Hisilicon Limited. 
*/ + +#include <net/devlink.h> + +#include "hclgevf_devlink.h" + +static int hclgevf_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ +#define HCLGEVF_DEVLINK_FW_STRING_LEN 32 + struct hclgevf_devlink_priv *priv = devlink_priv(devlink); + char version_str[HCLGEVF_DEVLINK_FW_STRING_LEN]; + struct hclgevf_dev *hdev = priv->hdev; + int ret; + + ret = devlink_info_driver_name_put(req, KBUILD_MODNAME); + if (ret) + return ret; + + snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu", + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + + return devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + version_str); +} + +static int hclgevf_devlink_reload_down(struct devlink *devlink, + bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) +{ + struct hclgevf_devlink_priv *priv = devlink_priv(devlink); + struct hclgevf_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->nic; + struct pci_dev *pdev = hdev->pdev; + int ret; + + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { + dev_err(&pdev->dev, "reset is handling\n"); + return -EBUSY; + } + + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, + HNAE3_UNINIT_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static int hclgevf_devlink_reload_up(struct devlink *devlink, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct hclgevf_devlink_priv *priv = devlink_priv(devlink); + struct hclgevf_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->nic; + int ret; + + *actions_performed = BIT(action); + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static const struct devlink_ops hclgevf_devlink_ops = { + .info_get = hclgevf_devlink_info_get, + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT), + .reload_down = hclgevf_devlink_reload_down, + .reload_up = hclgevf_devlink_reload_up, +}; + +int hclgevf_devlink_init(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclgevf_devlink_priv *priv; + struct devlink *devlink; + int ret; + + devlink = + devlink_alloc(&hclgevf_devlink_ops, + sizeof(struct hclgevf_devlink_priv), &pdev->dev); + if (!devlink) + return -ENOMEM; + + priv = devlink_priv(devlink); + priv->hdev = hdev; + hdev->devlink = devlink; + + ret = devlink_register(devlink); + if (ret) { + dev_err(&pdev->dev, "failed to register devlink, ret = %d\n", + ret); + goto out_reg_fail; + } + + devlink_reload_enable(devlink); + + return 0; + +out_reg_fail: + devlink_free(devlink); + return ret; +} + +void 
hclgevf_devlink_uninit(struct hclgevf_dev *hdev) +{ + struct devlink *devlink = hdev->devlink; + + devlink_reload_disable(devlink); + + devlink_unregister(devlink); + + devlink_free(devlink); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h new file mode 100644 index 000000000000..e09ea3d8a963 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2021 Hisilicon Limited. */ + +#ifndef __HCLGEVF_DEVLINK_H +#define __HCLGEVF_DEVLINK_H + +#include "hclgevf_main.h" + +struct hclgevf_devlink_priv { + struct hclgevf_dev *hdev; +}; + +int hclgevf_devlink_init(struct hclgevf_dev *hdev); +void hclgevf_devlink_uninit(struct hclgevf_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 938654778979..82e727020120 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -8,6 +8,7 @@ #include "hclgevf_main.h" #include "hclge_mbx.h" #include "hnae3.h" +#include "hclgevf_devlink.h" #define HCLGEVF_NAME "hclgevf" @@ -39,16 +40,16 @@ static const u8 hclgevf_hash_key[] = { MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); -static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, - HCLGEVF_CMDQ_TX_ADDR_H_REG, - HCLGEVF_CMDQ_TX_DEPTH_REG, - HCLGEVF_CMDQ_TX_TAIL_REG, - HCLGEVF_CMDQ_TX_HEAD_REG, - HCLGEVF_CMDQ_RX_ADDR_L_REG, - HCLGEVF_CMDQ_RX_ADDR_H_REG, - HCLGEVF_CMDQ_RX_DEPTH_REG, - HCLGEVF_CMDQ_RX_TAIL_REG, - HCLGEVF_CMDQ_RX_HEAD_REG, +static const u32 cmdq_reg_addr_list[] = {HCLGEVF_NIC_CSQ_BASEADDR_L_REG, + HCLGEVF_NIC_CSQ_BASEADDR_H_REG, + HCLGEVF_NIC_CSQ_DEPTH_REG, + HCLGEVF_NIC_CSQ_TAIL_REG, + HCLGEVF_NIC_CSQ_HEAD_REG, + HCLGEVF_NIC_CRQ_BASEADDR_L_REG, + HCLGEVF_NIC_CRQ_BASEADDR_H_REG, + HCLGEVF_NIC_CRQ_DEPTH_REG, + HCLGEVF_NIC_CRQ_TAIL_REG, + HCLGEVF_NIC_CRQ_HEAD_REG, HCLGEVF_VECTOR0_CMDQ_SRC_REG, HCLGEVF_VECTOR0_CMDQ_STATE_REG, HCLGEVF_CMDQ_INTR_EN_REG, @@ -538,6 +539,7 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) nic->pdev = hdev->pdev; nic->numa_node_mask = hdev->numa_node_mask; nic->flags |= HNAE3_SUPPORT_VF; + nic->kinfo.io_base = hdev->hw.io_base; ret = hclgevf_knic_setup(hdev); if (ret) @@ -1961,7 +1963,7 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG)); dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", - hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); + hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG)); dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); @@ -3339,6 +3341,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) return ret; + ret = hclgevf_devlink_init(hdev); + if (ret) + goto err_devlink_init; + ret = hclgevf_cmd_queue_init(hdev); if (ret) goto err_cmd_queue_init; @@ -3443,6 +3449,8 @@ err_misc_irq_init: err_cmd_init: hclgevf_cmd_uninit(hdev); err_cmd_queue_init: + hclgevf_devlink_uninit(hdev); +err_devlink_init: hclgevf_pci_uninit(hdev); clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); return ret; @@ -3464,6 +3472,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) } 
hclgevf_cmd_uninit(hdev); + hclgevf_devlink_uninit(hdev); hclgevf_pci_uninit(hdev); hclgevf_uninit_mac_list(hdev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index e8013be055f8..883130a9b48f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -6,6 +6,7 @@ #include <linux/fs.h> #include <linux/if_vlan.h> #include <linux/types.h> +#include <net/devlink.h> #include "hclge_mbx.h" #include "hclgevf_cmd.h" #include "hnae3.h" @@ -32,16 +33,17 @@ #define HCLGEVF_VECTOR_VF_OFFSET 0x100000 /* bar registers for cmdq */ -#define HCLGEVF_CMDQ_TX_ADDR_L_REG 0x27000 -#define HCLGEVF_CMDQ_TX_ADDR_H_REG 0x27004 -#define HCLGEVF_CMDQ_TX_DEPTH_REG 0x27008 -#define HCLGEVF_CMDQ_TX_TAIL_REG 0x27010 -#define HCLGEVF_CMDQ_TX_HEAD_REG 0x27014 -#define HCLGEVF_CMDQ_RX_ADDR_L_REG 0x27018 -#define HCLGEVF_CMDQ_RX_ADDR_H_REG 0x2701C -#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020 -#define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024 -#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028 +#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000 +#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004 +#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008 +#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010 +#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014 +#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018 +#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701C +#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020 +#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024 +#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028 + #define HCLGEVF_CMDQ_INTR_EN_REG 0x27108 #define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C @@ -316,7 +318,6 @@ struct hclgevf_dev { struct hclgevf_mac_table_cfg mac_table; - bool mbx_event_pending; struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ @@ -332,6 +333,8 @@ struct hclgevf_dev { u32 flag; unsigned long serv_processed_cnt; unsigned long last_serv_processed; + + struct devlink *devlink; }; static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index b339b9bc0625..fdc66fae0960 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -155,18 +155,66 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw) return tail == hw->cmq.crq.next_to_use; } +static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req) +{ + struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp; + + if (resp->received_resp) + dev_warn(&hdev->pdev->dev, + "VF mbx resp flag not clear(%u)\n", + req->msg.vf_mbx_msg_code); + + resp->origin_mbx_msg = + (req->msg.vf_mbx_msg_code << 16); + resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode; + resp->resp_status = + hclgevf_resp_to_errno(req->msg.resp_status); + memcpy(resp->additional_info, req->msg.resp_data, + HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8)); + if (req->match_id) { + /* If match_id is not zero, it means the PF supports match_id. + * If the match_id is right, the VF gets the right response; + * otherwise the response is ignored. The driver will clear + * hdev->mbx_resp when sending the next message which needs a + * response.
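+ * (Illustrative clarification, not part of the original patch:
+ * match_id acts as a sequence number, so a late response to an
+ * earlier, timed-out request cannot complete the currently
+ * pending one.)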
+ */ + if (req->match_id == resp->match_id) + resp->received_resp = true; + } else { + resp->received_resp = true; + } +} + +static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req) +{ + /* we will drop the async msg if we find ARQ as full + * and continue with next message + */ + if (atomic_read(&hdev->arq.count) >= + HCLGE_MBX_MAX_ARQ_MSG_NUM) { + dev_warn(&hdev->pdev->dev, + "Async Q full, dropping msg(%u)\n", + req->msg.code); + return; + } + + /* tail the async message in arq */ + memcpy(hdev->arq.msg_q[hdev->arq.tail], &req->msg, + HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); + hclge_mbx_tail_ptr_move_arq(hdev->arq); + atomic_inc(&hdev->arq.count); + + hclgevf_mbx_task_schedule(hdev); +} + void hclgevf_mbx_handler(struct hclgevf_dev *hdev) { - struct hclgevf_mbx_resp_status *resp; struct hclge_mbx_pf_to_vf_cmd *req; struct hclgevf_cmq_ring *crq; struct hclgevf_desc *desc; - u16 *msg_q; u16 flag; - u8 *temp; - int i; - resp = &hdev->mbx_resp; crq = &hdev->hw.cmq.crq; while (!hclgevf_cmd_crq_empty(&hdev->hw)) { @@ -200,69 +248,14 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) */ switch (req->msg.code) { case HCLGE_MBX_PF_VF_RESP: - if (resp->received_resp) - dev_warn(&hdev->pdev->dev, - "VF mbx resp flag not clear(%u)\n", - req->msg.vf_mbx_msg_code); - resp->received_resp = true; - - resp->origin_mbx_msg = - (req->msg.vf_mbx_msg_code << 16); - resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode; - resp->resp_status = - hclgevf_resp_to_errno(req->msg.resp_status); - - temp = (u8 *)req->msg.resp_data; - for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) { - resp->additional_info[i] = *temp; - temp++; - } - - /* If match_id is not zero, it means PF support - * match_id. If the match_id is right, VF get the - * right response, otherwise ignore the response. - * Driver will clear hdev->mbx_resp when send - * next message which need response. - */ - if (req->match_id) { - if (req->match_id == resp->match_id) - resp->received_resp = true; - } else { - resp->received_resp = true; - } + hclgevf_handle_mbx_response(hdev, req); break; case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: case HCLGE_MBX_LINK_STAT_MODE: case HCLGE_MBX_PUSH_VLAN_INFO: case HCLGE_MBX_PUSH_PROMISC_INFO: - /* set this mbx event as pending. This is required as we - * might loose interrupt event when mbx task is busy - * handling. This shall be cleared when mbx task just - * enters handling state. 
- */ - hdev->mbx_event_pending = true; - - /* we will drop the async msg if we find ARQ as full - * and continue with next message - */ - if (atomic_read(&hdev->arq.count) >= - HCLGE_MBX_MAX_ARQ_MSG_NUM) { - dev_warn(&hdev->pdev->dev, - "Async Q full, dropping msg(%u)\n", - req->msg.code); - break; - } - - /* tail the async message in arq */ - msg_q = hdev->arq.msg_q[hdev->arq.tail]; - memcpy(&msg_q[0], &req->msg, - HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); - hclge_mbx_tail_ptr_move_arq(hdev->arq); - atomic_inc(&hdev->arq.count); - - hclgevf_mbx_task_schedule(hdev); - + hclgevf_handle_mbx_msg(hdev, req); break; default: dev_err(&hdev->pdev->dev, @@ -298,11 +291,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) u8 flag; u8 idx; - /* we can safely clear it now as we are at start of the async message - * processing - */ - hdev->mbx_event_pending = false; - tail = hdev->arq.tail; /* process all the async queue messages */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c index 58d5646444b0..6e11ee339f12 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -293,9 +293,9 @@ static const struct devlink_ops hinic_devlink_ops = { .flash_update = hinic_devlink_flash_update, }; -struct devlink *hinic_devlink_alloc(void) +struct devlink *hinic_devlink_alloc(struct device *dev) { - return devlink_alloc(&hinic_devlink_ops, sizeof(struct hinic_dev)); + return devlink_alloc(&hinic_devlink_ops, sizeof(struct hinic_dev), dev); } void hinic_devlink_free(struct devlink *devlink) @@ -303,11 +303,11 @@ void hinic_devlink_free(struct devlink *devlink) devlink_free(devlink); } -int hinic_devlink_register(struct hinic_devlink_priv *priv, struct device *dev) +int hinic_devlink_register(struct hinic_devlink_priv *priv) { struct devlink *devlink = priv_to_devlink(priv); - return devlink_register(devlink, dev); + return devlink_register(devlink); } void hinic_devlink_unregister(struct hinic_devlink_priv *priv) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h index a090ebcfaabb..9e315011015c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h @@ -108,9 +108,9 @@ struct host_image_st { u32 device_id; }; -struct devlink *hinic_devlink_alloc(void); +struct devlink *hinic_devlink_alloc(struct device *dev); void hinic_devlink_free(struct devlink *devlink); -int hinic_devlink_register(struct hinic_devlink_priv *priv, struct device *dev); +int hinic_devlink_register(struct hinic_devlink_priv *priv); void hinic_devlink_unregister(struct hinic_devlink_priv *priv); int hinic_health_reporters_create(struct hinic_devlink_priv *priv); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index 162d3c330dec..b431c300ef1b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -795,13 +795,17 @@ static int __hinic_set_coalesce(struct net_device *netdev, } static int hinic_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE); } static int hinic_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce 
*coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 428108eb10d2..56b6b04e209b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -754,7 +754,7 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) return err; } - err = hinic_devlink_register(hwdev->devlink_dev, &pdev->dev); + err = hinic_devlink_register(hwdev->devlink_dev); if (err) { dev_err(&hwif->pdev->dev, "Failed to register devlink\n"); hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 405ee4d2d2b1..ae707e305684 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -1183,7 +1183,7 @@ static int nic_dev_init(struct pci_dev *pdev) struct devlink *devlink; int err, num_qps; - devlink = hinic_devlink_alloc(); + devlink = hinic_devlink_alloc(&pdev->dev); if (!devlink) { dev_err(&pdev->dev, "Hinic devlink alloc failed\n"); return -ENOMEM; @@ -1392,28 +1392,16 @@ static int hinic_probe(struct pci_dev *pdev, pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Failed to set DMA mask\n"); goto err_dma_mask; } } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_warn(&pdev->dev, - "Couldn't set 64-bit consistent DMA mask\n"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "Failed to set consistent DMA mask\n"); - goto err_dma_consistent_mask; - } - } - err = nic_dev_init(pdev); if (err) { dev_err(&pdev->dev, "Failed to initialize NIC device\n"); @@ -1424,7 +1412,6 @@ static int hinic_probe(struct pci_dev *pdev, return 0; err_nic_dev_init: -err_dma_consistent_mask: err_dma_mask: pci_release_regions(pdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index f8a26459ff65..a78c398bf5b2 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -836,8 +836,10 @@ int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) int hinic_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate) { - u32 speeds[] = {SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, - SPEED_25000, SPEED_40000, SPEED_100000}; + static const u32 speeds[] = { + SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, SPEED_100000 + }; struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_port_cap port_cap = { 0 }; enum hinic_port_link_state link_state; diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index fc8c7cd67471..b8a40146b895 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c @@ -1110,9 +1110,6 @@ static void print_eth(unsigned char *add, char *str) add, add + 6, add, add[12], add[13], str); } -static int io = 0x300; -static int irq = 10; - static const struct net_device_ops 
i596_netdev_ops = { .ndo_open = i596_open, .ndo_stop = i596_close, @@ -1123,7 +1120,7 @@ static const struct net_device_ops i596_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -struct net_device * __init i82596_probe(int unit) +static struct net_device * __init i82596_probe(void) { struct net_device *dev; int i; @@ -1140,14 +1137,6 @@ struct net_device * __init i82596_probe(int unit) if (!dev) return ERR_PTR(-ENOMEM); - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } else { - dev->base_addr = io; - dev->irq = irq; - } - #ifdef ENABLE_MVME16x_NET if (MACH_IS_MVME16x) { if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) { @@ -1515,22 +1504,22 @@ static void set_multicast_list(struct net_device *dev) } } -#ifdef MODULE static struct net_device *dev_82596; static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "i82596 debug mask"); -int __init init_module(void) +static int __init i82596_init(void) { if (debug >= 0) i596_debug = debug; - dev_82596 = i82596_probe(-1); + dev_82596 = i82596_probe(); return PTR_ERR_OR_ZERO(dev_82596); } +module_init(i82596_init); -void __exit cleanup_module(void) +static void __exit i82596_cleanup(void) { unregister_netdev(dev_82596); #ifdef __mc68000__ @@ -1544,5 +1533,4 @@ void __exit cleanup_module(void) free_page ((u32)(dev_82596->mem_start)); free_netdev(dev_82596); } - -#endif /* MODULE */ +module_exit(i82596_cleanup); diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c index 4564ee02c95f..893e0ddcb611 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.c +++ b/drivers/net/ethernet/i825xx/sun3_82586.c @@ -29,6 +29,7 @@ static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */ static int fifo=0x8; /* don't change */ #include <linux/kernel.h> +#include <linux/module.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> @@ -276,7 +277,7 @@ static void alloc586(struct net_device *dev) memset((char *)p->scb,0,sizeof(struct scb_struct)); } -struct net_device * __init sun3_82586_probe(int unit) +static int __init sun3_82586_probe(void) { struct net_device *dev; unsigned long ioaddr; @@ -291,25 +292,20 @@ struct net_device * __init sun3_82586_probe(int unit) break; default: - return ERR_PTR(-ENODEV); + return -ENODEV; } if (found) - return ERR_PTR(-ENODEV); + return -ENODEV; ioaddr = (unsigned long)ioremap(IE_OBIO, SUN3_82586_TOTAL_SIZE); if (!ioaddr) - return ERR_PTR(-ENOMEM); + return -ENOMEM; found = 1; dev = alloc_etherdev(sizeof(struct priv)); if (!dev) goto out; - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - dev->irq = IE_IRQ; dev->base_addr = ioaddr; err = sun3_82586_probe1(dev, ioaddr); @@ -326,8 +322,9 @@ out1: free_netdev(dev); out: iounmap((void __iomem *)ioaddr); - return ERR_PTR(err); + return err; } +module_init(sun3_82586_probe); static const struct net_device_ops sun3_82586_netdev_ops = { .ndo_open = sun3_82586_open, diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 471be6ec7e8a..664a91af662d 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -3011,7 +3011,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_stop = emac_close, .ndo_get_stats = emac_stats, .ndo_set_rx_mode = emac_set_multicast_list, - .ndo_do_ioctl = emac_ioctl, + .ndo_eth_ioctl = emac_ioctl, .ndo_tx_timeout = emac_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = 
emac_set_mac_address, @@ -3023,7 +3023,7 @@ static const struct net_device_ops emac_gige_netdev_ops = { .ndo_stop = emac_close, .ndo_get_stats = emac_stats, .ndo_set_rx_mode = emac_set_multicast_list, - .ndo_do_ioctl = emac_ioctl, + .ndo_eth_ioctl = emac_ioctl, .ndo_tx_timeout = emac_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = emac_set_mac_address, diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 737ba85e409f..3d9b4f99d357 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1630,7 +1630,7 @@ static const struct net_device_ops ibmveth_netdev_ops = { .ndo_stop = ibmveth_close, .ndo_start_xmit = ibmveth_start_xmit, .ndo_set_rx_mode = ibmveth_set_multicast_list, - .ndo_do_ioctl = ibmveth_ioctl, + .ndo_eth_ioctl = ibmveth_ioctl, .ndo_change_mtu = ibmveth_change_mtu, .ndo_fix_features = ibmveth_fix_features, .ndo_set_features = ibmveth_set_features, diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 82744a7501c7..b0b6f90deb7d 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -58,8 +58,8 @@ config E1000 config E1000E tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" depends on PCI && (!SPARC32 || BROKEN) + depends on PTP_1588_CLOCK_OPTIONAL select CRC32 - imply PTP_1588_CLOCK help This driver supports the PCI-Express Intel(R) PRO/1000 gigabit ethernet family of adapters. For PCI or PCI-X e1000 adapters, @@ -87,7 +87,7 @@ config E1000E_HWTS config IGB tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support" depends on PCI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL select I2C select I2C_ALGOBIT help @@ -159,9 +159,9 @@ config IXGB config IXGBE tristate "Intel(R) 10GbE PCI Express adapters support" depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL select MDIO select PHYLIB - imply PTP_1588_CLOCK help This driver supports Intel(R) 10GbE PCI Express family of adapters. For more information on how to identify your adapter, go @@ -239,7 +239,7 @@ config IXGBEVF_IPSEC config I40E tristate "Intel(R) Ethernet Controller XL710 Family support" - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL depends on PCI select AUXILIARY_BUS help @@ -295,11 +295,11 @@ config ICE tristate "Intel(R) Ethernet Connection E800 Series Support" default n depends on PCI_MSI + depends on PTP_1588_CLOCK_OPTIONAL select AUXILIARY_BUS select DIMLIB select NET_DEVLINK select PLDMFW - imply PTP_1588_CLOCK help This driver supports Intel(R) Ethernet Connection E800 Series of devices. For more information on how to identify your adapter, go @@ -317,7 +317,7 @@ config FM10K tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support" default n depends on PCI_MSI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL help This driver supports Intel(R) FM10000 Ethernet Switch Host Interface. 
For more information on how to identify your adapter, diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 1b0958bd24f6..373eb027b925 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2715,10 +2715,10 @@ static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_TEST: - memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test)); + memcpy(data, e100_gstrings_test, sizeof(e100_gstrings_test)); break; case ETH_SS_STATS: - memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats)); + memcpy(data, e100_gstrings_stats, sizeof(e100_gstrings_stats)); break; } } @@ -2809,7 +2809,7 @@ static const struct net_device_ops e100_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = e100_set_multicast_list, .ndo_set_mac_address = e100_set_mac_address, - .ndo_do_ioctl = e100_do_ioctl, + .ndo_eth_ioctl = e100_do_ioctl, .ndo_tx_timeout = e100_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = e100_netpoll, diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 3c51ee94fa00..0a57172dfcbc 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1739,7 +1739,9 @@ static int e1000_set_phys_id(struct net_device *netdev, } static int e1000_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1755,7 +1757,9 @@ static int e1000_get_coalesce(struct net_device *netdev, } static int e1000_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index c2a109126c27..bed4f040face 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -832,7 +832,7 @@ static const struct net_device_ops e1000_netdev_ops = { .ndo_set_mac_address = e1000_set_mac, .ndo_tx_timeout = e1000_tx_timeout, .ndo_change_mtu = e1000_change_mtu, - .ndo_do_ioctl = e1000_ioctl, + .ndo_eth_ioctl = e1000_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 06442e6bef73..8515e00d1b40 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -903,6 +903,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: mask |= BIT(18); break; default: @@ -1569,6 +1570,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: fext_nvm11 = er32(FEXTNVM11); fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX; ew32(FEXTNVM11, fext_nvm11); @@ -1991,7 +1993,9 @@ static int e1000_set_phys_id(struct net_device *netdev, } static int e1000_get_coalesce(struct net_device *netdev, - struct 
ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -2004,7 +2008,9 @@ static int e1000_get_coalesce(struct net_device *netdev, } static int e1000_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index db79c4e6413e..bcf680e83811 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -98,14 +98,22 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_TGP_I219_V14 0x15FA #define E1000_DEV_ID_PCH_TGP_I219_LM15 0x15F4 #define E1000_DEV_ID_PCH_TGP_I219_V15 0x15F5 +#define E1000_DEV_ID_PCH_RPL_I219_LM23 0x0DC5 +#define E1000_DEV_ID_PCH_RPL_I219_V23 0x0DC6 #define E1000_DEV_ID_PCH_ADP_I219_LM16 0x1A1E #define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F #define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C #define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D +#define E1000_DEV_ID_PCH_RPL_I219_LM22 0x0DC7 +#define E1000_DEV_ID_PCH_RPL_I219_V22 0x0DC8 #define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A #define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B #define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C #define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D +#define E1000_DEV_ID_PCH_LNP_I219_LM20 0x550E +#define E1000_DEV_ID_PCH_LNP_I219_V20 0x550F +#define E1000_DEV_ID_PCH_LNP_I219_LM21 0x5510 +#define E1000_DEV_ID_PCH_LNP_I219_V21 0x5511 #define E1000_REVISION_4 4 @@ -132,6 +140,7 @@ enum e1000_mac_type { e1000_pch_tgp, e1000_pch_adp, e1000_pch_mtp, + e1000_pch_lnp, }; enum e1000_media_type { diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index a80336c4319b..60c582a16821 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -321,6 +321,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: if (e1000_phy_is_accessible_pchlan(hw)) break; @@ -466,6 +467,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ @@ -711,6 +713,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; @@ -1278,9 +1281,11 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) usleep_range(10000, 11000); } if (firmware_bug) - e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10); + e_warn("ULP_CONFIG_DONE took %d msec. 
This is a firmware bug\n", + i * 10); else - e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10); + e_dbg("ULP_CONFIG_DONE cleared after %d msec\n", + i * 10); if (force) { mac_reg = er32(H2ME); @@ -1675,6 +1680,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: rc = e1000_init_phy_params_pchlan(hw); break; default: @@ -2130,6 +2136,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@ -3174,6 +3181,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: bank1_offset = nvm->flash_bank_size; act_offset = E1000_ICH_NVM_SIG_WORD; @@ -4113,6 +4121,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index e757896287eb..d6a092e5ee74 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -41,12 +41,15 @@ #define E1000_FWSM_WLOCK_MAC_MASK 0x0380 #define E1000_FWSM_WLOCK_MAC_SHIFT 7 #define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */ +#define E1000_EXFWSM_DPG_EXIT_DONE 0x00000001 /* Shared Receive Address Registers */ #define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) #define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8)) #define E1000_H2ME 0x05B50 /* Host to ME */ +#define E1000_H2ME_START_DPG 0x00000001 /* indicate the ME of DPG */ +#define E1000_H2ME_EXIT_DPG 0x00000002 /* indicate the ME exit DPG */ #define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */ #define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 757a54c39eef..900b3ab998bd 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3550,6 +3550,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ incperiod = INCPERIOD_24MHZ; @@ -4068,6 +4069,7 @@ void e1000e_reset(struct e1000_adapter *adapter) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: fc->refresh_time = 0xFFFF; fc->pause_time = 0xFFFF; @@ -6343,42 +6345,110 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter) u32 mac_data; u16 phy_data; - /* Disable the periodic inband message, - * don't request PCIe clock in K1 page770_17[10:9] = 10b - */ - e1e_rphy(hw, HV_PM_CTRL, &phy_data); - phy_data &= ~HV_PM_CTRL_K1_CLK_REQ; - phy_data |= BIT(10); - e1e_wphy(hw, HV_PM_CTRL, phy_data); + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + /* Request ME configure the device for S0ix */ + mac_data = er32(H2ME); + mac_data |= E1000_H2ME_START_DPG; + mac_data &= ~E1000_H2ME_EXIT_DPG; + ew32(H2ME, mac_data); + } else { + /* Request driver configure the device to S0ix */ + /* Disable the periodic inband message, + * don't request PCIe clock in K1 page770_17[10:9] = 10b + */ + 
e1e_rphy(hw, HV_PM_CTRL, &phy_data); + phy_data &= ~HV_PM_CTRL_K1_CLK_REQ; + phy_data |= BIT(10); + e1e_wphy(hw, HV_PM_CTRL, phy_data); - /* Make sure we don't exit K1 every time a new packet arrives - * 772_29[5] = 1 CS_Mode_Stay_In_K1 - */ - e1e_rphy(hw, I217_CGFREG, &phy_data); - phy_data |= BIT(5); - e1e_wphy(hw, I217_CGFREG, phy_data); + /* Make sure we don't exit K1 every time a new packet arrives + * 772_29[5] = 1 CS_Mode_Stay_In_K1 + */ + e1e_rphy(hw, I217_CGFREG, &phy_data); + phy_data |= BIT(5); + e1e_wphy(hw, I217_CGFREG, phy_data); - /* Change the MAC/PHY interface to SMBus - * Force the SMBus in PHY page769_23[0] = 1 - * Force the SMBus in MAC CTRL_EXT[11] = 1 - */ - e1e_rphy(hw, CV_SMB_CTRL, &phy_data); - phy_data |= CV_SMB_CTRL_FORCE_SMBUS; - e1e_wphy(hw, CV_SMB_CTRL, phy_data); - mac_data = er32(CTRL_EXT); - mac_data |= E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_data); + /* Change the MAC/PHY interface to SMBus + * Force the SMBus in PHY page769_23[0] = 1 + * Force the SMBus in MAC CTRL_EXT[11] = 1 + */ + e1e_rphy(hw, CV_SMB_CTRL, &phy_data); + phy_data |= CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy(hw, CV_SMB_CTRL, phy_data); + mac_data = er32(CTRL_EXT); + mac_data |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_data); + + /* DFT control: PHY bit: page769_20[0] = 1 + * page769_20[7] - PHY PLL stop + * page769_20[8] - PHY go to the electrical idle + * page769_20[9] - PHY serdes disable + * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1 + */ + e1e_rphy(hw, I82579_DFT_CTRL, &phy_data); + phy_data |= BIT(0); + phy_data |= BIT(7); + phy_data |= BIT(8); + phy_data |= BIT(9); + e1e_wphy(hw, I82579_DFT_CTRL, phy_data); + + mac_data = er32(EXTCNF_CTRL); + mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; + ew32(EXTCNF_CTRL, mac_data); + + /* Enable the Dynamic Power Gating in the MAC */ + mac_data = er32(FEXTNVM7); + mac_data |= BIT(22); + ew32(FEXTNVM7, mac_data); + + /* Disable disconnected cable conditioning for Power Gating */ + mac_data = er32(DPGFR); + mac_data |= BIT(2); + ew32(DPGFR, mac_data); + + /* Don't wake from dynamic Power Gating with clock request */ + mac_data = er32(FEXTNVM12); + mac_data |= BIT(12); + ew32(FEXTNVM12, mac_data); + + /* Ungate PGCB clock */ + mac_data = er32(FEXTNVM9); + mac_data &= ~BIT(28); + ew32(FEXTNVM9, mac_data); + + /* Enable K1 off to enable mPHY Power Gating */ + mac_data = er32(FEXTNVM6); + mac_data |= BIT(31); + ew32(FEXTNVM6, mac_data); + + /* Enable mPHY power gating for any link and speed */ + mac_data = er32(FEXTNVM8); + mac_data |= BIT(9); + ew32(FEXTNVM8, mac_data); + + /* Enable the Dynamic Clock Gating in the DMA and MAC */ + mac_data = er32(CTRL_EXT); + mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN; + ew32(CTRL_EXT, mac_data); + + /* No MAC DPG gating SLP_S0 in modern standby + * Switch the logic of the lanphypc to use PMC counter + */ + mac_data = er32(FEXTNVM5); + mac_data |= BIT(7); + ew32(FEXTNVM5, mac_data); + } - /* DFT control: PHY bit: page769_20[0] = 1 - * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1 - */ - e1e_rphy(hw, I82579_DFT_CTRL, &phy_data); - phy_data |= BIT(0); - e1e_wphy(hw, I82579_DFT_CTRL, phy_data); + /* Disable the time synchronization clock */ + mac_data = er32(FEXTNVM7); + mac_data |= BIT(31); + mac_data &= ~BIT(0); + ew32(FEXTNVM7, mac_data); - mac_data = er32(EXTCNF_CTRL); - mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; - ew32(EXTCNF_CTRL, mac_data); + /* Dynamic Power Gating Enable */ + mac_data = er32(CTRL_EXT); + mac_data |= BIT(3); + ew32(CTRL_EXT, mac_data); /* Check MAC Tx/Rx packet buffer pointers. 
* Reset MAC Tx/Rx packet buffer pointers to suppress any @@ -6414,148 +6484,130 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter) mac_data = er32(RDFPC); if (mac_data) ew32(RDFPC, 0); - - /* Enable the Dynamic Power Gating in the MAC */ - mac_data = er32(FEXTNVM7); - mac_data |= BIT(22); - ew32(FEXTNVM7, mac_data); - - /* Disable the time synchronization clock */ - mac_data = er32(FEXTNVM7); - mac_data |= BIT(31); - mac_data &= ~BIT(0); - ew32(FEXTNVM7, mac_data); - - /* Dynamic Power Gating Enable */ - mac_data = er32(CTRL_EXT); - mac_data |= BIT(3); - ew32(CTRL_EXT, mac_data); - - /* Disable disconnected cable conditioning for Power Gating */ - mac_data = er32(DPGFR); - mac_data |= BIT(2); - ew32(DPGFR, mac_data); - - /* Don't wake from dynamic Power Gating with clock request */ - mac_data = er32(FEXTNVM12); - mac_data |= BIT(12); - ew32(FEXTNVM12, mac_data); - - /* Ungate PGCB clock */ - mac_data = er32(FEXTNVM9); - mac_data &= ~BIT(28); - ew32(FEXTNVM9, mac_data); - - /* Enable K1 off to enable mPHY Power Gating */ - mac_data = er32(FEXTNVM6); - mac_data |= BIT(31); - ew32(FEXTNVM6, mac_data); - - /* Enable mPHY power gating for any link and speed */ - mac_data = er32(FEXTNVM8); - mac_data |= BIT(9); - ew32(FEXTNVM8, mac_data); - - /* Enable the Dynamic Clock Gating in the DMA and MAC */ - mac_data = er32(CTRL_EXT); - mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN; - ew32(CTRL_EXT, mac_data); - - /* No MAC DPG gating SLP_S0 in modern standby - * Switch the logic of the lanphypc to use PMC counter - */ - mac_data = er32(FEXTNVM5); - mac_data |= BIT(7); - ew32(FEXTNVM5, mac_data); } static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + bool firmware_bug = false; u32 mac_data; u16 phy_data; + u32 i = 0; + + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + /* Request ME unconfigure the device from S0ix */ + mac_data = er32(H2ME); + mac_data &= ~E1000_H2ME_START_DPG; + mac_data |= E1000_H2ME_EXIT_DPG; + ew32(H2ME, mac_data); + + /* Poll up to 2.5 seconds for ME to unconfigure DPG. + * If this takes more than 1 second, show a warning indicating a + * firmware bug + */ + while (!(er32(EXFWSM) & E1000_EXFWSM_DPG_EXIT_DONE)) { + if (i > 100 && !firmware_bug) + firmware_bug = true; - /* Disable the Dynamic Power Gating in the MAC */ - mac_data = er32(FEXTNVM7); - mac_data &= 0xFFBFFFFF; - ew32(FEXTNVM7, mac_data); + if (i++ == 250) { + e_dbg("Timeout (firmware bug): %d msec\n", + i * 10); + break; + } - /* Enable the time synchronization clock */ - mac_data = er32(FEXTNVM7); - mac_data |= BIT(0); - ew32(FEXTNVM7, mac_data); + usleep_range(10000, 11000); + } + if (firmware_bug) + e_warn("DPG_EXIT_DONE took %d msec. 
This is a firmware bug\n", + i * 10); + else + e_dbg("DPG_EXIT_DONE cleared after %d msec\n", i * 10); + } else { + /* Request driver unconfigure the device from S0ix */ + + /* Disable the Dynamic Power Gating in the MAC */ + mac_data = er32(FEXTNVM7); + mac_data &= 0xFFBFFFFF; + ew32(FEXTNVM7, mac_data); + + /* Disable mPHY power gating for any link and speed */ + mac_data = er32(FEXTNVM8); + mac_data &= ~BIT(9); + ew32(FEXTNVM8, mac_data); + + /* Disable K1 off */ + mac_data = er32(FEXTNVM6); + mac_data &= ~BIT(31); + ew32(FEXTNVM6, mac_data); + + /* Disable Ungate PGCB clock */ + mac_data = er32(FEXTNVM9); + mac_data |= BIT(28); + ew32(FEXTNVM9, mac_data); + + /* Cancel not waking from dynamic + * Power Gating with clock request + */ + mac_data = er32(FEXTNVM12); + mac_data &= ~BIT(12); + ew32(FEXTNVM12, mac_data); - /* Disable mPHY power gating for any link and speed */ - mac_data = er32(FEXTNVM8); - mac_data &= ~BIT(9); - ew32(FEXTNVM8, mac_data); + /* Cancel disable disconnected cable conditioning + * for Power Gating + */ + mac_data = er32(DPGFR); + mac_data &= ~BIT(2); + ew32(DPGFR, mac_data); - /* Disable K1 off */ - mac_data = er32(FEXTNVM6); - mac_data &= ~BIT(31); - ew32(FEXTNVM6, mac_data); + /* Disable the Dynamic Clock Gating in the DMA and MAC */ + mac_data = er32(CTRL_EXT); + mac_data &= 0xFFF7FFFF; + ew32(CTRL_EXT, mac_data); - /* Disable Ungate PGCB clock */ - mac_data = er32(FEXTNVM9); - mac_data |= BIT(28); - ew32(FEXTNVM9, mac_data); + /* Revert the lanphypc logic to use the internal Gbe counter + * and not the PMC counter + */ + mac_data = er32(FEXTNVM5); + mac_data &= 0xFFFFFF7F; + ew32(FEXTNVM5, mac_data); - /* Cancel not waking from dynamic - * Power Gating with clock request - */ - mac_data = er32(FEXTNVM12); - mac_data &= ~BIT(12); - ew32(FEXTNVM12, mac_data); + /* Enable the periodic inband message, + * Request PCIe clock in K1 page770_17[10:9] =01b + */ + e1e_rphy(hw, HV_PM_CTRL, &phy_data); + phy_data &= 0xFBFF; + phy_data |= HV_PM_CTRL_K1_CLK_REQ; + e1e_wphy(hw, HV_PM_CTRL, phy_data); - /* Cancel disable disconnected cable conditioning - * for Power Gating - */ - mac_data = er32(DPGFR); - mac_data &= ~BIT(2); - ew32(DPGFR, mac_data); + /* Return back configuration + * 772_29[5] = 0 CS_Mode_Stay_In_K1 + */ + e1e_rphy(hw, I217_CGFREG, &phy_data); + phy_data &= 0xFFDF; + e1e_wphy(hw, I217_CGFREG, phy_data); + + /* Change the MAC/PHY interface to Kumeran + * Unforce the SMBus in PHY page769_23[0] = 0 + * Unforce the SMBus in MAC CTRL_EXT[11] = 0 + */ + e1e_rphy(hw, CV_SMB_CTRL, &phy_data); + phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy(hw, CV_SMB_CTRL, phy_data); + mac_data = er32(CTRL_EXT); + mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_data); + } /* Disable Dynamic Power Gating */ mac_data = er32(CTRL_EXT); mac_data &= 0xFFFFFFF7; ew32(CTRL_EXT, mac_data); - /* Disable the Dynamic Clock Gating in the DMA and MAC */ - mac_data = er32(CTRL_EXT); - mac_data &= 0xFFF7FFFF; - ew32(CTRL_EXT, mac_data); - - /* Revert the lanphypc logic to use the internal Gbe counter - * and not the PMC counter - */ - mac_data = er32(FEXTNVM5); - mac_data &= 0xFFFFFF7F; - ew32(FEXTNVM5, mac_data); - - /* Enable the periodic inband message, - * Request PCIe clock in K1 page770_17[10:9] =01b - */ - e1e_rphy(hw, HV_PM_CTRL, &phy_data); - phy_data &= 0xFBFF; - phy_data |= HV_PM_CTRL_K1_CLK_REQ; - e1e_wphy(hw, HV_PM_CTRL, phy_data); - - /* Return back configuration - * 772_29[5] = 0 CS_Mode_Stay_In_K1 - */ - e1e_rphy(hw, I217_CGFREG, &phy_data); - phy_data &= 0xFFDF; 
- e1e_wphy(hw, I217_CGFREG, phy_data); - - /* Change the MAC/PHY interface to Kumeran - * Unforce the SMBus in PHY page769_23[0] = 0 - * Unforce the SMBus in MAC CTRL_EXT[11] = 0 - */ - e1e_rphy(hw, CV_SMB_CTRL, &phy_data); - phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS; - e1e_wphy(hw, CV_SMB_CTRL, phy_data); - mac_data = er32(CTRL_EXT); - mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_data); + /* Enable the time synchronization clock */ + mac_data = er32(FEXTNVM7); + mac_data &= ~BIT(31); + mac_data |= BIT(0); + ew32(FEXTNVM7, mac_data); } static int e1000e_pm_freeze(struct device *dev) @@ -7302,7 +7354,7 @@ static const struct net_device_ops e1000e_netdev_ops = { .ndo_set_rx_mode = e1000e_set_rx_mode, .ndo_set_mac_address = e1000_set_mac, .ndo_change_mtu = e1000_change_mtu, - .ndo_do_ioctl = e1000_ioctl, + .ndo_eth_ioctl = e1000_ioctl, .ndo_tx_timeout = e1000_tx_timeout, .ndo_validate_addr = eth_validate_addr, @@ -7677,7 +7729,7 @@ err_dma: * @pdev: PCI device information struct * * e1000_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a + * that it should release a PCI device. This could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ @@ -7850,14 +7902,22 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 9e79d672f4f1..eb5c014c02fb 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -298,6 +298,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: if ((hw->mac.type < e1000_pch_lpt) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { adapter->ptp_clock_info.max_adj = 24000000 - 1; diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 8165ba2619a4..6c0cd8cab3ef 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -213,6 +213,7 @@ #define E1000_FACTPS 
0x05B30 /* Function Active and Power State to MNG */ #define E1000_SWSM 0x05B50 /* SW Semaphore */ #define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_EXFWSM 0x05B58 /* Extended FW Semaphore */ /* Driver-only SW semaphore (not used by BOOT agents) */ #define E1000_SWSM2 0x05B58 #define E1000_FFLT_DBG 0x05F04 /* Debug Register */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 66776ba7bfb6..0d37f011d0ce 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -632,7 +632,9 @@ clear_reset: } static int fm10k_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct fm10k_intfc *interface = netdev_priv(dev); @@ -646,7 +648,9 @@ static int fm10k_get_coalesce(struct net_device *dev, } static int fm10k_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct fm10k_intfc *interface = netdev_priv(dev); u16 tx_itr, rx_itr; diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index b9417dc0007c..39fb3d57c057 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -428,6 +428,8 @@ struct i40e_channel { struct i40e_vsi *parent_vsi; }; +struct i40e_ptp_pins_settings; + static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch) { return !!ch->fwd; @@ -644,12 +646,83 @@ struct i40e_pf { struct i40e_rx_pb_config pb_cfg; /* Current Rx packet buffer config */ struct i40e_dcbx_config tmp_cfg; +/* GPIO defines used by PTP */ +#define I40E_SDP3_2 18 +#define I40E_SDP3_3 19 +#define I40E_GPIO_4 20 +#define I40E_LED2_0 26 +#define I40E_LED2_1 27 +#define I40E_LED3_0 28 +#define I40E_LED3_1 29 +#define I40E_GLGEN_GPIO_SET_SDP_DATA_HI \ + (1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT) +#define I40E_GLGEN_GPIO_SET_DRV_SDP_DATA \ + (1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_0 \ + (0 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_1 \ + (1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) +#define I40E_GLGEN_GPIO_CTL_RESERVED BIT(2) +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_Z \ + (1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) +#define I40E_GLGEN_GPIO_CTL_DIR_OUT \ + (1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT) +#define I40E_GLGEN_GPIO_CTL_TRI_DRV_HI \ + (1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_HI_RST \ + (1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_TIMESYNC_0 \ + (3 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) +#define I40E_GLGEN_GPIO_CTL_TIMESYNC_1 \ + (4 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) +#define I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN \ + (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT \ + (1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PORT_0_IN_TIMESYNC_0 \ + (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \ + I40E_GLGEN_GPIO_CTL_TIMESYNC_0 | \ + I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_0) +#define I40E_GLGEN_GPIO_CTL_PORT_1_IN_TIMESYNC_0 \ + (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \ + I40E_GLGEN_GPIO_CTL_TIMESYNC_0 | \ + I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_1) +#define I40E_GLGEN_GPIO_CTL_PORT_0_OUT_TIMESYNC_1 \ + 
(I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \ + I40E_GLGEN_GPIO_CTL_TIMESYNC_1 | I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \ + I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | I40E_GLGEN_GPIO_CTL_DIR_OUT | \ + I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_0) +#define I40E_GLGEN_GPIO_CTL_PORT_1_OUT_TIMESYNC_1 \ + (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \ + I40E_GLGEN_GPIO_CTL_TIMESYNC_1 | I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \ + I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | I40E_GLGEN_GPIO_CTL_DIR_OUT | \ + I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_1) +#define I40E_GLGEN_GPIO_CTL_LED_INIT \ + (I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_Z | \ + I40E_GLGEN_GPIO_CTL_DIR_OUT | \ + I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | \ + I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \ + I40E_GLGEN_GPIO_CTL_OUT_DEFAULT | \ + I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN) +#define I40E_PRTTSYN_AUX_1_INSTNT \ + (1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUT_ENABLE \ + (1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUT_CLK_MOD (3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUT_ENABLE_CLK_MOD \ + (I40E_PRTTSYN_AUX_0_OUT_ENABLE | I40E_PRTTSYN_AUX_0_OUT_CLK_MOD) +#define I40E_PTP_HALF_SECOND 500000000LL /* nano seconds */ +#define I40E_PTP_2_SEC_DELAY 2 + struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; struct sk_buff *ptp_tx_skb; unsigned long ptp_tx_start; struct hwtstamp_config tstamp_config; struct timespec64 ptp_prev_hw_time; + struct work_struct ptp_pps_work; + struct work_struct ptp_extts0_work; + struct work_struct ptp_extts1_work; ktime_t ptp_reset_start; struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */ u32 ptp_adj_mult; @@ -657,10 +730,14 @@ struct i40e_pf { u32 tx_hwtstamp_skipped; u32 rx_hwtstamp_cleared; u32 latch_event_flags; + u64 ptp_pps_start; + u32 pps_delay; spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */ + struct ptp_pin_desc ptp_pin[3]; unsigned long latch_events[4]; bool ptp_tx; bool ptp_rx; + struct i40e_ptp_pins_settings *ptp_pins; u16 rss_table_size; /* HW RSS table size */ u32 max_bw; u32 min_bw; @@ -1169,6 +1246,7 @@ void i40e_ptp_save_hw_time(struct i40e_pf *pf); void i40e_ptp_restore_hw_time(struct i40e_pf *pf); void i40e_ptp_init(struct i40e_pf *pf); void i40e_ptp_stop(struct i40e_pf *pf); +int i40e_ptp_alloc_pins(struct i40e_pf *pf); int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf); i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 2c9e4eeb7270..513ba6974355 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2812,13 +2812,17 @@ static int __i40e_get_coalesce(struct net_device *netdev, * i40e_get_coalesce - get a netdev's coalesce settings * @netdev: the netdev to check * @ec: ethtool coalesce data structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Gets the coalesce settings for a particular netdev. Note that if user has * modified per-queue settings, this only guarantees to represent queue 0. See * __i40e_get_coalesce for more details. 
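+ *
+ * Passing a queue number of -1, as this wrapper does, requests the
+ * device-wide settings (represented by queue 0) rather than those of
+ * a single queue.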
**/ static int i40e_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __i40e_get_coalesce(netdev, ec, -1); } @@ -2986,11 +2990,15 @@ static int __i40e_set_coalesce(struct net_device *netdev, * i40e_set_coalesce - set coalesce settings for every queue on the netdev * @netdev: the netdev to change * @ec: ethtool coalesce settings + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * This will set each queue to the same coalesce settings. **/ static int i40e_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __i40e_set_coalesce(netdev, ec, -1); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 1d1f52756a93..2f20980dd9a5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4079,10 +4079,13 @@ static irqreturn_t i40e_intr(int irq, void *data) if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); - if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { - icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK) + schedule_work(&pf->ptp_extts0_work); + + if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) i40e_ptp_tx_hwtstamp(pf); - } + + icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; } /* If a critical error is pending we have no choice but to reset the @@ -4635,7 +4638,7 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi) err = i40e_control_wait_rx_q(pf, pf_q, false); if (err) dev_info(&pf->pdev->dev, - "VSI seid %d Rx ring %d dissable timeout\n", + "VSI seid %d Rx ring %d disable timeout\n", vsi->seid, pf_q); } @@ -13265,7 +13268,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = i40e_set_mac, .ndo_change_mtu = i40e_change_mtu, - .ndo_do_ioctl = i40e_ioctl, + .ndo_eth_ioctl = i40e_ioctl, .ndo_tx_timeout = i40e_tx_timeout, .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, @@ -15181,6 +15184,22 @@ err_switch_setup: } /** + * i40e_set_subsystem_device_id - set subsystem device id + * @hw: pointer to the hardware info + * + * Set PCI subsystem device id either from a pci_dev structure or + * a specific FW register. + **/ +static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw) +{ + struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev; + + hw->subsystem_device_id = pdev->subsystem_device ? 
+ pdev->subsystem_device : + (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX); +} + +/** * i40e_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in i40e_pci_tbl @@ -15275,7 +15294,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->device_id = pdev->device; pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; + i40e_set_subsystem_device_id(hw); hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); hw->bus.bus_id = pdev->bus->number; @@ -15455,6 +15474,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (is_valid_ether_addr(hw->mac.port_addr)) pf->hw_features |= I40E_HW_PORT_ID_VALID; + i40e_ptp_alloc_pins(pf); pci_set_drvdata(pdev, pf); pci_save_state(pdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 7b971b205d36..09b1d5aed1c9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -3,6 +3,7 @@ #include "i40e.h" #include <linux/ptp_classify.h> +#include <linux/posix-clock.h> /* The XL710 timesync is very much like Intel's 82599 design when it comes to * the fundamental clock design. However, the clock operations are much simpler @@ -20,10 +21,252 @@ #define I40E_PTP_10GB_INCVAL_MULT 2 #define I40E_PTP_5GB_INCVAL_MULT 2 #define I40E_PTP_1GB_INCVAL_MULT 20 +#define I40E_ISGN 0x80000000 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) #define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_SUBDEV_ID_25G_PTP_PIN 0xB +#define to_dev(obj) container_of(obj, struct device, kobj) + +enum i40e_ptp_pin { + SDP3_2 = 0, + SDP3_3, + GPIO_4 +}; + +enum i40e_can_set_pins_t { + CANT_DO_PINS = -1, + CAN_SET_PINS, + CAN_DO_PINS +}; + +static struct ptp_pin_desc sdp_desc[] = { + /* name idx func chan */ + {"SDP3_2", SDP3_2, PTP_PF_NONE, 0}, + {"SDP3_3", SDP3_3, PTP_PF_NONE, 1}, + {"GPIO_4", GPIO_4, PTP_PF_NONE, 1}, +}; + +enum i40e_ptp_gpio_pin_state { + end = -2, + invalid, + off, + in_A, + in_B, + out_A, + out_B, +}; + +static const char * const i40e_ptp_gpio_pin_state2str[] = { + "off", "in_A", "in_B", "out_A", "out_B" +}; + +enum i40e_ptp_led_pin_state { + led_end = -2, + low = 0, + high, +}; + +struct i40e_ptp_pins_settings { + enum i40e_ptp_gpio_pin_state sdp3_2; + enum i40e_ptp_gpio_pin_state sdp3_3; + enum i40e_ptp_gpio_pin_state gpio_4; + enum i40e_ptp_led_pin_state led2_0; + enum i40e_ptp_led_pin_state led2_1; + enum i40e_ptp_led_pin_state led3_0; + enum i40e_ptp_led_pin_state led3_1; +}; + +static const struct i40e_ptp_pins_settings + i40e_ptp_pin_led_allowed_states[] = { + {off, off, off, high, high, high, high}, + {off, in_A, off, high, high, high, low}, + {off, out_A, off, high, low, high, high}, + {off, in_B, off, high, high, high, low}, + {off, out_B, off, high, low, high, high}, + {in_A, off, off, high, high, high, low}, + {in_A, in_B, off, high, high, high, low}, + {in_A, out_B, off, high, low, high, high}, + {out_A, off, off, high, low, high, high}, + {out_A, in_B, off, high, low, high, high}, + {in_B, off, off, high, high, high, low}, + {in_B, in_A, off, high, high, high, low}, + {in_B, out_A, off, high, low, high, high}, + {out_B, off, off, high, low, high, high}, + {out_B, in_A, off, high, low, high, high}, + {off, off, in_A, high, high, low, high}, + {off, out_A, 
in_A, high, low, low, high}, + {off, in_B, in_A, high, high, low, low}, + {off, out_B, in_A, high, low, low, high}, + {out_A, off, in_A, high, low, low, high}, + {out_A, in_B, in_A, high, low, low, high}, + {in_B, off, in_A, high, high, low, low}, + {in_B, out_A, in_A, high, low, low, high}, + {out_B, off, in_A, high, low, low, high}, + {off, off, out_A, low, high, high, high}, + {off, in_A, out_A, low, high, high, low}, + {off, in_B, out_A, low, high, high, low}, + {off, out_B, out_A, low, low, high, high}, + {in_A, off, out_A, low, high, high, low}, + {in_A, in_B, out_A, low, high, high, low}, + {in_A, out_B, out_A, low, low, high, high}, + {in_B, off, out_A, low, high, high, low}, + {in_B, in_A, out_A, low, high, high, low}, + {out_B, off, out_A, low, low, high, high}, + {out_B, in_A, out_A, low, low, high, high}, + {off, off, in_B, high, high, low, high}, + {off, in_A, in_B, high, high, low, low}, + {off, out_A, in_B, high, low, low, high}, + {off, out_B, in_B, high, low, low, high}, + {in_A, off, in_B, high, high, low, low}, + {in_A, out_B, in_B, high, low, low, high}, + {out_A, off, in_B, high, low, low, high}, + {out_B, off, in_B, high, low, low, high}, + {out_B, in_A, in_B, high, low, low, high}, + {off, off, out_B, low, high, high, high}, + {off, in_A, out_B, low, high, high, low}, + {off, out_A, out_B, low, low, high, high}, + {off, in_B, out_B, low, high, high, low}, + {in_A, off, out_B, low, high, high, low}, + {in_A, in_B, out_B, low, high, high, low}, + {out_A, off, out_B, low, low, high, high}, + {out_A, in_B, out_B, low, low, high, high}, + {in_B, off, out_B, low, high, high, low}, + {in_B, in_A, out_B, low, high, high, low}, + {in_B, out_A, out_B, low, low, high, high}, + {end, end, end, led_end, led_end, led_end, led_end} +}; + +static int i40e_ptp_set_pins(struct i40e_pf *pf, + struct i40e_ptp_pins_settings *pins); + +/** + * i40e_ptp_extts0_work - workqueue task function + * @work: workqueue task structure + * + * Service for PTP external clock event + **/ +static void i40e_ptp_extts0_work(struct work_struct *work) +{ + struct i40e_pf *pf = container_of(work, struct i40e_pf, + ptp_extts0_work); + struct i40e_hw *hw = &pf->hw; + struct ptp_clock_event event; + u32 hi, lo; + + /* Event time is captured by one of the two matched registers + * PRTTSYN_EVNT_L: 32 LSB of sampled time event + * PRTTSYN_EVNT_H: 32 MSB of sampled time event + * Event is defined in PRTTSYN_EVNT_0 register + */ + lo = rd32(hw, I40E_PRTTSYN_EVNT_L(0)); + hi = rd32(hw, I40E_PRTTSYN_EVNT_H(0)); + + event.timestamp = (((u64)hi) << 32) | lo; + + event.type = PTP_CLOCK_EXTTS; + event.index = hw->pf_id; + + /* fire event */ + ptp_clock_event(pf->ptp_clock, &event); +} + +/** + * i40e_is_ptp_pin_dev - check if device supports PTP pins + * @hw: pointer to the hardware structure + * + * Return true if device supports PTP pins, false otherwise. + **/ +static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw) +{ + return hw->device_id == I40E_DEV_ID_25G_SFP28 && + hw->subsystem_device_id == I40E_SUBDEV_ID_25G_PTP_PIN; +} + +/** + * i40e_can_set_pins - check possibility of manipulating the pins + * @pf: board private structure + * + * Check if all conditions are satisfied to manipulate PTP pins. + * Return CAN_SET_PINS if pins can be set on a specific PF or + * return CAN_DO_PINS if pins can be manipulated within a NIC or + * return CANT_DO_PINS otherwise. 
+ **/ +static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf) +{ + if (!i40e_is_ptp_pin_dev(&pf->hw)) { + dev_warn(&pf->pdev->dev, + "PTP external clock not supported.\n"); + return CANT_DO_PINS; + } + + if (!pf->ptp_pins) { + dev_warn(&pf->pdev->dev, + "PTP PIN manipulation not allowed.\n"); + return CANT_DO_PINS; + } + + if (pf->hw.pf_id) { + dev_warn(&pf->pdev->dev, + "PTP PINs should be accessed via PF0.\n"); + return CAN_DO_PINS; + } + + return CAN_SET_PINS; +} + +/** + * i40_ptp_reset_timing_events - Reset PTP timing events + * @pf: Board private structure + * + * This function resets timing events for pf. + **/ +static void i40_ptp_reset_timing_events(struct i40e_pf *pf) +{ + u32 i; + + spin_lock_bh(&pf->ptp_rx_lock); + for (i = 0; i <= I40E_PRTTSYN_RXTIME_L_MAX_INDEX; i++) { + /* reading and automatically clearing timing events registers */ + rd32(&pf->hw, I40E_PRTTSYN_RXTIME_L(i)); + rd32(&pf->hw, I40E_PRTTSYN_RXTIME_H(i)); + pf->latch_events[i] = 0; + } + /* reading and automatically clearing timing events registers */ + rd32(&pf->hw, I40E_PRTTSYN_TXTIME_L); + rd32(&pf->hw, I40E_PRTTSYN_TXTIME_H); + + pf->tx_hwtstamp_timeouts = 0; + pf->tx_hwtstamp_skipped = 0; + pf->rx_hwtstamp_cleared = 0; + pf->latch_event_flags = 0; + spin_unlock_bh(&pf->ptp_rx_lock); +} + +/** + * i40e_ptp_verify - check pins + * @ptp: ptp clock + * @pin: pin index + * @func: assigned function + * @chan: channel + * + * Check pins consistency. + * Return 0 on success or error on failure. + **/ +static int i40e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -EOPNOTSUPP; + } + return 0; +} /** * i40e_ptp_read - Read the PHC time from the device @@ -137,6 +380,37 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) } /** + * i40e_ptp_set_1pps_signal_hw - configure 1PPS PTP signal for pins + * @pf: the PF private data structure + * + * Configure 1PPS signal used for PTP pins + **/ +static void i40e_ptp_set_1pps_signal_hw(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + struct timespec64 now; + u64 ns; + + wr32(hw, I40E_PRTTSYN_AUX_0(1), 0); + wr32(hw, I40E_PRTTSYN_AUX_1(1), I40E_PRTTSYN_AUX_1_INSTNT); + wr32(hw, I40E_PRTTSYN_AUX_0(1), I40E_PRTTSYN_AUX_0_OUT_ENABLE); + + i40e_ptp_read(pf, &now, NULL); + now.tv_sec += I40E_PTP_2_SEC_DELAY; + now.tv_nsec = 0; + ns = timespec64_to_ns(&now); + + /* I40E_PRTTSYN_TGT_L(1) */ + wr32(hw, I40E_PRTTSYN_TGT_L(1), ns & 0xFFFFFFFF); + /* I40E_PRTTSYN_TGT_H(1) */ + wr32(hw, I40E_PRTTSYN_TGT_H(1), ns >> 32); + wr32(hw, I40E_PRTTSYN_CLKO(1), I40E_PTP_HALF_SECOND); + wr32(hw, I40E_PRTTSYN_AUX_1(1), I40E_PRTTSYN_AUX_1_INSTNT); + wr32(hw, I40E_PRTTSYN_AUX_0(1), + I40E_PRTTSYN_AUX_0_OUT_ENABLE_CLK_MOD); +} + +/** * i40e_ptp_adjtime - Adjust the PHC time * @ptp: The PTP clock structure * @delta: Offset in nanoseconds to adjust the PHC time by @@ -146,14 +420,35 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); - struct timespec64 now, then; + struct i40e_hw *hw = &pf->hw; - then = ns_to_timespec64(delta); mutex_lock(&pf->tmreg_lock); - i40e_ptp_read(pf, &now, NULL); - now = timespec64_add(now, then); - i40e_ptp_write(pf, (const struct timespec64 *)&now); + if (delta > -999999900LL && delta < 999999900LL) { + int 
neg_adj = 0; + u32 timadj; + u64 tohw; + + if (delta < 0) { + neg_adj = 1; + tohw = -delta; + } else { + tohw = delta; + } + + timadj = tohw & 0x3FFFFFFF; + if (neg_adj) + timadj |= I40E_ISGN; + wr32(hw, I40E_PRTTSYN_ADJ, timadj); + } else { + struct timespec64 then, now; + + then = ns_to_timespec64(delta); + i40e_ptp_read(pf, &now, NULL); + now = timespec64_add(now, then); + i40e_ptp_write(pf, (const struct timespec64 *)&now); + i40e_ptp_set_1pps_signal_hw(pf); + } mutex_unlock(&pf->tmreg_lock); @@ -184,7 +479,7 @@ static int i40e_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, /** * i40e_ptp_settime - Set the time of the PHC * @ptp: The PTP clock structure - * @ts: timespec structure that holds the new time value + * @ts: timespec64 structure that holds the new time value * * Set the device clock to the user input value. The conversion from timespec * to ns happens in the write function. @@ -202,18 +497,145 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp, } /** - * i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem + * i40e_pps_configure - configure PPS events + * @ptp: ptp clock + * @rq: clock request + * @on: status + * + * Configure PPS events for external clock source. + * Return 0 on success or error on failure. + **/ +static int i40e_pps_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + + if (!!on) + i40e_ptp_set_1pps_signal_hw(pf); + + return 0; +} + +/** + * i40e_pin_state - determine PIN state + * @index: PIN index + * @func: function assigned to PIN + * + * Determine PIN state based on PIN index and function assigned. + * Return PIN state. + **/ +static enum i40e_ptp_gpio_pin_state i40e_pin_state(int index, int func) +{ + enum i40e_ptp_gpio_pin_state state = off; + + if (index == 0 && func == PTP_PF_EXTTS) + state = in_A; + if (index == 1 && func == PTP_PF_EXTTS) + state = in_B; + if (index == 0 && func == PTP_PF_PEROUT) + state = out_A; + if (index == 1 && func == PTP_PF_PEROUT) + state = out_B; + + return state; +} + +/** + * i40e_ptp_enable_pin - enable PINs. + * @pf: private board structure + * @chan: channel + * @func: PIN function + * @on: state + * + * Enable PTP pins for external clock source. + * Return 0 on success or error code on failure. + **/ +static int i40e_ptp_enable_pin(struct i40e_pf *pf, unsigned int chan, + enum ptp_pin_function func, int on) +{ + enum i40e_ptp_gpio_pin_state *pin = NULL; + struct i40e_ptp_pins_settings pins; + int pin_index; + + /* Use PF0 to set pins. Return success for user space tools */ + if (pf->hw.pf_id) + return 0; + + /* Preserve previous state of pins that we don't touch */ + pins.sdp3_2 = pf->ptp_pins->sdp3_2; + pins.sdp3_3 = pf->ptp_pins->sdp3_3; + pins.gpio_4 = pf->ptp_pins->gpio_4; + + /* To turn on the pin - find the corresponding one based on + * the given index. To turn the function off - find + * which pin had it assigned. Don't use ptp_find_pin here + * because it tries to lock the pincfg_mux which is locked by + * ptp_pin_store() that calls here. 
+ */ + if (on) { + pin_index = ptp_find_pin(pf->ptp_clock, func, chan); + if (pin_index < 0) + return -EBUSY; + + switch (pin_index) { + case SDP3_2: + pin = &pins.sdp3_2; + break; + case SDP3_3: + pin = &pins.sdp3_3; + break; + case GPIO_4: + pin = &pins.gpio_4; + break; + default: + return -EINVAL; + } + + *pin = i40e_pin_state(chan, func); + } else { + pins.sdp3_2 = off; + pins.sdp3_3 = off; + pins.gpio_4 = off; + } + + return i40e_ptp_set_pins(pf, &pins) ? -EINVAL : 0; +} + +/** + * i40e_ptp_feature_enable - Enable external clock pins * @ptp: The PTP clock structure - * @rq: The requested feature to change - * @on: Enable/disable flag + * @rq: The PTP clock request structure + * @on: To turn feature on/off * - * The XL710 does not support any of the ancillary features of the PHC - * subsystem, so this function may just return. + * Setting on/off PTP PPS feature for pin. **/ static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) + struct ptp_clock_request *rq, + int on) { - return -EOPNOTSUPP; + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + + enum ptp_pin_function func; + unsigned int chan; + + /* TODO: Implement flags handling for EXTTS and PEROUT */ + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + func = PTP_PF_EXTTS; + chan = rq->extts.index; + break; + case PTP_CLK_REQ_PEROUT: + func = PTP_PF_PEROUT; + chan = rq->perout.index; + break; + case PTP_CLK_REQ_PPS: + return i40e_pps_configure(ptp, rq, on); + default: + return -EOPNOTSUPP; + } + + return i40e_ptp_enable_pin(pf, chan, func, on); } /** @@ -528,6 +950,229 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) } /** + * i40e_ptp_free_pins - free memory used by PTP pins + * @pf: Board private structure + * + * Release memory allocated for PTP pins. + **/ +static void i40e_ptp_free_pins(struct i40e_pf *pf) +{ + if (i40e_is_ptp_pin_dev(&pf->hw)) { + kfree(pf->ptp_pins); + kfree(pf->ptp_caps.pin_config); + pf->ptp_pins = NULL; + } +} + +/** + * i40e_ptp_set_pin_hw - Set HW GPIO pin + * @hw: pointer to the hardware structure + * @pin: pin index + * @state: pin state + * + * Set status of GPIO pin for external clock handling. + **/ +static void i40e_ptp_set_pin_hw(struct i40e_hw *hw, + unsigned int pin, + enum i40e_ptp_gpio_pin_state state) +{ + switch (state) { + case off: + wr32(hw, I40E_GLGEN_GPIO_CTL(pin), 0); + break; + case in_A: + wr32(hw, I40E_GLGEN_GPIO_CTL(pin), + I40E_GLGEN_GPIO_CTL_PORT_0_IN_TIMESYNC_0); + break; + case in_B: + wr32(hw, I40E_GLGEN_GPIO_CTL(pin), + I40E_GLGEN_GPIO_CTL_PORT_1_IN_TIMESYNC_0); + break; + case out_A: + wr32(hw, I40E_GLGEN_GPIO_CTL(pin), + I40E_GLGEN_GPIO_CTL_PORT_0_OUT_TIMESYNC_1); + break; + case out_B: + wr32(hw, I40E_GLGEN_GPIO_CTL(pin), + I40E_GLGEN_GPIO_CTL_PORT_1_OUT_TIMESYNC_1); + break; + default: + break; + } +} + +/** + * i40e_ptp_set_led_hw - Set HW GPIO led + * @hw: pointer to the hardware structure + * @led: led index + * @state: led state + * + * Set status of GPIO led for external clock handling. 
+ **/ +static void i40e_ptp_set_led_hw(struct i40e_hw *hw, + unsigned int led, + enum i40e_ptp_led_pin_state state) +{ + switch (state) { + case low: + wr32(hw, I40E_GLGEN_GPIO_SET, + I40E_GLGEN_GPIO_SET_DRV_SDP_DATA | led); + break; + case high: + wr32(hw, I40E_GLGEN_GPIO_SET, + I40E_GLGEN_GPIO_SET_DRV_SDP_DATA | + I40E_GLGEN_GPIO_SET_SDP_DATA_HI | led); + break; + default: + break; + } +} + +/** + * i40e_ptp_init_leds_hw - init LEDs + * @hw: pointer to a hardware structure + * + * Set initial state of LEDs + **/ +static void i40e_ptp_init_leds_hw(struct i40e_hw *hw) +{ + wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED2_0), + I40E_GLGEN_GPIO_CTL_LED_INIT); + wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED2_1), + I40E_GLGEN_GPIO_CTL_LED_INIT); + wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED3_0), + I40E_GLGEN_GPIO_CTL_LED_INIT); + wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED3_1), + I40E_GLGEN_GPIO_CTL_LED_INIT); +} + +/** + * i40e_ptp_set_pins_hw - Set HW GPIO pins + * @pf: Board private structure + * + * This function sets GPIO pins for PTP + **/ +static void i40e_ptp_set_pins_hw(struct i40e_pf *pf) +{ + const struct i40e_ptp_pins_settings *pins = pf->ptp_pins; + struct i40e_hw *hw = &pf->hw; + + /* pin must be disabled before it may be used */ + i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, off); + i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, off); + i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, off); + + i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, pins->sdp3_2); + i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, pins->sdp3_3); + i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, pins->gpio_4); + + i40e_ptp_set_led_hw(hw, I40E_LED2_0, pins->led2_0); + i40e_ptp_set_led_hw(hw, I40E_LED2_1, pins->led2_1); + i40e_ptp_set_led_hw(hw, I40E_LED3_0, pins->led3_0); + i40e_ptp_set_led_hw(hw, I40E_LED3_1, pins->led3_1); + + dev_info(&pf->pdev->dev, + "PTP configuration set to: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s.\n", + i40e_ptp_gpio_pin_state2str[pins->sdp3_2], + i40e_ptp_gpio_pin_state2str[pins->sdp3_3], + i40e_ptp_gpio_pin_state2str[pins->gpio_4]); +} + +/** + * i40e_ptp_set_pins - set PTP pins in HW + * @pf: Board private structure + * @pins: PTP pins to be applied + * + * Validate and set PTP pins in HW for specific PF. + * Return 0 on success or negative value on error. 
+ **/ +static int i40e_ptp_set_pins(struct i40e_pf *pf, + struct i40e_ptp_pins_settings *pins) +{ + enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf); + int i = 0; + + if (pin_caps == CANT_DO_PINS) + return -EOPNOTSUPP; + else if (pin_caps == CAN_DO_PINS) + return 0; + + if (pins->sdp3_2 == invalid) + pins->sdp3_2 = pf->ptp_pins->sdp3_2; + if (pins->sdp3_3 == invalid) + pins->sdp3_3 = pf->ptp_pins->sdp3_3; + if (pins->gpio_4 == invalid) + pins->gpio_4 = pf->ptp_pins->gpio_4; + while (i40e_ptp_pin_led_allowed_states[i].sdp3_2 != end) { + if (pins->sdp3_2 == i40e_ptp_pin_led_allowed_states[i].sdp3_2 && + pins->sdp3_3 == i40e_ptp_pin_led_allowed_states[i].sdp3_3 && + pins->gpio_4 == i40e_ptp_pin_led_allowed_states[i].gpio_4) { + pins->led2_0 = + i40e_ptp_pin_led_allowed_states[i].led2_0; + pins->led2_1 = + i40e_ptp_pin_led_allowed_states[i].led2_1; + pins->led3_0 = + i40e_ptp_pin_led_allowed_states[i].led3_0; + pins->led3_1 = + i40e_ptp_pin_led_allowed_states[i].led3_1; + break; + } + i++; + } + if (i40e_ptp_pin_led_allowed_states[i].sdp3_2 == end) { + dev_warn(&pf->pdev->dev, + "Unsupported PTP pin configuration: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s.\n", + i40e_ptp_gpio_pin_state2str[pins->sdp3_2], + i40e_ptp_gpio_pin_state2str[pins->sdp3_3], + i40e_ptp_gpio_pin_state2str[pins->gpio_4]); + + return -EPERM; + } + memcpy(pf->ptp_pins, pins, sizeof(*pins)); + i40e_ptp_set_pins_hw(pf); + i40_ptp_reset_timing_events(pf); + + return 0; +} + +/** + * i40e_ptp_alloc_pins - allocate PTP pins structure + * @pf: Board private structure + * + * Allocate PTP pins structure. + **/ +int i40e_ptp_alloc_pins(struct i40e_pf *pf) +{ + if (!i40e_is_ptp_pin_dev(&pf->hw)) + return 0; + + pf->ptp_pins = + kzalloc(sizeof(struct i40e_ptp_pins_settings), GFP_KERNEL); + + if (!pf->ptp_pins) { + dev_warn(&pf->pdev->dev, "Cannot allocate memory for PTP pins structure.\n"); + return -I40E_ERR_NO_MEMORY; + } + + pf->ptp_pins->sdp3_2 = off; + pf->ptp_pins->sdp3_3 = off; + pf->ptp_pins->gpio_4 = off; + pf->ptp_pins->led2_0 = high; + pf->ptp_pins->led2_1 = high; + pf->ptp_pins->led3_0 = high; + pf->ptp_pins->led3_1 = high; + + /* Use PF0 to set pins in HW. Return success for user space tools */ + if (pf->hw.pf_id) + return 0; + + i40e_ptp_init_leds_hw(&pf->hw); + i40e_ptp_set_pins_hw(pf); + + return 0; +} + +/** * i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode * @pf: Board private structure * @config: hwtstamp settings requested or saved @@ -545,6 +1190,21 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, struct i40e_hw *hw = &pf->hw; u32 tsyntype, regval; + /* Selects external trigger to cause event */ + regval = rd32(hw, I40E_PRTTSYN_AUX_0(0)); + /* Bit 17:16 is EVNTLVL, 01B rising edge */ + regval &= 0; + regval |= (1 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT); + /* regval: 0001 0000 0000 0000 0000 */ + wr32(hw, I40E_PRTTSYN_AUX_0(0), regval); + + /* Enable interrupts */ + regval = rd32(hw, I40E_PRTTSYN_CTL0); + regval |= 1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT; + wr32(hw, I40E_PRTTSYN_CTL0, regval); + + INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work); + /* Reserved for future extensions. */ if (config->flags) return -EINVAL; @@ -688,6 +1348,45 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) } /** + * i40e_init_pin_config - initialize pins. + * @pf: private board structure + * + * Initialize pins for external clock source. + * Return 0 on success or error code on failure. 
+ **/ +static int i40e_init_pin_config(struct i40e_pf *pf) +{ + int i; + + pf->ptp_caps.n_pins = 3; + pf->ptp_caps.n_ext_ts = 2; + pf->ptp_caps.pps = 1; + pf->ptp_caps.n_per_out = 2; + + pf->ptp_caps.pin_config = kcalloc(pf->ptp_caps.n_pins, + sizeof(*pf->ptp_caps.pin_config), + GFP_KERNEL); + if (!pf->ptp_caps.pin_config) + return -ENOMEM; + + for (i = 0; i < pf->ptp_caps.n_pins; i++) { + snprintf(pf->ptp_caps.pin_config[i].name, + sizeof(pf->ptp_caps.pin_config[i].name), + "%s", sdp_desc[i].name); + pf->ptp_caps.pin_config[i].index = sdp_desc[i].index; + pf->ptp_caps.pin_config[i].func = PTP_PF_NONE; + pf->ptp_caps.pin_config[i].chan = sdp_desc[i].chan; + } + + pf->ptp_caps.verify = i40e_ptp_verify; + pf->ptp_caps.enable = i40e_ptp_feature_enable; + + pf->ptp_caps.pps = 1; + + return 0; +} + +/** * i40e_ptp_create_clock - Create PTP clock device for userspace * @pf: Board private structure * @@ -707,13 +1406,16 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) sizeof(pf->ptp_caps.name) - 1); pf->ptp_caps.owner = THIS_MODULE; pf->ptp_caps.max_adj = 999999999; - pf->ptp_caps.n_ext_ts = 0; - pf->ptp_caps.pps = 0; pf->ptp_caps.adjfreq = i40e_ptp_adjfreq; pf->ptp_caps.adjtime = i40e_ptp_adjtime; pf->ptp_caps.gettimex64 = i40e_ptp_gettimex; pf->ptp_caps.settime64 = i40e_ptp_settime; - pf->ptp_caps.enable = i40e_ptp_feature_enable; + if (i40e_is_ptp_pin_dev(&pf->hw)) { + int err = i40e_init_pin_config(pf); + + if (err) + return err; + } /* Attempt to register the clock before enabling the hardware. */ pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev); @@ -843,6 +1545,8 @@ void i40e_ptp_init(struct i40e_pf *pf) /* Restore the clock time based on last known value */ i40e_ptp_restore_hw_time(pf); } + + i40e_ptp_set_1pps_signal_hw(pf); } /** @@ -854,6 +1558,9 @@ void i40e_ptp_init(struct i40e_pf *pf) **/ void i40e_ptp_stop(struct i40e_pf *pf) { + struct i40e_hw *hw = &pf->hw; + u32 regval; + pf->flags &= ~I40E_FLAG_PTP; pf->ptp_tx = false; pf->ptp_rx = false; @@ -872,4 +1579,21 @@ void i40e_ptp_stop(struct i40e_pf *pf) dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__, pf->vsi[pf->lan_vsi]->netdev->name); } + + if (i40e_is_ptp_pin_dev(&pf->hw)) { + i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, off); + i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, off); + i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, off); + } + + regval = rd32(hw, I40E_PRTTSYN_AUX_0(0)); + regval &= ~I40E_PRTTSYN_AUX_0_PTPFLAG_MASK; + wr32(hw, I40E_PRTTSYN_AUX_0(0), regval); + + /* Disable interrupts */ + regval = rd32(hw, I40E_PRTTSYN_CTL0); + regval &= ~I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL0, regval); + + i40e_ptp_free_pins(pf); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 36f7b27a04ae..8d0588a27a05 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -182,11 +182,20 @@ #define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4 +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5 +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6 #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7 #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) #define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11 #define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12 
#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19 +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 +#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ +#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5 +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6 #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MSCA_MDIADD_SHIFT 0 @@ -540,6 +549,7 @@ #define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12 #define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */ #define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */ +#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */ #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30 #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT) #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31 @@ -742,6 +752,8 @@ #define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */ #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1 #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2 +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT) #define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8 #define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT) #define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31 @@ -760,7 +772,10 @@ #define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */ #define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3 #define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */ +#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT) #define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4 #define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT) #define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */ @@ -768,6 +783,20 @@ #define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */ #define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */ #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0 +#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1 +#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16 +#define I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT 17 +#define I40E_PRTTSYN_AUX_0_PTPFLAG_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT) +#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0 +#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */ 
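[Editor's illustration] The register definitions above follow this header's usual field idiom: each field gets a _SHIFT constant and a _MASK built with I40E_MASK(), and driver code assembles or extracts field values with plain shift-and-mask arithmetic. The stand-alone C sketch below shows that idiom on the 5-bit LED_MODE field; I40E_MASK is reconstructed here from the way the header uses it, and the helper names are illustrative, not driver API.

#include <stdint.h>
#include <stdio.h>

/* Reconstructed from this header's usage: shift a field value into place. */
#define I40E_MASK(mask, shift)	((uint32_t)(mask) << (shift))

#define LED_MODE_SHIFT	12
#define LED_MODE_MASK	I40E_MASK(0x1F, LED_MODE_SHIFT)

/* Extract the LED_MODE field from a GPIO_CTL register value. */
static uint32_t led_mode_get(uint32_t reg)
{
	return (reg & LED_MODE_MASK) >> LED_MODE_SHIFT;
}

/* Replace the LED_MODE field while leaving all other bits untouched. */
static uint32_t led_mode_set(uint32_t reg, uint32_t mode)
{
	return (reg & ~LED_MODE_MASK) | I40E_MASK(mode & 0x1F, LED_MODE_SHIFT);
}

int main(void)
{
	uint32_t reg = led_mode_set(0xA0000001u, 0x7);

	printf("reg=0x%08x led_mode=%u\n", (unsigned)reg,
	       (unsigned)led_mode_get(reg));
	return 0;
}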
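[Editor's illustration] I40E_PRTTSYN_ADJ, added just above, is the register that the reworked i40e_ptp_adjtime() writes for small clock corrections: a sign-magnitude word holding a 30-bit nanosecond magnitude with the sign in bit 31 (I40E_ISGN). The following minimal sketch mirrors the arithmetic in that hunk; the function name is hypothetical, and deltas outside the roughly one-second window take the driver's slow read-add-write path instead.

#include <stdint.h>
#include <stdio.h>

#define I40E_ISGN	0x80000000u	/* sign bit of PRTTSYN_ADJ */

/* Encode a small PHC delta the way i40e_ptp_adjtime()'s fast path does:
 * a 30-bit magnitude in nanoseconds plus a sign flag in bit 31.
 */
static uint32_t encode_small_adj(int64_t delta_ns)
{
	uint64_t tohw = delta_ns < 0 ? (uint64_t)-delta_ns : (uint64_t)delta_ns;
	uint32_t timadj = tohw & 0x3FFFFFFF;	/* 30-bit magnitude */

	if (delta_ns < 0)
		timadj |= I40E_ISGN;		/* mark the adjustment negative */
	return timadj;
}

int main(void)
{
	printf("+5000 ns -> 0x%08x\n", (unsigned)encode_small_adj(5000));	/* 0x00001388 */
	printf("-5000 ns -> 0x%08x\n", (unsigned)encode_small_adj(-5000));	/* 0x80001388 */
	return 0;
}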
#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index eff0a30790dd..472f56b360b8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1160,12 +1160,12 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf) } /** - * i40e_getnum_vf_vsi_vlan_filters + * __i40e_getnum_vf_vsi_vlan_filters * @vsi: pointer to the vsi * * called to get the number of VLANs offloaded on this VF **/ -static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) +static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) { struct i40e_mac_filter *f; u16 num_vlans = 0, bkt; @@ -1179,6 +1179,23 @@ static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) } /** + * i40e_getnum_vf_vsi_vlan_filters + * @vsi: pointer to the vsi + * + * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held + **/ +static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) +{ + int num_vlans; + + spin_lock_bh(&vsi->mac_filter_hash_lock); + num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi); + spin_unlock_bh(&vsi->mac_filter_hash_lock); + + return num_vlans; +} + +/** * i40e_get_vlan_list_sync * @vsi: pointer to the VSI * @num_vlans: number of VLANs in mac_filter_hash, returned to caller @@ -1195,7 +1212,7 @@ static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans, int bkt; spin_lock_bh(&vsi->mac_filter_hash_lock); - *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi); + *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi); *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC); if (!(*vlan_list)) goto err; diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 90793b36126e..68c80f04113c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -186,12 +186,6 @@ enum iavf_state_t { __IAVF_RUNNING, /* opened, working */ }; -enum iavf_critical_section_t { - __IAVF_IN_CRITICAL_TASK, /* cannot be interrupted */ - __IAVF_IN_CLIENT_TASK, - __IAVF_IN_REMOVE_TASK, /* device being removed */ -}; - #define IAVF_CLOUD_FIELD_OMAC 0x01 #define IAVF_CLOUD_FIELD_IMAC 0x02 #define IAVF_CLOUD_FIELD_IVLAN 0x04 @@ -236,6 +230,9 @@ struct iavf_adapter { struct iavf_q_vector *q_vectors; struct list_head vlan_filter_list; struct list_head mac_filter_list; + struct mutex crit_lock; + struct mutex client_lock; + struct mutex remove_lock; /* Lock to protect accesses to MAC and VLAN lists */ spinlock_t mac_vlan_list_lock; char misc_vector_name[IFNAMSIZ + 9]; diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index af43fbd8cb75..5a359a0a20ec 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -685,6 +685,8 @@ static int __iavf_get_coalesce(struct net_device *netdev, * iavf_get_coalesce - Get interrupt coalescing settings * @netdev: network interface device structure * @ec: ethtool coalesce structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Returns current coalescing settings. 
This is referred to elsewhere in the * driver as Interrupt Throttle Rate, as this is how the hardware describes @@ -692,7 +694,9 @@ static int __iavf_get_coalesce(struct net_device *netdev, * only represents the settings of queue 0. **/ static int iavf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __iavf_get_coalesce(netdev, ec, -1); } @@ -804,11 +808,15 @@ static int __iavf_set_coalesce(struct net_device *netdev, * iavf_set_coalesce - Set interrupt coalescing settings * @netdev: network interface device structure * @ec: ethtool coalesce structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Change current coalescing settings for every queue. **/ static int iavf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __iavf_set_coalesce(netdev, ec, -1); } @@ -1352,8 +1360,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx if (!fltr) return -ENOMEM; - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { + while (!mutex_trylock(&adapter->crit_lock)) { if (--count == 0) { kfree(fltr); return -EINVAL; @@ -1378,7 +1385,7 @@ ret: if (err && fltr) kfree(fltr); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return err; } @@ -1563,8 +1570,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, return -EINVAL; } - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { + while (!mutex_trylock(&adapter->crit_lock)) { if (--count == 0) { kfree(rss_new); return -EINVAL; @@ -1600,7 +1606,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, if (!err) mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); if (!rss_new_add) kfree(rss_new); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 606a01ce4073..23762a7ef740 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -132,6 +132,27 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, } /** + * iavf_lock_timeout - try to lock mutex but give up after timeout + * @lock: mutex that should be locked + * @msecs: timeout in msecs + * + * Returns 0 on success, negative on failure + **/ +static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) +{ + unsigned int wait, delay = 10; + + for (wait = 0; wait < msecs; wait += delay) { + if (mutex_trylock(lock)) + return 0; + + msleep(delay); + } + + return -1; +} + +/** * iavf_schedule_reset - Set the flags and schedule a reset event * @adapter: board private structure **/ @@ -1916,7 +1937,7 @@ static void iavf_watchdog_task(struct work_struct *work) struct iavf_hw *hw = &adapter->hw; u32 reg_val; - if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) + if (!mutex_trylock(&adapter->crit_lock)) goto restart_watchdog; if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) @@ -1934,8 +1955,7 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->state = __IAVF_STARTUP; adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; queue_delayed_work(iavf_wq, &adapter->init_task, 10); - 
clear_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); /* Don't reschedule the watchdog, since we've restarted * the init task. When init_task contacts the PF and * gets everything set up again, it'll restart the @@ -1945,14 +1965,13 @@ static void iavf_watchdog_task(struct work_struct *work) } adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; - clear_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(10)); goto watchdog_done; case __IAVF_RESETTING: - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); return; case __IAVF_DOWN: @@ -1975,7 +1994,7 @@ static void iavf_watchdog_task(struct work_struct *work) } break; case __IAVF_REMOVE: - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return; default: goto restart_watchdog; @@ -1984,7 +2003,6 @@ static void iavf_watchdog_task(struct work_struct *work) /* check for hw reset */ reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) { - adapter->state = __IAVF_RESETTING; adapter->flags |= IAVF_FLAG_RESET_PENDING; adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; @@ -1998,7 +2016,7 @@ watchdog_done: if (adapter->state == __IAVF_RUNNING || adapter->state == __IAVF_COMM_FAILED) iavf_detect_recover_hung(&adapter->vsi); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); restart_watchdog: if (adapter->aq_required) queue_delayed_work(iavf_wq, &adapter->watchdog_task, @@ -2062,7 +2080,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; adapter->state = __IAVF_DOWN; wake_up(&adapter->down_waitqueue); @@ -2095,11 +2113,14 @@ static void iavf_reset_task(struct work_struct *work) /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ - if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) + if (mutex_is_locked(&adapter->remove_lock)) return; - while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, - &adapter->crit_section)) + if (iavf_lock_timeout(&adapter->crit_lock, 200)) { + schedule_work(&adapter->reset_task); + return; + } + while (!mutex_trylock(&adapter->client_lock)) usleep_range(500, 1000); if (CLIENT_ENABLED(adapter)) { adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | @@ -2151,7 +2172,7 @@ static void iavf_reset_task(struct work_struct *work) dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", reg_val); iavf_disable_vf(adapter); - clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); + mutex_unlock(&adapter->client_lock); return; /* Do not attempt to reinit. It's dead, Jim. 
*/ } @@ -2278,13 +2299,13 @@ continue_reset: adapter->state = __IAVF_DOWN; wake_up(&adapter->down_waitqueue); } - clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->client_lock); + mutex_unlock(&adapter->crit_lock); return; reset_err: - clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->client_lock); + mutex_unlock(&adapter->crit_lock); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); iavf_close(netdev); } @@ -2312,6 +2333,8 @@ static void iavf_adminq_task(struct work_struct *work) if (!event.msg_buf) goto out; + if (iavf_lock_timeout(&adapter->crit_lock, 200)) + goto freedom; do { ret = iavf_clean_arq_element(hw, &event, &pending); v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); @@ -2325,6 +2348,7 @@ static void iavf_adminq_task(struct work_struct *work) if (pending != 0) memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); } while (pending); + mutex_unlock(&adapter->crit_lock); if ((adapter->flags & (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || @@ -2391,7 +2415,7 @@ static void iavf_client_task(struct work_struct *work) * later. */ - if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) + if (!mutex_trylock(&adapter->client_lock)) return; if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { @@ -2414,7 +2438,7 @@ static void iavf_client_task(struct work_struct *work) adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; } out: - clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); + mutex_unlock(&adapter->client_lock); } /** @@ -3017,8 +3041,7 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter, if (!filter) return -ENOMEM; - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { + while (!mutex_trylock(&adapter->crit_lock)) { if (--count == 0) goto err; udelay(1); @@ -3049,7 +3072,7 @@ err: if (err) kfree(filter); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return err; } @@ -3196,8 +3219,7 @@ static int iavf_open(struct net_device *netdev) return -EIO; } - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + while (!mutex_trylock(&adapter->crit_lock)) usleep_range(500, 1000); if (adapter->state != __IAVF_DOWN) { @@ -3232,7 +3254,7 @@ static int iavf_open(struct net_device *netdev) iavf_irq_enable(adapter, true); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return 0; @@ -3244,7 +3266,7 @@ err_setup_rx: err_setup_tx: iavf_free_all_tx_resources(adapter); err_unlock: - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return err; } @@ -3268,8 +3290,7 @@ static int iavf_close(struct net_device *netdev) if (adapter->state <= __IAVF_DOWN_PENDING) return 0; - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + while (!mutex_trylock(&adapter->crit_lock)) usleep_range(500, 1000); set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); @@ -3280,7 +3301,7 @@ static int iavf_close(struct net_device *netdev) adapter->state = __IAVF_DOWN_PENDING; iavf_free_traffic_irqs(adapter); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); /* We explicitly don't free resources here because the hardware is * still active and can DMA into memory. 
Resources are cleared in @@ -3629,6 +3650,10 @@ static void iavf_init_task(struct work_struct *work) init_task.work); struct iavf_hw *hw = &adapter->hw; + if (iavf_lock_timeout(&adapter->crit_lock, 5000)) { + dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); + return; + } switch (adapter->state) { case __IAVF_STARTUP: if (iavf_startup(adapter) < 0) @@ -3641,14 +3666,14 @@ static void iavf_init_task(struct work_struct *work) case __IAVF_INIT_GET_RESOURCES: if (iavf_init_get_resources(adapter) < 0) goto init_failed; - return; + goto out; default: goto init_failed; } queue_delayed_work(iavf_wq, &adapter->init_task, msecs_to_jiffies(30)); - return; + goto out; init_failed: if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { dev_err(&adapter->pdev->dev, @@ -3657,9 +3682,11 @@ init_failed: iavf_shutdown_adminq(hw); adapter->state = __IAVF_STARTUP; queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); - return; + goto out; } queue_delayed_work(iavf_wq, &adapter->init_task, HZ); +out: + mutex_unlock(&adapter->crit_lock); } /** @@ -3676,9 +3703,12 @@ static void iavf_shutdown(struct pci_dev *pdev) if (netif_running(netdev)) iavf_close(netdev); + if (iavf_lock_timeout(&adapter->crit_lock, 5000)) + dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); /* Prevent the watchdog from running. */ adapter->state = __IAVF_REMOVE; adapter->aq_required = 0; + mutex_unlock(&adapter->crit_lock); #ifdef CONFIG_PM pci_save_state(pdev); @@ -3772,6 +3802,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* set up the locks for the AQ, do this only once in probe * and destroy them only once in remove */ + mutex_init(&adapter->crit_lock); + mutex_init(&adapter->client_lock); + mutex_init(&adapter->remove_lock); mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); @@ -3823,8 +3856,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d) netif_device_detach(netdev); - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + while (!mutex_trylock(&adapter->crit_lock)) usleep_range(500, 1000); if (netif_running(netdev)) { @@ -3835,7 +3867,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d) iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return 0; } @@ -3897,7 +3929,7 @@ static void iavf_remove(struct pci_dev *pdev) struct iavf_hw *hw = &adapter->hw; int err; /* Indicate we are in remove and not to run reset_task */ - set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section); + mutex_lock(&adapter->remove_lock); cancel_delayed_work_sync(&adapter->init_task); cancel_work_sync(&adapter->reset_task); cancel_delayed_work_sync(&adapter->client_task); @@ -3912,10 +3944,6 @@ static void iavf_remove(struct pci_dev *pdev) err); } - /* Shut down all the garbage mashers on the detention level */ - adapter->state = __IAVF_REMOVE; - adapter->aq_required = 0; - adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; iavf_request_reset(adapter); msleep(50); /* If the FW isn't responding, kick it once, but only once. 
*/ @@ -3923,6 +3951,13 @@ static void iavf_remove(struct pci_dev *pdev) iavf_request_reset(adapter); msleep(50); } + if (iavf_lock_timeout(&adapter->crit_lock, 5000)) + dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); + + /* Shut down all the garbage mashers on the detention level */ + adapter->state = __IAVF_REMOVE; + adapter->aq_required = 0; + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); iavf_misc_irq_disable(adapter); @@ -3942,6 +3977,11 @@ static void iavf_remove(struct pci_dev *pdev) /* destroy the locks only once, here */ mutex_destroy(&hw->aq.arq_mutex); mutex_destroy(&hw->aq.asq_mutex); + mutex_destroy(&adapter->client_lock); + mutex_unlock(&adapter->crit_lock); + mutex_destroy(&adapter->crit_lock); + mutex_unlock(&adapter->remove_lock); + mutex_destroy(&adapter->remove_lock); iounmap(hw->hw_addr); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 7fe6e8ea39f0..14afce82ef63 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -477,7 +477,7 @@ struct ice_pf *ice_allocate_pf(struct device *dev) { struct devlink *devlink; - devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf)); + devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev); if (!devlink) return NULL; @@ -504,7 +504,7 @@ int ice_devlink_register(struct ice_pf *pf) struct device *dev = ice_pf_to_dev(pf); int err; - err = devlink_register(devlink, dev); + err = devlink_register(devlink); if (err) { dev_err(dev, "devlink registration failed: %d\n", err); return err; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index d95a5daca114..c451cf401e63 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3568,8 +3568,10 @@ __ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, return 0; } -static int -ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +static int ice_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __ice_get_coalesce(netdev, ec, -1); } @@ -3787,8 +3789,10 @@ set_complete: return 0; } -static int -ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +static int ice_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __ice_set_coalesce(netdev, ec, -1); } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index fe2ded775f25..0d6c143f6653 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5122,6 +5122,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) struct ice_hw *hw = &pf->hw; struct sockaddr *addr = pi; enum ice_status status; + u8 old_mac[ETH_ALEN]; u8 flags = 0; int err = 0; u8 *mac; @@ -5144,8 +5145,13 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) } netif_addr_lock_bh(netdev); + ether_addr_copy(old_mac, netdev->dev_addr); + /* change the netdev's MAC address */ + memcpy(netdev->dev_addr, mac, netdev->addr_len); + netif_addr_unlock_bh(netdev); + /* Clean up old MAC filter. 
Not an error if old filter doesn't exist */ - status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI); + status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); if (status && status != ICE_ERR_DOES_NOT_EXIST) { err = -EADDRNOTAVAIL; goto err_update_filters; @@ -5168,13 +5174,12 @@ err_update_filters: if (err) { netdev_err(netdev, "can't set MAC %pM. filter update failed\n", mac); + netif_addr_lock_bh(netdev); + ether_addr_copy(netdev->dev_addr, old_mac); netif_addr_unlock_bh(netdev); return err; } - /* change the netdev's MAC address */ - memcpy(netdev->dev_addr, mac, netdev->addr_len); - netif_addr_unlock_bh(netdev); netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", netdev->dev_addr); @@ -6570,12 +6575,12 @@ event_after: } /** - * ice_do_ioctl - Access the hwtstamp interface + * ice_eth_ioctl - Access the hwtstamp interface * @netdev: network interface device structure * @ifr: interface request data * @cmd: ioctl command */ -static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_pf *pf = np->vsi->back; @@ -7241,7 +7246,7 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_change_mtu = ice_change_mtu, .ndo_get_stats64 = ice_get_stats64, .ndo_set_tx_maxrate = ice_set_tx_maxrate, - .ndo_do_ioctl = ice_do_ioctl, + .ndo_eth_ioctl = ice_eth_ioctl, .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, .ndo_set_vf_mac = ice_set_vf_mac, .ndo_get_vf_config = ice_get_vf_cfg, diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 9e3ddb9b8b51..05cc5870e4ef 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -22,7 +22,7 @@ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on) return; /* Set the timestamp enable flag for all the Tx rings */ - ice_for_each_rxq(vsi, i) { + ice_for_each_txq(vsi, i) { if (!vsi->tx_rings[i]) continue; vsi->tx_rings[i]->ptp_tx = on; @@ -689,6 +689,41 @@ err: } /** + * ice_ptp_disable_all_clkout - Disable all currently configured outputs + * @pf: pointer to the PF structure + * + * Disable all currently configured clock outputs. This is necessary before + * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to + * re-enable the clocks again. + */ +static void ice_ptp_disable_all_clkout(struct ice_pf *pf) +{ + uint i; + + for (i = 0; i < pf->ptp.info.n_per_out; i++) + if (pf->ptp.perout_channels[i].ena) + ice_ptp_cfg_clkout(pf, i, NULL, false); +} + +/** + * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs + * @pf: pointer to the PF structure + * + * Enable all currently configured clock outputs. Use this after + * ice_ptp_disable_all_clkout to reconfigure the output signals according to + * their configuration. 
+ */ +static void ice_ptp_enable_all_clkout(struct ice_pf *pf) +{ + uint i; + + for (i = 0; i < pf->ptp.info.n_per_out; i++) + if (pf->ptp.perout_channels[i].ena) + ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i], + false); +} + +/** * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC * @info: the driver's PTP info structure * @rq: The requested feature to change @@ -783,12 +818,17 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) goto exit; } + /* Disable periodic outputs */ + ice_ptp_disable_all_clkout(pf); + err = ice_ptp_write_init(pf, &ts64); ice_ptp_unlock(hw); if (!err) ice_ptp_update_cached_phctime(pf); + /* Reenable periodic outputs */ + ice_ptp_enable_all_clkout(pf); exit: if (err) { dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); @@ -842,8 +882,14 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) return -EBUSY; } + /* Disable periodic outputs */ + ice_ptp_disable_all_clkout(pf); + err = ice_ptp_write_adj(pf, delta); + /* Reenable periodic outputs */ + ice_ptp_enable_all_clkout(pf); + ice_ptp_unlock(hw); if (err) { @@ -1064,17 +1110,6 @@ static long ice_ptp_create_clock(struct ice_pf *pf) info = &pf->ptp.info; dev = ice_pf_to_dev(pf); - /* Allocate memory for kernel pins interface */ - if (info->n_pins) { - info->pin_config = devm_kcalloc(dev, info->n_pins, - sizeof(*info->pin_config), - GFP_KERNEL); - if (!info->pin_config) { - info->n_pins = 0; - return -ENOMEM; - } - } - /* Attempt to register the clock before enabling the hardware. */ clock = ptp_clock_register(info, dev); if (IS_ERR(clock)) @@ -1278,6 +1313,8 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) { u8 idx; + spin_lock(&tx->lock); + for (idx = 0; idx < tx->len; idx++) { u8 phy_idx = idx + tx->quad_offset; @@ -1290,6 +1327,8 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) tx->tstamps[idx].skb = NULL; } } + + spin_unlock(&tx->lock); } /** @@ -1550,6 +1589,9 @@ void ice_ptp_release(struct ice_pf *pf) if (!pf->ptp.clock) return; + /* Disable periodic outputs */ + ice_ptp_disable_all_clkout(pf); + ice_clear_ptp_clock_index(pf); ptp_clock_unregister(pf->ptp.clock); pf->ptp.clock = NULL; diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index e63ee3cca5ea..1277c5c7d099 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -492,6 +492,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) **/ static void igb_i21x_hw_doublecheck(struct e1000_hw *hw) { + int failed_cnt = 3; bool is_failed; int i; @@ -502,9 +503,12 @@ static void igb_i21x_hw_doublecheck(struct e1000_hw *hw) is_failed = true; array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); wrfl(); - break; } } + if (is_failed && --failed_cnt <= 0) { + hw_dbg("Failed to update MTA_REGISTER, too many retries"); + break; + } } while (is_failed); } diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 636a1b1fb7e1..fb1029352c3e 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2182,7 +2182,9 @@ static int igb_set_phys_id(struct net_device *netdev, } static int igb_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct igb_adapter *adapter = netdev_priv(netdev); int i; @@ -2238,7 
+2240,9 @@ static int igb_set_coalesce(struct net_device *netdev, } static int igb_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -2343,8 +2347,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) switch (stringset) { case ETH_SS_TEST: - memcpy(data, *igb_gstrings_test, - IGB_TEST_LEN*ETH_GSTRING_LEN); + memcpy(data, igb_gstrings_test, sizeof(igb_gstrings_test)); break; case ETH_SS_STATS: for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 171a7a629b20..751de06019a0 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2991,7 +2991,7 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_set_rx_mode = igb_set_rx_mode, .ndo_set_mac_address = igb_set_mac, .ndo_change_mtu = igb_change_mtu, - .ndo_do_ioctl = igb_ioctl, + .ndo_eth_ioctl = igb_ioctl, .ndo_tx_timeout = igb_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index f4835eb62fee..06e5bd646a0e 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -314,7 +314,9 @@ static int igbvf_set_wol(struct net_device *netdev, } static int igbvf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -327,7 +329,9 @@ static int igbvf_get_coalesce(struct net_device *netdev, } static int igbvf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 1bbe9862a758..d32e72d953c8 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2657,7 +2657,7 @@ static const struct net_device_ops igbvf_netdev_ops = { .ndo_set_rx_mode = igbvf_set_rx_mode, .ndo_set_mac_address = igbvf_set_mac, .ndo_change_mtu = igbvf_change_mtu, - .ndo_do_ioctl = igbvf_ioctl, + .ndo_eth_ioctl = igbvf_ioctl, .ndo_tx_timeout = igbvf_tx_timeout, .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 5901ed9fb545..3e386c38d016 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -33,6 +33,8 @@ void igc_ethtool_set_ops(struct net_device *); #define IGC_N_PEROUT 2 #define IGC_N_SDP 4 +#define MAX_FLEX_FILTER 32 + enum igc_mac_filter_type { IGC_MAC_FILTER_TYPE_DST = 0, IGC_MAC_FILTER_TYPE_SRC @@ -96,6 +98,13 @@ struct igc_ring { u32 start_time; u32 end_time; + /* CBS parameters */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ + /* 
everything past this point is written often */ u16 next_to_clean; u16 next_to_use; @@ -225,6 +234,7 @@ struct igc_adapter { struct timecounter tc; struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ ktime_t ptp_reset_start; /* Reset time in clock mono */ + struct system_time_snapshot snapshot; char fw_version[32]; @@ -287,6 +297,10 @@ extern char igc_driver_name[]; #define IGC_FLAG_VLAN_PROMISC BIT(15) #define IGC_FLAG_RX_LEGACY BIT(16) #define IGC_FLAG_TSN_QBV_ENABLED BIT(17) +#define IGC_FLAG_TSN_QAV_ENABLED BIT(18) + +#define IGC_FLAG_TSN_ANY_ENABLED \ + (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED) #define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) #define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) @@ -476,18 +490,28 @@ struct igc_q_vector { }; enum igc_filter_match_flags { - IGC_FILTER_FLAG_ETHER_TYPE = 0x1, - IGC_FILTER_FLAG_VLAN_TCI = 0x2, - IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4, - IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8, + IGC_FILTER_FLAG_ETHER_TYPE = BIT(0), + IGC_FILTER_FLAG_VLAN_TCI = BIT(1), + IGC_FILTER_FLAG_SRC_MAC_ADDR = BIT(2), + IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3), + IGC_FILTER_FLAG_USER_DATA = BIT(4), + IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5), }; struct igc_nfc_filter { u8 match_flags; u16 etype; + __be16 vlan_etype; u16 vlan_tci; u8 src_addr[ETH_ALEN]; u8 dst_addr[ETH_ALEN]; + u8 user_data[8]; + u8 user_mask[8]; + u8 flex_index; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; }; struct igc_nfc_rule { @@ -495,12 +519,24 @@ struct igc_nfc_rule { struct igc_nfc_filter filter; u32 location; u16 action; + bool flex; }; -/* IGC supports a total of 32 NFC rules: 16 MAC address based,, 8 VLAN priority - * based, and 8 ethertype based. +/* IGC supports a total of 64 NFC rules: 16 MAC address based, 8 VLAN priority + * based, 8 ethertype based and 32 Flex filter based rules. 
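+ * (16 + 8 + 8 + 32 = 64, matching the IGC_MAX_RXNFC_RULES value below.)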
*/ -#define IGC_MAX_RXNFC_RULES 32 +#define IGC_MAX_RXNFC_RULES 64 + +struct igc_flex_filter { + u8 index; + u8 data[128]; + u8 mask[16]; + u8 length; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; /* igc_desc_unused - calculate if we have unused descriptors */ static inline u16 igc_desc_unused(const struct igc_ring *ring) diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index d0700d48ecf9..84f142f5e472 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -187,15 +187,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) igc_check_for_copper_link(hw); - /* Verify phy id and set remaining function pointers */ - switch (phy->id) { - case I225_I_PHY_ID: - phy->type = igc_phy_i225; - break; - default: - ret_val = -IGC_ERR_PHY; - goto out; - } + phy->type = igc_phy_i225; out: return ret_val; diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index c3a5a5518790..a4bbee748798 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -17,11 +17,22 @@ #define IGC_WUC_PME_EN 0x00000002 /* PME Enable */ /* Wake Up Filter Control */ -#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ -#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ -#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ -#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ -#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IGC_WUFC_FLEX_HQ BIT(14) /* Flex Filters Host Queuing */ +#define IGC_WUFC_FLX0 BIT(16) /* Flexible Filter 0 Enable */ +#define IGC_WUFC_FLX1 BIT(17) /* Flexible Filter 1 Enable */ +#define IGC_WUFC_FLX2 BIT(18) /* Flexible Filter 2 Enable */ +#define IGC_WUFC_FLX3 BIT(19) /* Flexible Filter 3 Enable */ +#define IGC_WUFC_FLX4 BIT(20) /* Flexible Filter 4 Enable */ +#define IGC_WUFC_FLX5 BIT(21) /* Flexible Filter 5 Enable */ +#define IGC_WUFC_FLX6 BIT(22) /* Flexible Filter 6 Enable */ +#define IGC_WUFC_FLX7 BIT(23) /* Flexible Filter 7 Enable */ + +#define IGC_WUFC_FILTER_MASK GENMASK(23, 14) #define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ @@ -46,6 +57,37 @@ /* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ #define IGC_WUPM_BYTES 128 +/* Wakeup Filter Control Extended */ +#define IGC_WUFC_EXT_FLX8 BIT(8) /* Flexible Filter 8 Enable */ +#define IGC_WUFC_EXT_FLX9 BIT(9) /* Flexible Filter 9 Enable */ +#define IGC_WUFC_EXT_FLX10 BIT(10) /* Flexible Filter 10 Enable */ +#define IGC_WUFC_EXT_FLX11 BIT(11) /* Flexible Filter 11 Enable */ +#define IGC_WUFC_EXT_FLX12 BIT(12) /* Flexible Filter 12 Enable */ +#define IGC_WUFC_EXT_FLX13 BIT(13) /* Flexible Filter 13 Enable */ +#define IGC_WUFC_EXT_FLX14 BIT(14) /* Flexible Filter 14 Enable */ +#define IGC_WUFC_EXT_FLX15 BIT(15) /* Flexible Filter 15 Enable */ +#define IGC_WUFC_EXT_FLX16 BIT(16) /* Flexible Filter 16 Enable */ +#define IGC_WUFC_EXT_FLX17 BIT(17) /* Flexible Filter 17 Enable */ +#define IGC_WUFC_EXT_FLX18 BIT(18) /* Flexible Filter 18 Enable */ +#define IGC_WUFC_EXT_FLX19 BIT(19) /* 
Flexible Filter 19 Enable */ +#define IGC_WUFC_EXT_FLX20 BIT(20) /* Flexible Filter 20 Enable */ +#define IGC_WUFC_EXT_FLX21 BIT(21) /* Flexible Filter 21 Enable */ +#define IGC_WUFC_EXT_FLX22 BIT(22) /* Flexible Filter 22 Enable */ +#define IGC_WUFC_EXT_FLX23 BIT(23) /* Flexible Filter 23 Enable */ +#define IGC_WUFC_EXT_FLX24 BIT(24) /* Flexible Filter 24 Enable */ +#define IGC_WUFC_EXT_FLX25 BIT(25) /* Flexible Filter 25 Enable */ +#define IGC_WUFC_EXT_FLX26 BIT(26) /* Flexible Filter 26 Enable */ +#define IGC_WUFC_EXT_FLX27 BIT(27) /* Flexible Filter 27 Enable */ +#define IGC_WUFC_EXT_FLX28 BIT(28) /* Flexible Filter 28 Enable */ +#define IGC_WUFC_EXT_FLX29 BIT(29) /* Flexible Filter 29 Enable */ +#define IGC_WUFC_EXT_FLX30 BIT(30) /* Flexible Filter 30 Enable */ +#define IGC_WUFC_EXT_FLX31 BIT(31) /* Flexible Filter 31 Enable */ + +#define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8) + +/* Physical Func Reset Done Indication */ +#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000 + /* Loop limit on how long we wait for auto-negotiation to complete */ #define COPPER_LINK_UP_LIMIT 10 #define PHY_AUTO_NEG_LIMIT 45 @@ -476,11 +518,50 @@ #define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 #define IGC_TXQCTL_STRICT_CYCLE 0x00000002 #define IGC_TXQCTL_STRICT_END 0x00000004 +#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0 +#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080 +#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0 + +#define IGC_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define IGC_TQAVCC_KEEP_CREDITS BIT(30) + +#define IGC_MAX_SR_QUEUES 2 /* Receive Checksum Control */ #define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ #define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ +/* PCIe PTM Control */ +#define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */ +#define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */ +#define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */ +#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x2f) << 2) +#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) + +#define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */ +#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */ +#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */ + +/* PCIe Digital Delay */ +#define IGC_PCIE_DIG_DELAY_DEFAULT 0x01440000 + +/* PCIe PHY Delay */ +#define IGC_PCIE_PHY_DELAY_DEFAULT 0x40900000 + +#define IGC_TIMADJ_ADJUST_METH 0x40000000 + +/* PCIe PTM Status */ +#define IGC_PTM_STAT_VALID BIT(0) /* PTM Status */ +#define IGC_PTM_STAT_RET_ERR BIT(1) /* Root port timeout */ +#define IGC_PTM_STAT_BAD_PTM_RES BIT(2) /* PTM Response msg instead of PTM Response Data */ +#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */ +#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */ +#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */ + +/* PCIe PTM Cycle Control */ +#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */ +#define IGC_PTM_CYCLE_CTRL_AUTO_CYC_EN BIT(31) /* PTM Cycle Control */ + /* GPY211 - I225 defines */ #define GPY_MMD_MASK 0xFFFF0000 #define GPY_MMD_SHIFT 16 diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index fa4171860623..e0a76ac1bbbc 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -862,7 +862,9 @@ static void igc_ethtool_get_stats(struct net_device *netdev, } static int igc_ethtool_get_coalesce(struct net_device 
*netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct igc_adapter *adapter = netdev_priv(netdev); @@ -882,7 +884,9 @@ static int igc_ethtool_get_coalesce(struct net_device *netdev, } static int igc_ethtool_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct igc_adapter *adapter = netdev_priv(netdev); int i; @@ -979,6 +983,12 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, eth_broadcast_addr(fsp->m_u.ether_spec.h_source); } + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) { + fsp->flow_type |= FLOW_EXT; + memcpy(fsp->h_ext.data, rule->filter.user_data, sizeof(fsp->h_ext.data)); + memcpy(fsp->m_ext.data, rule->filter.user_mask, sizeof(fsp->m_ext.data)); + } + mutex_unlock(&adapter->nfc_rule_lock); return 0; @@ -1215,6 +1225,30 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, ether_addr_copy(rule->filter.dst_addr, fsp->h_u.ether_spec.h_dest); } + + /* VLAN etype matching */ + if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) { + rule->filter.vlan_etype = fsp->h_ext.vlan_etype; + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE; + } + + /* Check for user defined data */ + if ((fsp->flow_type & FLOW_EXT) && + (fsp->h_ext.data[0] || fsp->h_ext.data[1])) { + rule->filter.match_flags |= IGC_FILTER_FLAG_USER_DATA; + memcpy(rule->filter.user_data, fsp->h_ext.data, sizeof(fsp->h_ext.data)); + memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data)); + } + + /* When multiple filter options or user data or vlan etype is set, use a + * flex filter. + */ + if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) || + (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) || + (rule->filter.match_flags & (rule->filter.match_flags - 1))) + rule->flex = true; + else + rule->flex = false; } /** @@ -1244,11 +1278,6 @@ static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter, return -EINVAL; } - if (flags & (flags - 1)) { - netdev_dbg(dev, "Rule with multiple matches not supported\n"); - return -EOPNOTSUPP; - } - list_for_each_entry(tmp, &adapter->nfc_rule_list, list) { if (!memcmp(&rule->filter, &tmp->filter, sizeof(rule->filter)) && @@ -1280,12 +1309,6 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, return -EOPNOTSUPP; } - if ((fsp->flow_type & FLOW_EXT) && - fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { - netdev_dbg(netdev, "VLAN mask not supported\n"); - return -EOPNOTSUPP; - } - if (fsp->ring_cookie >= adapter->num_rx_queues) { netdev_dbg(netdev, "Invalid action\n"); return -EINVAL; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index ed2d66bc2d6c..b877efae61df 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -12,6 +12,8 @@ #include <net/pkt_sched.h> #include <linux/bpf_trace.h> #include <net/xdp_sock_drv.h> +#include <linux/pci.h> + #include <net/ipv6.h> #include "igc.h" @@ -118,7 +120,7 @@ void igc_reset(struct igc_adapter *adapter) igc_ptp_reset(adapter); /* Re-enable TSN offloading, where applicable. 
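+ * igc_tsn_reset() recomputes the QBV/QAV flags from the current ring state and reprograms the hardware; see the igc_tsn.c hunks further below.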
*/ - igc_tsn_offload_apply(adapter); + igc_tsn_reset(adapter); igc_get_phy_info(hw); } @@ -3078,11 +3080,320 @@ static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) etype); } +static int igc_flex_filter_select(struct igc_adapter *adapter, + struct igc_flex_filter *input, + u32 *fhft) +{ + struct igc_hw *hw = &adapter->hw; + u8 fhft_index; + u32 fhftsl; + + if (input->index >= MAX_FLEX_FILTER) { + dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); + return -EINVAL; + } + + /* Indirect table select register */ + fhftsl = rd32(IGC_FHFTSL); + fhftsl &= ~IGC_FHFTSL_FTSL_MASK; + switch (input->index) { + case 0 ... 7: + fhftsl |= 0x00; + break; + case 8 ... 15: + fhftsl |= 0x01; + break; + case 16 ... 23: + fhftsl |= 0x02; + break; + case 24 ... 31: + fhftsl |= 0x03; + break; + } + wr32(IGC_FHFTSL, fhftsl); + + /* Normalize index down to host table register */ + fhft_index = input->index % 8; + + *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : + IGC_FHFT_EXT(fhft_index - 4); + + return 0; +} + +static int igc_write_flex_filter_ll(struct igc_adapter *adapter, + struct igc_flex_filter *input) +{ + struct device *dev = &adapter->pdev->dev; + struct igc_hw *hw = &adapter->hw; + u8 *data = input->data; + u8 *mask = input->mask; + u32 queuing; + u32 fhft; + u32 wufc; + int ret; + int i; + + /* Length has to be aligned to 8. Otherwise the filter will fail. Bail + * out early to avoid surprises later. + */ + if (input->length % 8 != 0) { + dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); + return -EINVAL; + } + + /* Select corresponding flex filter register and get base for host table. */ + ret = igc_flex_filter_select(adapter, input, &fhft); + if (ret) + return ret; + + /* When adding a filter globally disable flex filter feature. That is + * recommended within the datasheet. + */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); + + /* Configure filter */ + queuing = input->length & IGC_FHFT_LENGTH_MASK; + queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; + queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; + + if (input->immediate_irq) + queuing |= IGC_FHFT_IMM_INT; + + if (input->drop) + queuing |= IGC_FHFT_DROP; + + wr32(fhft + 0xFC, queuing); + + /* Write data (128 byte) and mask (128 bit) */ + for (i = 0; i < 16; ++i) { + const size_t data_idx = i * 8; + const size_t row_idx = i * 16; + u32 dw0 = + (data[data_idx + 0] << 0) | + (data[data_idx + 1] << 8) | + (data[data_idx + 2] << 16) | + (data[data_idx + 3] << 24); + u32 dw1 = + (data[data_idx + 4] << 0) | + (data[data_idx + 5] << 8) | + (data[data_idx + 6] << 16) | + (data[data_idx + 7] << 24); + u32 tmp; + + /* Write row: dw0, dw1 and mask */ + wr32(fhft + row_idx, dw0); + wr32(fhft + row_idx + 4, dw1); + + /* mask is only valid for MASK(7, 0) */ + tmp = rd32(fhft + row_idx + 8); + tmp &= ~GENMASK(7, 0); + tmp |= mask[i]; + wr32(fhft + row_idx + 8, tmp); + } + + /* Enable filter. */ + wufc |= IGC_WUFC_FLEX_HQ; + if (input->index >= 8) { + /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. 
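+ * Filters 8-31 are enabled via WUFC_EXT instead, one bit each, using IGC_WUFC_EXT_FLX8 shifted by (index - 8).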
*/ + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); + + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc |= (IGC_WUFC_FLX0 << input->index); + } + wr32(IGC_WUFC, wufc); + + dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", + input->index); + + return 0; +} + +static void igc_flex_filter_add_field(struct igc_flex_filter *flex, + const void *src, unsigned int offset, + size_t len, const void *mask) +{ + int i; + + /* data */ + memcpy(&flex->data[offset], src, len); + + /* mask */ + for (i = 0; i < len; ++i) { + const unsigned int idx = i + offset; + const u8 *ptr = mask; + + if (mask) { + if (ptr[i] & 0xff) + flex->mask[idx / 8] |= BIT(idx % 8); + + continue; + } + + flex->mask[idx / 8] |= BIT(idx % 8); + } +} + +static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + int i; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + for (i = 0; i < MAX_FLEX_FILTER; i++) { + if (i < 8) { + if (!(wufc & (IGC_WUFC_FLX0 << i))) + return i; + } else { + if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) + return i; + } + } + + return -ENOSPC; +} + +static bool igc_flex_filter_in_use(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + if (wufc & IGC_WUFC_FILTER_MASK) + return true; + + if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) + return true; + + return false; +} + +static int igc_add_flex_filter(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct igc_flex_filter flex = { }; + struct igc_nfc_filter *filter = &rule->filter; + unsigned int eth_offset, user_offset; + int ret, index; + bool vlan; + + index = igc_find_avail_flex_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + /* Construct the flex filter: + * -> dest_mac [6] + * -> src_mac [6] + * -> tpid [2] + * -> vlan tci [2] + * -> ether type [2] + * -> user data [8] + * -> = 26 bytes => 32 length + */ + flex.index = index; + flex.length = 32; + flex.rx_queue = rule->action; + + vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; + eth_offset = vlan ? 16 : 12; + user_offset = vlan ? 18 : 14; + + /* Add destination MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, + ETH_ALEN, NULL); + + /* Add source MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->src_addr, 6, + ETH_ALEN, NULL); + + /* Add VLAN etype */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) + igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, + sizeof(filter->vlan_etype), + NULL); + + /* Add VLAN TCI */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, + sizeof(filter->vlan_tci), NULL); + + /* Add Ether type */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + __be16 etype = cpu_to_be16(filter->etype); + + igc_flex_filter_add_field(&flex, &etype, eth_offset, + sizeof(etype), NULL); + } + + /* Add user data */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) + igc_flex_filter_add_field(&flex, &filter->user_data, + user_offset, + sizeof(filter->user_data), + filter->user_mask); + + /* Add it down to the hardware and enable it. 
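+ * For example, a VLAN-tagged rule matching EtherType plus user data places the EtherType at offset 16 and the eight user-data bytes at offset 18; the 26 matched bytes then round up to the 8-byte-aligned filter length of 32 set above.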
*/ + ret = igc_write_flex_filter_ll(adapter, &flex); + if (ret) + return ret; + + filter->flex_index = index; + + return 0; +} + +static void igc_del_flex_filter(struct igc_adapter *adapter, + u16 reg_index) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc; + + /* Just disable the filter. The filter table itself is kept + * intact. Another flex_filter_add() should override the "old" data + * then. + */ + if (reg_index >= 8) { + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc = rd32(IGC_WUFC); + + wufc &= ~(IGC_WUFC_FLX0 << reg_index); + wr32(IGC_WUFC, wufc); + } + + if (igc_flex_filter_in_use(adapter)) + return; + + /* No filters are in use, we may disable flex filters */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); +} + static int igc_enable_nfc_rule(struct igc_adapter *adapter, - const struct igc_nfc_rule *rule) + struct igc_nfc_rule *rule) { int err; + if (rule->flex) { + return igc_add_flex_filter(adapter, rule); + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { err = igc_add_etype_filter(adapter, rule->filter.etype, rule->action); @@ -3119,6 +3430,11 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter, static void igc_disable_nfc_rule(struct igc_adapter *adapter, const struct igc_nfc_rule *rule) { + if (rule->flex) { + igc_del_flex_filter(adapter, rule->filter.flex_index); + return; + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) igc_del_etype_filter(adapter, rule->filter.etype); @@ -4817,6 +5133,7 @@ static irqreturn_t igc_msix_ring(int irq, void *data) */ static int igc_request_msix(struct igc_adapter *adapter) { + unsigned int num_q_vectors = adapter->num_q_vectors; int i = 0, err = 0, vector = 0, free_vector = 0; struct net_device *netdev = adapter->netdev; @@ -4825,7 +5142,13 @@ static int igc_request_msix(struct igc_adapter *adapter) if (err) goto err_out; - for (i = 0; i < adapter->num_q_vectors; i++) { + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { struct igc_q_vector *q_vector = adapter->q_vector[i]; vector++; @@ -4904,20 +5227,12 @@ bool igc_has_link(struct igc_adapter *adapter) * false until the igc_check_for_link establishes link * for copper adapters ONLY */ - switch (hw->phy.media_type) { - case igc_media_type_copper: - if (!hw->mac.get_link_status) - return true; - hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; - break; - default: - case igc_media_type_unknown: - break; - } + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; - if (hw->mac.type == igc_i225 && - hw->phy.id == I225_I_PHY_ID) { + if (hw->mac.type == igc_i225) { if (!netif_carrier_ok(adapter->netdev)) { adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { @@ -5005,7 +5320,9 @@ static void igc_watchdog_task(struct work_struct *work) adapter->tx_timeout_factor = 14; break; case SPEED_100: - /* maybe add some timeout factor ? 
*/ + case SPEED_1000: + case SPEED_2500: + adapter->tx_timeout_factor = 7; break; } @@ -5432,7 +5749,6 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, bool enable) { struct igc_ring *ring; - int i; if (queue < 0 || queue >= adapter->num_tx_queues) return -EINVAL; @@ -5440,17 +5756,6 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, ring = adapter->tx_ring[queue]; ring->launchtime_enable = enable; - if (adapter->base_time) - return 0; - - adapter->cycle_time = NSEC_PER_SEC; - - for (i = 0; i < adapter->num_tx_queues; i++) { - ring = adapter->tx_ring[i]; - ring->start_time = 0; - ring->end_time = NSEC_PER_SEC; - } - return 0; } @@ -5523,16 +5828,31 @@ static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, return igc_tsn_offload_apply(adapter); } +static int igc_tsn_clear_schedule(struct igc_adapter *adapter) +{ + int i; + + adapter->base_time = 0; + adapter->cycle_time = NSEC_PER_SEC; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = 0; + ring->end_time = NSEC_PER_SEC; + } + + return 0; +} + static int igc_save_qbv_schedule(struct igc_adapter *adapter, struct tc_taprio_qopt_offload *qopt) { u32 start_time = 0, end_time = 0; size_t n; - if (!qopt->enable) { - adapter->base_time = 0; - return 0; - } + if (!qopt->enable) + return igc_tsn_clear_schedule(adapter); if (adapter->base_time) return -EALREADY; @@ -5584,6 +5904,74 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, return igc_tsn_offload_apply(adapter); } +static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; + struct net_device *netdev = adapter->netdev; + struct igc_ring *ring; + int i; + + /* i225 has two sets of credit-based shaper logic. + * Supporting it only on the top two priority queues + */ + if (queue < 0 || queue > 1) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + for (i = 0; i < IGC_MAX_SR_QUEUES; i++) + if (adapter->tx_ring[i]) + cbs_status[i] = adapter->tx_ring[i]->cbs_enable; + + /* CBS should be enabled on the highest priority queue first in order + * for the CBS algorithm to operate as intended. 
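+ * In practice: enable queue 0 before queue 1 and disable queue 1 before queue 0; the checks below reject the opposite orderings. + * + * Hypothetical userspace trigger for this path (interface name, parent handle and parameter values are illustrative only, assuming an mqprio root qdisc): + * + * tc qdisc replace dev eth0 parent 100:1 cbs offload 1 \ + * idleslope 20000 sendslope -980000 hicredit 30 locredit -1470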
+ */ + if (enable) { + if (queue == 1 && !cbs_status[0]) { + netdev_err(netdev, + "Enabling CBS on queue1 before queue0\n"); + return -EINVAL; + } + } else { + if (queue == 0 && cbs_status[1]) { + netdev_err(netdev, + "Disabling CBS on queue0 before queue1\n"); + return -EINVAL; + } + } + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +static int igc_tsn_enable_cbs(struct igc_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -5596,6 +5984,9 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, case TC_SETUP_QDISC_ETF: return igc_tsn_enable_launchtime(adapter, type_data); + case TC_SETUP_QDISC_CBS: + return igc_tsn_enable_cbs(adapter, type_data); + default: return -EOPNOTSUPP; } @@ -5704,7 +6095,7 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_fix_features = igc_fix_features, .ndo_set_features = igc_set_features, .ndo_features_check = igc_features_check, - .ndo_do_ioctl = igc_ioctl, + .ndo_eth_ioctl = igc_ioctl, .ndo_setup_tc = igc_setup_tc, .ndo_bpf = igc_bpf, .ndo_xdp_xmit = igc_xdp_xmit, @@ -5865,6 +6256,10 @@ static int igc_probe(struct pci_dev *pdev, pci_enable_pcie_error_reporting(pdev); + err = pci_enable_ptm(pdev, NULL); + if (err < 0) + dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); + pci_set_master(pdev); err = -ENOMEM; @@ -6018,6 +6413,8 @@ static int igc_probe(struct pci_dev *pdev, igc_ptp_init(adapter); + igc_tsn_clear_schedule(adapter); + /* reset the hardware with the new settings */ igc_reset(adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 83aeb5e7076f..5cad31c3c7b0 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -249,8 +249,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw) return ret_val; } - if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && - hw->phy.id == I225_I_PHY_ID) { + if (phy->autoneg_mask & ADVERTISE_2500_FULL) { /* Read the MULTI GBT AN Control Register - reg 7.32 */ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) | @@ -390,8 +389,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw) ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); - if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && - hw->phy.id == I225_I_PHY_ID) + if (phy->autoneg_mask & ADVERTISE_2500_FULL) ret_val = phy->ops.write_reg(hw, (STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) | diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 4ae19c6a3247..0f021909b430 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -9,6 +9,8 @@ #include <linux/ptp_classify.h> #include <linux/clocksource.h> #include <linux/ktime.h> +#include <linux/delay.h> +#include <linux/iopoll.h> #define INCVALUE_MASK 0x7fffffff #define ISGN 0x80000000 @@ -16,6 +18,9 @@ #define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) 
#define IGC_PTP_TX_TIMEOUT (HZ * 15) +#define IGC_PTM_STAT_SLEEP 2 +#define IGC_PTM_STAT_TIMEOUT 100 + /* SYSTIM read access for I225 */ void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts) { @@ -752,6 +757,147 @@ int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) -EFAULT : 0; } +/* The two conditions below must be met for cross timestamping via + * PCIe PTM: + * + * 1. We have a way to convert the timestamps in the PTM messages + * to something related to the system clocks (right now, only + * X86 systems with support for the Always Running Timer allow that); + * + * 2. We have PTM enabled in the path from the device to the PCIe root port. + */ +static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) +{ + return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false; +} + +static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp) +{ +#if IS_ENABLED(CONFIG_X86_TSC) + return convert_art_ns_to_tsc(tstamp); +#else + return (struct system_counterval_t) { }; +#endif +} + +static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) +{ + struct net_device *netdev = adapter->netdev; + + switch (ptm_stat) { + case IGC_PTM_STAT_RET_ERR: + netdev_err(netdev, "PTM Error: Root port timeout\n"); + break; + case IGC_PTM_STAT_BAD_PTM_RES: + netdev_err(netdev, "PTM Error: Bad response, PTM Response Data expected\n"); + break; + case IGC_PTM_STAT_T4M1_OVFL: + netdev_err(netdev, "PTM Error: T4 minus T1 overflow\n"); + break; + case IGC_PTM_STAT_ADJUST_1ST: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during first PTM cycle\n"); + break; + case IGC_PTM_STAT_ADJUST_CYC: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during non-first PTM cycle\n"); + break; + default: + netdev_err(netdev, "PTM Error: Unknown error (%#x)\n", ptm_stat); + break; + } +} + +static int igc_phc_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + u32 stat, t2_curr_h, t2_curr_l, ctrl; + struct igc_adapter *adapter = ctx; + struct igc_hw *hw = &adapter->hw; + int err, count = 100; + ktime_t t1, t2_curr; + + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); + + do { + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ + + /* To "manually" start the PTM cycle we need to clear and + * then set again the TRIG bit. + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + + /* The cycle only starts "for real" when software notifies + * that it has read the registers, this is done by setting + * VALID bit. + */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT); + if (err < 0) { + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + return err; + } + + if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) + break; + + if (stat & ~IGC_PTM_STAT_VALID) { + /* An error occurred, log it. */ + igc_ptm_log_error(adapter, stat); + /* The STAT register is write-1-to-clear (W1C), + * so write the previous error status to clear it. 
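+ * Writing back the value just read sets exactly the error bits that were observed, clearing them before the next loop iteration.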
+ */ + wr32(IGC_PTM_STAT, stat); + continue; + } + } while (--count); + + if (!count) { + netdev_err(adapter->netdev, "Exceeded number of tries for PTM cycle\n"); + return -ETIMEDOUT; + } + + t1 = ktime_set(rd32(IGC_PTM_T1_TIM0_H), rd32(IGC_PTM_T1_TIM0_L)); + + t2_curr_l = rd32(IGC_PTM_CURR_T2_L); + t2_curr_h = rd32(IGC_PTM_CURR_T2_H); + + /* FIXME: When the register that tells the endianness of the + * PTM registers is implemented, check it here and add the + * appropriate conversion. + */ + t2_curr_h = swab32(t2_curr_h); + + t2_curr = ((s64)t2_curr_h << 32 | t2_curr_l); + + *device = t1; + *system = igc_device_tstamp_to_system(t2_curr); + + return 0; +} + +static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts) +{ + struct igc_adapter *adapter = container_of(ptp, struct igc_adapter, + ptp_caps); + + return get_device_system_crosststamp(igc_phc_get_syncdevicetime, + adapter, &adapter->snapshot, cts); +} + /** * igc_ptp_init - Initialize PTP functionality * @adapter: Board private structure @@ -788,6 +934,11 @@ void igc_ptp_init(struct igc_adapter *adapter) adapter->ptp_caps.n_per_out = IGC_N_PEROUT; adapter->ptp_caps.n_pins = IGC_N_SDP; adapter->ptp_caps.verify = igc_ptp_verify_pin; + + if (!igc_is_crosststamp_supported(adapter)) + break; + + adapter->ptp_caps.getcrosststamp = igc_ptp_getcrosststamp; break; default: adapter->ptp_clock = NULL; @@ -879,7 +1030,9 @@ void igc_ptp_stop(struct igc_adapter *adapter) void igc_ptp_reset(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; + u32 cycle_ctrl, ctrl; unsigned long flags; + u32 timadj; /* reset the tstamp_config */ igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); @@ -888,12 +1041,38 @@ void igc_ptp_reset(struct igc_adapter *adapter) switch (adapter->hw.mac.type) { case igc_i225: + timadj = rd32(IGC_TIMADJ); + timadj |= IGC_TIMADJ_ADJUST_METH; + wr32(IGC_TIMADJ, timadj); + wr32(IGC_TSAUXC, 0x0); wr32(IGC_TSSDP, 0x0); wr32(IGC_TSIM, IGC_TSICR_INTERRUPTS | (adapter->pps_sys_wrap_on ? IGC_TSICR_SYS_WRAP : 0)); wr32(IGC_IMS, IGC_IMS_TS); + + if (!igc_is_crosststamp_supported(adapter)) + break; + + wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); + wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); + + cycle_ctrl = IGC_PTM_CYCLE_CTRL_CYC_TIME(IGC_PTM_CYC_TIME_DEFAULT); + + wr32(IGC_PTM_CYCLE_CTRL, cycle_ctrl); + + ctrl = IGC_PTM_CTRL_EN | + IGC_PTM_CTRL_START_NOW | + IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | + IGC_PTM_CTRL_TRIG; + + wr32(IGC_PTM_CTRL, ctrl); + + /* Force the first cycle to run. */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + break; default: /* No work to do. 
*/ diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 0f82990567d9..e197a33d93a0 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -67,6 +67,9 @@ /* Filtering Registers */ #define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ +#define IGC_FHFT(_n) (0x09000 + (256 * (_n))) /* Flexible Host Filter */ +#define IGC_FHFT_EXT(_n) (0x09A00 + (256 * (_n))) /* Flexible Host Filter Extended */ +#define IGC_FHFTSL 0x05804 /* Flex Filter indirect table select */ /* ETQF register bit definitions */ #define IGC_ETQF_FILTER_ENABLE BIT(26) @@ -75,6 +78,19 @@ #define IGC_ETQF_QUEUE_MASK 0x00070000 #define IGC_ETQF_ETYPE_MASK 0x0000FFFF +/* FHFT register bit definitions */ +#define IGC_FHFT_LENGTH_MASK GENMASK(7, 0) +#define IGC_FHFT_QUEUE_SHIFT 8 +#define IGC_FHFT_QUEUE_MASK GENMASK(10, 8) +#define IGC_FHFT_PRIO_SHIFT 16 +#define IGC_FHFT_PRIO_MASK GENMASK(18, 16) +#define IGC_FHFT_IMM_INT BIT(24) +#define IGC_FHFT_DROP BIT(25) + +/* FHFTSL register bit definitions */ +#define IGC_FHFTSL_FTSL_SHIFT 0 +#define IGC_FHFTSL_FTSL_MASK GENMASK(1, 0) + /* Redirection Table - RW Array */ #define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) /* RSS Random Key - RW Array */ @@ -220,6 +236,9 @@ #define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n)) #define IGC_DTXMXPKTSZ 0x355C +#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + /* System Time Registers */ #define IGC_SYSTIML 0x0B600 /* System time register Low - RO */ #define IGC_SYSTIMH 0x0B604 /* System time register High - RO */ @@ -229,6 +248,29 @@ #define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ #define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define IGC_TIMADJ 0x0B60C /* Time Adjustment Offset Register */ + +/* PCIe Registers */ +#define IGC_PTM_CTRL 0x12540 /* PTM Control */ +#define IGC_PTM_STAT 0x12544 /* PTM Status */ +#define IGC_PTM_CYCLE_CTRL 0x1254C /* PTM Cycle Control */ + +/* PTM Time registers */ +#define IGC_PTM_T1_TIM0_L 0x12558 /* T1 on Timer 0 Low */ +#define IGC_PTM_T1_TIM0_H 0x1255C /* T1 on Timer 0 High */ + +#define IGC_PTM_CURR_T2_L 0x1258C /* Current T2 Low */ +#define IGC_PTM_CURR_T2_H 0x12590 /* Current T2 High */ +#define IGC_PTM_PREV_T2_L 0x12584 /* Previous T2 Low */ +#define IGC_PTM_PREV_T2_H 0x12588 /* Previous T2 High */ +#define IGC_PTM_PREV_T4M1 0x12578 /* T4 Minus T1 on previous PTM Cycle */ +#define IGC_PTM_CURR_T4M1 0x1257C /* T4 Minus T1 on this PTM Cycle */ +#define IGC_PTM_PREV_T3M2 0x12580 /* T3 Minus T2 on previous PTM Cycle */ +#define IGC_PTM_TDELAY 0x12594 /* PTM PCIe Link Delay */ + +#define IGC_PCIE_DIG_DELAY 0x12550 /* PCIe Digital Delay */ +#define IGC_PCIE_PHY_DELAY 0x12554 /* PCIe PHY Delay */ + /* Management registers */ #define IGC_MANC 0x05820 /* Management Control - RW */ @@ -240,6 +282,7 @@ #define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */ #define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */ #define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define IGC_WUFC_EXT 0x0580C /* Wakeup Filter Control Register Extended - RW */ /* Wake Up packet memory */ #define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index 174103c4bea6..0fce22de2ab8 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -18,8 +18,38 @@ static bool is_any_launchtime(struct igc_adapter *adapter) return false; } 
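+/* Return true if the credit-based shaper is enabled on any TX ring. */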
+static bool is_cbs_enabled(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->cbs_enable) + return true; + } + + return false; +} + +static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) +{ + unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; + + if (adapter->base_time) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_any_launchtime(adapter)) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_cbs_enabled(adapter)) + new_flags |= IGC_FLAG_TSN_QAV_ENABLED; + + return new_flags; +} + /* Returns the TSN specific registers to their default values after - * TSN offloading is disabled. + * the adapter is reset. */ static int igc_tsn_disable_offload(struct igc_adapter *adapter) { @@ -27,11 +57,6 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) u32 tqavctrl; int i; - if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)) - return 0; - - adapter->cycle_time = 0; - wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); @@ -41,18 +66,12 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) wr32(IGC_TQAVCTRL, tqavctrl); for (i = 0; i < adapter->num_tx_queues; i++) { - struct igc_ring *ring = adapter->tx_ring[i]; - - ring->start_time = 0; - ring->end_time = 0; - ring->launchtime_enable = false; - wr32(IGC_TXQCTL(i), 0); wr32(IGC_STQT(i), 0); wr32(IGC_ENDQT(i), NSEC_PER_SEC); } - wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC); + wr32(IGC_QBVCYCLET_S, 0); wr32(IGC_QBVCYCLET, NSEC_PER_SEC); adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED; @@ -68,9 +87,6 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) ktime_t base_time, systim; int i; - if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) - return 0; - cycle = adapter->cycle_time; base_time = adapter->base_time; @@ -88,6 +104,8 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; u32 txqctl = 0; + u16 cbs_value; + u32 tqavcc; wr32(IGC_STQT(i), ring->start_time); wr32(IGC_ENDQT(i), ring->end_time); @@ -105,6 +123,90 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) if (ring->launchtime_enable) txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; + /* Skip configuring CBS for Q2 and Q3 */ + if (i > 1) + goto skip_cbs; + + if (ring->cbs_enable) { + if (i == 0) + txqctl |= IGC_TXQCTL_QAV_SEL_CBS0; + else + txqctl |= IGC_TXQCTL_QAV_SEL_CBS1; + + /* According to i225 datasheet section 7.5.2.7, we + * should set the 'idleSlope' field from TQAVCC + * register following the equation: + * + * value = link-speed 0x7736 * BW * 0.2 + * ---------- * ----------------- (E1) + * 100Mbps 2.5 + * + * Note that 'link-speed' is in Mbps. + * + * 'BW' is the percentage bandwidth out of full + * link speed which can be found with the + * following equation. Note that idleSlope here + * is the parameter from this function + * which is in kbps. + * + * BW = idleSlope + * ----------------- (E2) + * link-speed * 1000 + * + * That said, we can come up with a generic + * equation to calculate the value we should set + * it TQAVCC register by replacing 'BW' in E1 by E2. + * The resulting equation is: + * + * value = link-speed * 0x7736 * idleSlope * 0.2 + * ------------------------------------- (E3) + * 100 * 2.5 * link-speed * 1000 + * + * 'link-speed' is present in both sides of the + * fraction so it is canceled out. 
The final + * equation is the following: + * + * value = idleSlope * 61036 + * ----------------- (E4) + * 2500000 + * + * NOTE: For i225, given the above, we can see + * that idleslope is represented in + * 40.959433 kbps units by the value at + * the TQAVCC register (2.5Gbps / 61036), + * which reduces the granularity for + * idleslope increments. + * + * In i225 controller, the sendSlope and loCredit + * parameters from CBS are not configurable + * by software so we don't do any + * 'controller configuration' in respect to + * these parameters. + */ + cbs_value = DIV_ROUND_UP_ULL(ring->idleslope + * 61036ULL, 2500000); + + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~IGC_TQAVCC_IDLESLOPE_MASK; + tqavcc |= cbs_value | IGC_TQAVCC_KEEP_CREDITS; + wr32(IGC_TQAVCC(i), tqavcc); + + wr32(IGC_TQAVHC(i), + 0x80000000 + ring->hicredit * 0x7735); + } else { + /* Disable any CBS for the queue */ + txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK); + + /* Set idleSlope to zero. */ + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~(IGC_TQAVCC_IDLESLOPE_MASK | + IGC_TQAVCC_KEEP_CREDITS); + wr32(IGC_TQAVCC(i), tqavcc); + + /* Set hiCredit to zero. */ + wr32(IGC_TQAVHC(i), 0); + } +skip_cbs: wr32(IGC_TXQCTL(i), txqctl); } @@ -125,33 +227,41 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) wr32(IGC_BASET_H, baset_h); wr32(IGC_BASET_L, baset_l); - adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED; - return 0; } -int igc_tsn_offload_apply(struct igc_adapter *adapter) +int igc_tsn_reset(struct igc_adapter *adapter) { - bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter); + unsigned int new_flags; + int err = 0; - if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled) - return 0; + new_flags = igc_tsn_new_flags(adapter); - if (!is_any_enabled) { - int err = igc_tsn_disable_offload(adapter); + if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED)) + return igc_tsn_disable_offload(adapter); - if (err < 0) - return err; + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; - /* The BASET registers aren't cleared when writing - * into them, force a reset if the interface is - * running. 
- */ - if (netif_running(adapter->netdev)) - schedule_work(&adapter->reset_task); + adapter->flags = new_flags; + return err; +} + +int igc_tsn_offload_apply(struct igc_adapter *adapter) +{ + int err; + + if (netif_running(adapter->netdev)) { + schedule_work(&adapter->reset_task); return 0; } - return igc_tsn_enable_offload(adapter); + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; + + adapter->flags = igc_tsn_new_flags(adapter); + return 0; } diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h index f76bc86ddccd..1512307f5a52 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.h +++ b/drivers/net/ethernet/intel/igc/igc_tsn.h @@ -5,5 +5,6 @@ #define _IGC_TSN_H_ int igc_tsn_offload_apply(struct igc_adapter *adapter); +int igc_tsn_reset(struct igc_adapter *adapter); #endif /* _IGC_BASE_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 4ceaca0f6ce3..fc26e4ddeb0d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2358,7 +2358,9 @@ static int ixgbe_set_phys_id(struct net_device *netdev, } static int ixgbe_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -2412,7 +2414,9 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) } static int ixgbe_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_q_vector *q_vector; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 14aea40da50f..24e06ba6f5e9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -10247,7 +10247,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_tx_maxrate = ixgbe_tx_maxrate, .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, - .ndo_do_ioctl = ixgbe_ioctl, + .ndo_eth_ioctl = ixgbe_ioctl, .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index e49fb1cd9a99..8380f905e708 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -787,7 +787,9 @@ static int ixgbevf_nway_reset(struct net_device *netdev) } static int ixgbevf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); @@ -811,7 +813,9 @@ static int ixgbevf_get_coalesce(struct net_device *netdev, } static int ixgbevf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_q_vector *q_vector; diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index f1b9284e0bea..1bdc4f23e1e5 100644 
--- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -734,17 +734,17 @@ jme_make_new_rx_buf(struct jme_adapter *jme, int i) if (unlikely(!skb)) return -ENOMEM; - mapping = pci_map_page(jme->pdev, virt_to_page(skb->data), + mapping = dma_map_page(&jme->pdev->dev, virt_to_page(skb->data), offset_in_page(skb->data), skb_tailroom(skb), - PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) { + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&jme->pdev->dev, mapping))) { dev_kfree_skb(skb); return -ENOMEM; } if (likely(rxbi->mapping)) - pci_unmap_page(jme->pdev, rxbi->mapping, - rxbi->len, PCI_DMA_FROMDEVICE); + dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len, + DMA_FROM_DEVICE); rxbi->skb = skb; rxbi->len = skb_tailroom(skb); @@ -760,10 +760,8 @@ jme_free_rx_buf(struct jme_adapter *jme, int i) rxbi += i; if (rxbi->skb) { - pci_unmap_page(jme->pdev, - rxbi->mapping, - rxbi->len, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len, + DMA_FROM_DEVICE); dev_kfree_skb(rxbi->skb); rxbi->skb = NULL; rxbi->mapping = 0; @@ -1005,16 +1003,12 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) rxbi += idx; skb = rxbi->skb; - pci_dma_sync_single_for_cpu(jme->pdev, - rxbi->mapping, - rxbi->len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&jme->pdev->dev, rxbi->mapping, rxbi->len, + DMA_FROM_DEVICE); if (unlikely(jme_make_new_rx_buf(jme, idx))) { - pci_dma_sync_single_for_device(jme->pdev, - rxbi->mapping, - rxbi->len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&jme->pdev->dev, rxbi->mapping, + rxbi->len, DMA_FROM_DEVICE); ++(NET_STAT(jme).rx_dropped); } else { @@ -1453,10 +1447,9 @@ static void jme_tx_clean_tasklet(struct tasklet_struct *t) ttxbi = txbi + ((i + j) & (mask)); txdesc[(i + j) & (mask)].dw[0] = 0; - pci_unmap_page(jme->pdev, - ttxbi->mapping, - ttxbi->len, - PCI_DMA_TODEVICE); + dma_unmap_page(&jme->pdev->dev, + ttxbi->mapping, ttxbi->len, + DMA_TO_DEVICE); ttxbi->mapping = 0; ttxbi->len = 0; @@ -1966,19 +1959,13 @@ jme_fill_tx_map(struct pci_dev *pdev, { dma_addr_t dmaaddr; - dmaaddr = pci_map_page(pdev, - page, - page_offset, - len, - PCI_DMA_TODEVICE); + dmaaddr = dma_map_page(&pdev->dev, page, page_offset, len, + DMA_TO_DEVICE); - if (unlikely(pci_dma_mapping_error(pdev, dmaaddr))) + if (unlikely(dma_mapping_error(&pdev->dev, dmaaddr))) return -EINVAL; - pci_dma_sync_single_for_device(pdev, - dmaaddr, - len, - PCI_DMA_TODEVICE); + dma_sync_single_for_device(&pdev->dev, dmaaddr, len, DMA_TO_DEVICE); txdesc->dw[0] = 0; txdesc->dw[1] = 0; @@ -2003,10 +1990,8 @@ static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count) for (j = 0 ; j < count ; j++) { ctxbi = txbi + ((startidx + j + 2) & (mask)); - pci_unmap_page(jme->pdev, - ctxbi->mapping, - ctxbi->len, - PCI_DMA_TODEVICE); + dma_unmap_page(&jme->pdev->dev, ctxbi->mapping, ctxbi->len, + DMA_TO_DEVICE); ctxbi->mapping = 0; ctxbi->len = 0; @@ -2400,8 +2385,10 @@ jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) mdio_memcpy(jme, p32, JME_PHY_REG_NR); } -static int -jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) +static int jme_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct jme_adapter *jme = netdev_priv(netdev); @@ -2437,8 +2424,10 @@ jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) return 0; } -static int 
-jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) +static int jme_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct jme_adapter *jme = netdev_priv(netdev); struct dynpcc_info *dpi = &(jme->dpi); @@ -2859,18 +2848,15 @@ static int jme_pci_dma64(struct pci_dev *pdev) { if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) - if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) - return 1; + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + return 1; if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(40))) - if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40))) - return 1; + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40))) + return 1; - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) - if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) - return 0; + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) + return 0; return -1; } @@ -2901,7 +2887,7 @@ static const struct net_device_ops jme_netdev_ops = { .ndo_open = jme_open, .ndo_stop = jme_close, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = jme_ioctl, + .ndo_eth_ioctl = jme_ioctl, .ndo_start_xmit = jme_start_xmit, .ndo_set_mac_address = jme_set_macaddr, .ndo_set_rx_mode = jme_set_multi, diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index b30a45725374..3e9f324f1061 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1272,7 +1272,7 @@ static const struct net_device_ops korina_netdev_ops = { .ndo_start_xmit = korina_send_packet, .ndo_set_rx_mode = korina_multicast_list, .ndo_tx_timeout = korina_tx_timeout, - .ndo_do_ioctl = korina_ioctl, + .ndo_eth_ioctl = korina_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 2d0c52f7106b..62f8c5212182 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -609,7 +609,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = { .ndo_stop = ltq_etop_stop, .ndo_start_xmit = ltq_etop_tx, .ndo_change_mtu = ltq_etop_change_mtu, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_set_mac_address = ltq_etop_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = ltq_etop_set_multicast_list, diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig new file mode 100644 index 000000000000..63bf01d28f0c --- /dev/null +++ b/drivers/net/ethernet/litex/Kconfig @@ -0,0 +1,28 @@ +# +# LiteX device configuration +# + +config NET_VENDOR_LITEX + bool "LiteX devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about LiteX devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_LITEX + +config LITEX_LITEETH + tristate "LiteX Ethernet support" + depends on OF_NET + help + If you wish to compile a kernel for hardware with a LiteX LiteEth + device then you should answer Y to this. + + LiteX is a soft system-on-chip that targets FPGAs. 
LiteETH is a basic + network device that is commonly used in LiteX designs. + +endif # NET_VENDOR_LITEX diff --git a/drivers/net/ethernet/litex/Makefile b/drivers/net/ethernet/litex/Makefile new file mode 100644 index 000000000000..9343b73b8e49 --- /dev/null +++ b/drivers/net/ethernet/litex/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the LiteX network device drivers. +# + +obj-$(CONFIG_LITEX_LITEETH) += litex_liteeth.o diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c new file mode 100644 index 000000000000..a9bdbf0dcfe1 --- /dev/null +++ b/drivers/net/ethernet/litex/litex_liteeth.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * LiteX Liteeth Ethernet + * + * Copyright 2017 Joel Stanley <joel@jms.id.au> + * + */ + +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/litex.h> +#include <linux/module.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> + +#define LITEETH_WRITER_SLOT 0x00 +#define LITEETH_WRITER_LENGTH 0x04 +#define LITEETH_WRITER_ERRORS 0x08 +#define LITEETH_WRITER_EV_STATUS 0x0C +#define LITEETH_WRITER_EV_PENDING 0x10 +#define LITEETH_WRITER_EV_ENABLE 0x14 +#define LITEETH_READER_START 0x18 +#define LITEETH_READER_READY 0x1C +#define LITEETH_READER_LEVEL 0x20 +#define LITEETH_READER_SLOT 0x24 +#define LITEETH_READER_LENGTH 0x28 +#define LITEETH_READER_EV_STATUS 0x2C +#define LITEETH_READER_EV_PENDING 0x30 +#define LITEETH_READER_EV_ENABLE 0x34 +#define LITEETH_PREAMBLE_CRC 0x38 +#define LITEETH_PREAMBLE_ERRORS 0x3C +#define LITEETH_CRC_ERRORS 0x40 + +#define LITEETH_PHY_CRG_RESET 0x00 +#define LITEETH_MDIO_W 0x04 +#define LITEETH_MDIO_R 0x0C + +#define DRV_NAME "liteeth" + +struct liteeth { + void __iomem *base; + struct net_device *netdev; + struct device *dev; + u32 slot_size; + + /* Tx */ + u32 tx_slot; + u32 num_tx_slots; + void __iomem *tx_base; + + /* Rx */ + u32 rx_slot; + u32 num_rx_slots; + void __iomem *rx_base; +}; + +static int liteeth_rx(struct net_device *netdev) +{ + struct liteeth *priv = netdev_priv(netdev); + struct sk_buff *skb; + unsigned char *data; + u8 rx_slot; + int len; + + rx_slot = litex_read8(priv->base + LITEETH_WRITER_SLOT); + len = litex_read32(priv->base + LITEETH_WRITER_LENGTH); + + if (len == 0 || len > 2048) + goto rx_drop; + + skb = netdev_alloc_skb_ip_align(netdev, len); + if (!skb) { + netdev_err(netdev, "couldn't get memory\n"); + goto rx_drop; + } + + data = skb_put(skb, len); + memcpy_fromio(data, priv->rx_base + rx_slot * priv->slot_size, len); + skb->protocol = eth_type_trans(skb, netdev); + + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += len; + + return netif_rx(skb); + +rx_drop: + netdev->stats.rx_dropped++; + netdev->stats.rx_errors++; + + return NET_RX_DROP; +} + +static irqreturn_t liteeth_interrupt(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct liteeth *priv = netdev_priv(netdev); + u8 reg; + + reg = litex_read8(priv->base + LITEETH_READER_EV_PENDING); + if (reg) { + if (netif_queue_stopped(netdev)) + netif_wake_queue(netdev); + litex_write8(priv->base + LITEETH_READER_EV_PENDING, reg); + } + + reg = litex_read8(priv->base + LITEETH_WRITER_EV_PENDING); + if (reg) { + liteeth_rx(netdev); + litex_write8(priv->base + LITEETH_WRITER_EV_PENDING, reg); + } + + return IRQ_HANDLED; +} + +static int liteeth_open(struct net_device *netdev) +{ + struct liteeth *priv = netdev_priv(netdev); + int err; + + /* Clear pending events */ + litex_write8(priv->base + LITEETH_WRITER_EV_PENDING, 1); 
+ litex_write8(priv->base + LITEETH_READER_EV_PENDING, 1); + + err = request_irq(netdev->irq, liteeth_interrupt, 0, netdev->name, netdev); + if (err) { + netdev_err(netdev, "failed to request irq %d\n", netdev->irq); + return err; + } + + /* Enable IRQs */ + litex_write8(priv->base + LITEETH_WRITER_EV_ENABLE, 1); + litex_write8(priv->base + LITEETH_READER_EV_ENABLE, 1); + + netif_carrier_on(netdev); + netif_start_queue(netdev); + + return 0; +} + +static int liteeth_stop(struct net_device *netdev) +{ + struct liteeth *priv = netdev_priv(netdev); + + netif_stop_queue(netdev); + netif_carrier_off(netdev); + + litex_write8(priv->base + LITEETH_WRITER_EV_ENABLE, 0); + litex_write8(priv->base + LITEETH_READER_EV_ENABLE, 0); + + free_irq(netdev->irq, netdev); + + return 0; +} + +static int liteeth_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct liteeth *priv = netdev_priv(netdev); + void __iomem *txbuffer; + + if (!litex_read8(priv->base + LITEETH_READER_READY)) { + if (net_ratelimit()) + netdev_err(netdev, "LITEETH_READER_READY not ready\n"); + + netif_stop_queue(netdev); + + return NETDEV_TX_BUSY; + } + + /* Reject oversize packets */ + if (unlikely(skb->len > priv->slot_size)) { + if (net_ratelimit()) + netdev_err(netdev, "tx packet too big\n"); + + dev_kfree_skb_any(skb); + netdev->stats.tx_dropped++; + netdev->stats.tx_errors++; + + return NETDEV_TX_OK; + } + + txbuffer = priv->tx_base + priv->tx_slot * priv->slot_size; + memcpy_toio(txbuffer, skb->data, skb->len); + litex_write8(priv->base + LITEETH_READER_SLOT, priv->tx_slot); + litex_write16(priv->base + LITEETH_READER_LENGTH, skb->len); + litex_write8(priv->base + LITEETH_READER_START, 1); + + netdev->stats.tx_bytes += skb->len; + netdev->stats.tx_packets++; + + priv->tx_slot = (priv->tx_slot + 1) % priv->num_tx_slots; + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +static const struct net_device_ops liteeth_netdev_ops = { + .ndo_open = liteeth_open, + .ndo_stop = liteeth_stop, + .ndo_start_xmit = liteeth_start_xmit, +}; + +static void liteeth_setup_slots(struct liteeth *priv) +{ + struct device_node *np = priv->dev->of_node; + int err; + + err = of_property_read_u32(np, "litex,rx-slots", &priv->num_rx_slots); + if (err) { + dev_dbg(priv->dev, "unable to get litex,rx-slots, using 2\n"); + priv->num_rx_slots = 2; + } + + err = of_property_read_u32(np, "litex,tx-slots", &priv->num_tx_slots); + if (err) { + dev_dbg(priv->dev, "unable to get litex,tx-slots, using 2\n"); + priv->num_tx_slots = 2; + } + + err = of_property_read_u32(np, "litex,slot-size", &priv->slot_size); + if (err) { + dev_dbg(priv->dev, "unable to get litex,slot-size, using 0x800\n"); + priv->slot_size = 0x800; + } +} + +static int liteeth_probe(struct platform_device *pdev) +{ + struct net_device *netdev; + void __iomem *buf_base; + struct liteeth *priv; + int irq, err; + + netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv)); + if (!netdev) + return -ENOMEM; + + SET_NETDEV_DEV(netdev, &pdev->dev); + platform_set_drvdata(pdev, netdev); + + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->dev = &pdev->dev; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq); + return irq; + } + netdev->irq = irq; + + priv->base = devm_platform_ioremap_resource_byname(pdev, "mac"); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + buf_base = devm_platform_ioremap_resource_byname(pdev, "buffer"); + if (IS_ERR(buf_base)) + return PTR_ERR(buf_base); + + liteeth_setup_slots(priv); + + 
/* Rx slots */ + priv->rx_base = buf_base; + priv->rx_slot = 0; + + /* Tx slots come after Rx slots */ + priv->tx_base = buf_base + priv->num_rx_slots * priv->slot_size; + priv->tx_slot = 0; + + err = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); + if (err) + eth_hw_addr_random(netdev); + + netdev->netdev_ops = &liteeth_netdev_ops; + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev %d\n", err); + return err; + } + + netdev_info(netdev, "irq %d slots: tx %d rx %d size %d\n", + netdev->irq, priv->num_tx_slots, priv->num_rx_slots, priv->slot_size); + + return 0; +} + +static int liteeth_remove(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + + unregister_netdev(netdev); + free_netdev(netdev); + + return 0; +} + +static const struct of_device_id liteeth_of_match[] = { + { .compatible = "litex,liteeth" }, + { } +}; +MODULE_DEVICE_TABLE(of, liteeth_of_match); + +static struct platform_driver liteeth_driver = { + .probe = liteeth_probe, + .remove = liteeth_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = liteeth_of_match, + }, +}; +module_platform_driver(liteeth_driver); + +MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index d207bfcaf31d..28d5ad296646 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1611,8 +1611,10 @@ static void mv643xx_eth_get_drvinfo(struct net_device *dev, strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); } -static int -mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +static int mv643xx_eth_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mv643xx_eth_private *mp = netdev_priv(dev); @@ -1622,8 +1624,10 @@ mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) return 0; } -static int -mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +static int mv643xx_eth_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mv643xx_eth_private *mp = netdev_priv(dev); @@ -3060,7 +3064,7 @@ static const struct net_device_ops mv643xx_eth_netdev_ops = { .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, .ndo_set_mac_address = mv643xx_eth_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = mv643xx_eth_ioctl, + .ndo_eth_ioctl = mv643xx_eth_ioctl, .ndo_change_mtu = mv643xx_eth_change_mtu, .ndo_set_features = mv643xx_eth_set_features, .ndo_tx_timeout = mv643xx_eth_tx_timeout, diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index de32e5b49053..9d460a270601 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2327,7 +2327,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, if (!skb) return ERR_PTR(-ENOMEM); - skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool); + skb_mark_for_recycle(skb); skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); @@ -2339,10 +2339,6 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_frag_page(frag), skb_frag_off(frag), 
skb_frag_size(frag), PAGE_SIZE); - /* We don't need to reset pp_recycle here. It's already set, so - * just mark fragments for recycling. - */ - page_pool_store_mem_info(skb_frag_page(frag), pool); } return skb; @@ -2666,7 +2662,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, return 0; if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) { - pr_info("*** Is this even possible???!?!?\n"); + pr_info("*** Is this even possible?\n"); return 0; } @@ -3832,12 +3828,20 @@ static void mvneta_validate(struct phylink_config *config, struct mvneta_port *pp = netdev_priv(ndev); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - /* We only support QSGMII, SGMII, 802.3z and RGMII modes */ - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != PHY_INTERFACE_MODE_QSGMII && - state->interface != PHY_INTERFACE_MODE_SGMII && - !phy_interface_mode_is_8023z(state->interface) && - !phy_interface_mode_is_rgmii(state->interface)) { + /* We only support QSGMII, SGMII, 802.3z and RGMII modes. + * When in 802.3z mode, we must have AN enabled: + * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... + * When <PortType> = 1 (1000BASE-X) this field must be set to 1." + */ + if (phy_interface_mode_is_8023z(state->interface)) { + if (!phylink_test(state->advertising, Autoneg)) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + } else if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_QSGMII && + state->interface != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_rgmii(state->interface)) { bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); return; } @@ -4496,8 +4500,11 @@ static int mvneta_ethtool_nway_reset(struct net_device *dev) } /* Set interrupt coalescing for ethtools */ -static int mvneta_ethtool_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) +static int +mvneta_ethtool_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(dev); int queue; @@ -4520,8 +4527,11 @@ static int mvneta_ethtool_set_coalesce(struct net_device *dev, } /* get coalescing for ethtools */ -static int mvneta_ethtool_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) +static int +mvneta_ethtool_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(dev); @@ -4986,7 +4996,7 @@ static const struct net_device_ops mvneta_netdev_ops = { .ndo_change_mtu = mvneta_change_mtu, .ndo_fix_features = mvneta_fix_features, .ndo_get_stats64 = mvneta_get_stats64, - .ndo_do_ioctl = mvneta_ioctl, + .ndo_eth_ioctl = mvneta_ioctl, .ndo_bpf = mvneta_xdp, .ndo_xdp_xmit = mvneta_xdp_xmit, .ndo_setup_tc = mvneta_setup_tc, diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 3229bafa2a2c..d5c92e43f89e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3995,7 +3995,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, } if (pp) - skb_mark_for_recycle(skb, page, pp); + skb_mark_for_recycle(skb); else dma_unmap_single_attrs(dev->dev.parent, dma_addr, bm_pool->buf_size, DMA_FROM_DEVICE, @@ -5367,8 +5367,11 @@ static int mvpp2_ethtool_nway_reset(struct net_device *dev) } /* Set interrupt 
coalescing for ethtools */ -static int mvpp2_ethtool_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) +static int +mvpp2_ethtool_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mvpp2_port *port = netdev_priv(dev); int queue; @@ -5400,8 +5403,11 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, } /* get coalescing for ethtools */ -static int mvpp2_ethtool_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) +static int +mvpp2_ethtool_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mvpp2_port *port = netdev_priv(dev); @@ -5702,7 +5708,7 @@ static const struct net_device_ops mvpp2_netdev_ops = { .ndo_set_mac_address = mvpp2_set_mac_address, .ndo_change_mtu = mvpp2_change_mtu, .ndo_get_stats64 = mvpp2_get_stats64, - .ndo_do_ioctl = mvpp2_ioctl, + .ndo_eth_ioctl = mvpp2_ioctl, .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, .ndo_set_features = mvpp2_set_features, @@ -6269,6 +6275,15 @@ static void mvpp2_phylink_validate(struct phylink_config *config, if (!mvpp2_port_supports_rgmii(port)) goto empty_set; break; + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + /* When in 802.3z mode, we must have AN enabled: + * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... + * When <PortType> = 1 (1000BASE-X) this field must be set to 1. + */ + if (!phylink_test(state->advertising, Autoneg)) + goto empty_set; + break; default: break; } diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig index 16caa02095fe..3f982ccf2c85 100644 --- a/drivers/net/ethernet/marvell/octeontx2/Kconfig +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only # -# Marvell OcteonTX2 drivers configuration +# Marvell RVU Network drivers configuration # config OCTEONTX2_MBOX @@ -12,6 +12,7 @@ config OCTEONTX2_AF select NET_DEVLINK depends on (64BIT && COMPILE_TEST) || ARM64 depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL help This driver supports Marvell's OcteonTX2 Resource Virtualization Unit's admin function manager which manages all RVU HW resources @@ -32,6 +33,7 @@ config OCTEONTX2_PF select OCTEONTX2_MBOX depends on (64BIT && COMPILE_TEST) || ARM64 depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL help This driver supports Marvell's OcteonTX2 NIC physical function. 
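The jme, mv643xx_eth, mvneta and mvpp2 hunks above all make the same mechanical conversion: ethtool's get_coalesce/set_coalesce callbacks gain a kernel_ethtool_coalesce and a netlink_ext_ack parameter. A minimal sketch of the resulting shape follows; the "foo" driver, its foo_priv layout and the rx_usecs field are hypothetical, and only one coalesce parameter is wired up for illustration.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_priv {
	u32 rx_usecs;		/* cached RX coalescing setting, in usecs */
};

static int foo_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Report the current setting; kernel_coal/extack may go unused */
	ec->rx_coalesce_usecs = priv->rx_usecs;
	return 0;
}

static int foo_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	priv->rx_usecs = ec->rx_coalesce_usecs;	/* hardware write omitted */
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_coalesce = foo_get_coalesce,
	.set_coalesce = foo_set_coalesce,
};

The ethtool core rejects fields outside supported_coalesce_params before the callback runs, so converted drivers only touch the parameters they actually support.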
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index cc8ac36cf687..7f4a4ca9af78 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # -# Makefile for Marvell's OcteonTX2 RVU Admin Function driver +# Makefile for Marvell's RVU Admin Function driver # ccflags-y += -I$(src) @@ -10,4 +10,5 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o rvu_mbox-y := mbox.o rvu_trace.o rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ - rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o + rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ + rvu_sdp.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 544c96c8fe1d..7f3d01059e19 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Marvell OcteonTx2 CGX driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/acpi.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 237ba2b56210..ab1e4abdea38 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 CGX driver +/* Marvell OcteonTx2 CGX driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef CGX_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h index aa4e42f78f13..f72ec0e2506f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 CGX driver +/* Marvell OcteonTx2 CGX driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __CGX_FW_INTF_H__ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index 47f5ed006a93..d9bea13f15b8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -1,11 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. + * Copyright (C) 2018 Marvell. 
*/ #ifndef COMMON_H @@ -64,8 +60,8 @@ static inline int qmem_alloc(struct device *dev, struct qmem **q, qmem->entry_sz = entry_sz; qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN; - qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz, - &qmem->iova, GFP_KERNEL); + qmem->base = dma_alloc_attrs(dev, qmem->alloc_sz, &qmem->iova, + GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS); if (!qmem->base) return -ENOMEM; @@ -84,9 +80,10 @@ static inline void qmem_free(struct device *dev, struct qmem *qmem) return; if (qmem->base) - dma_free_coherent(dev, qmem->alloc_sz, - qmem->base - qmem->align, - qmem->iova - qmem->align); + dma_free_attrs(dev, qmem->alloc_sz, + qmem->base - qmem->align, + qmem->iova - qmem->align, + DMA_ATTR_FORCE_CONTIGUOUS); devm_kfree(dev, qmem); } @@ -146,10 +143,7 @@ enum nix_scheduler { #define TXSCH_RR_QTM_MAX ((1 << 24) - 1) #define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX #define TXSCH_TL1_DFLT_RR_PRIO (0x1ull) -#define MAX_SCHED_WEIGHT 0xFF -#define DFLT_RR_WEIGHT 71 -#define DFLT_RR_QTM ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \ - / MAX_SCHED_WEIGHT) +#define CN10K_MAX_DWRR_WEIGHT 16384 /* Weight is 14bit on CN10K */ /* Min/Max packet sizes, excluding FCS */ #define NIC_HW_MIN_FRS 40 @@ -187,15 +181,16 @@ enum nix_scheduler { #define NIX_INTF_TYPE_CGX 0 #define NIX_INTF_TYPE_LBK 1 +#define NIX_INTF_TYPE_SDP 2 #define MAX_LMAC_PKIND 12 #define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b)) #define NIX_LINK_LBK(a) (12 + (a)) #define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c)) #define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b)) -#define NIX_CHAN_SDP_CH_START (0x700ull) - -#define SDP_CHANNELS 256 +#define NIX_CHAN_SDP_CH_START (0x700ull) +#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a)) +#define NIX_CHAN_SDP_NUM_CHANS 256 /* The mask is to extract lower 10-bits of channel number * which CPT will pass to X2P. diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index a8b7b1c7a1d5..c38306b3384a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RPM driver +/* Marvell CN10K RPM driver * * Copyright (C) 2020 Marvell. + * */ #ifndef LMAC_COMMON_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c index 0a37ca96aab8..2898931d5260 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/module.h> @@ -412,5 +409,5 @@ const char *otx2_mbox_id2name(u16 id) } EXPORT_SYMBOL(otx2_mbox_id2name); -MODULE_AUTHOR("Marvell International Ltd."); +MODULE_AUTHOR("Marvell."); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index f5ec39de026a..154877706a0e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef MBOX_H @@ -87,7 +84,7 @@ struct mbox_msghdr { #define OTX2_MBOX_REQ_SIG (0xdead) #define OTX2_MBOX_RSP_SIG (0xbeef) u16 sig; /* Signature, for validating corrupted msgs */ -#define OTX2_MBOX_VERSION (0x0007) +#define OTX2_MBOX_VERSION (0x0009) u16 ver; /* Version of msg's structure for this ID */ u16 next_msgoff; /* Offset of next msg within mailbox region */ int rc; /* Msg process'ed response code */ @@ -130,6 +127,7 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox, M(READY, 0x001, ready, msg_req, ready_msg_rsp) \ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \ M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \ +M(FREE_RSRC_CNT, 0x004, free_rsrc_cnt, msg_req, free_rsrcs_rsp) \ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \ M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \ @@ -191,6 +189,9 @@ M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \ M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \ M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \ msg_rsp) \ +/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \ +M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \ +M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \ /* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \ M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\ npc_mcam_alloc_entry_rsp) \ @@ -243,7 +244,8 @@ M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \ M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \ nix_txsch_alloc_req, nix_txsch_alloc_rsp) \ M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \ -M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \ +M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, \ + nix_txschq_config) \ M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \ M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \ nix_vtag_config_rsp) \ @@ -268,13 +270,15 @@ M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \ nix_bp_cfg_rsp) \ M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \ M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \ -M(NIX_CN10K_AQ_ENQ, 0x8019, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \ +M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \ nix_cn10k_aq_enq_rsp) \ M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \ M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \ nix_bandprof_alloc_rsp) \ M(NIX_BANDPROF_FREE, 
0x801e, nix_bandprof_free, nix_bandprof_free_req, \ - msg_rsp) + msg_rsp) \ +M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \ + nix_bandprof_get_hwinfo_rsp) /* Messages initiated by AF (range 0xC00 - 0xDFF) */ #define MBOX_UP_CGX_MESSAGES \ @@ -363,6 +367,25 @@ struct rsrc_detach { u8 cptlfs:1; }; +/* Number of resources available to the caller. + * In reply to MBOX_MSG_FREE_RSRC_CNT. + */ +struct free_rsrcs_rsp { + struct mbox_msghdr hdr; + u16 schq[NIX_TXSCH_LVL_CNT]; + u16 sso; + u16 tim; + u16 ssow; + u16 cpt; + u8 npa; + u8 nix; + u16 schq_nix1[NIX_TXSCH_LVL_CNT]; + u8 nix1; + u8 cpt1; + u8 ree0; + u8 ree1; +}; + #define MSIX_VECTOR_INVALID 0xFFFF #define MAX_RVU_BLKLF_CNT 256 @@ -370,16 +393,20 @@ struct msix_offset_rsp { struct mbox_msghdr hdr; u16 npa_msixoff; u16 nix_msixoff; - u8 sso; - u8 ssow; - u8 timlfs; - u8 cptlfs; + u16 sso; + u16 ssow; + u16 timlfs; + u16 cptlfs; u16 sso_msixoff[MAX_RVU_BLKLF_CNT]; u16 ssow_msixoff[MAX_RVU_BLKLF_CNT]; u16 timlf_msixoff[MAX_RVU_BLKLF_CNT]; u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT]; - u8 cpt1_lfs; + u16 cpt1_lfs; + u16 ree0_lfs; + u16 ree1_lfs; u16 cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT]; + u16 ree0_lf_msixoff[MAX_RVU_BLKLF_CNT]; + u16 ree1_lf_msixoff[MAX_RVU_BLKLF_CNT]; }; struct get_hw_cap_rsp { @@ -594,6 +621,7 @@ struct npa_lf_alloc_rsp { u32 stack_pg_ptrs; /* No of ptrs per stack page */ u32 stack_pg_bytes; /* Size of stack page */ u16 qints; /* NPA_AF_CONST::QINTS */ + u8 cache_lines; /*BATCH ALLOC DMA */ }; /* NPA AQ enqueue msg */ @@ -698,6 +726,9 @@ struct nix_lf_alloc_req { u16 sso_func; u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */ u64 way_mask; +#define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0) +#define NIX_LF_LBK_BLK_SEL BIT_ULL(1) + u64 flags; }; struct nix_lf_alloc_rsp { @@ -717,6 +748,7 @@ struct nix_lf_alloc_rsp { u8 cgx_links; /* No. of CGX links present in HW */ u8 lbk_links; /* No. of LBK links present in HW */ u8 sdp_links; /* No. 
of SDP links present in HW */ + u8 tx_link; /* Transmit channel link number */ }; struct nix_lf_free_req { @@ -835,6 +867,7 @@ struct nix_txsch_free_req { struct nix_txschq_config { struct mbox_msghdr hdr; u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */ + u8 read; #define TXSCHQ_IDX_SHIFT 16 #define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1) #define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK) @@ -842,6 +875,8 @@ struct nix_txschq_config { #define MAX_REGS_PER_MBOX_MSG 20 u64 reg[MAX_REGS_PER_MBOX_MSG]; u64 regval[MAX_REGS_PER_MBOX_MSG]; + /* All 0's => overwrite with new value */ + u64 regval_mask[MAX_REGS_PER_MBOX_MSG]; }; struct nix_vtag_config { @@ -1032,8 +1067,12 @@ struct nix_bp_cfg_rsp { struct nix_hw_info { struct mbox_msghdr hdr; + u16 rsvs16; u16 max_mtu; u16 min_mtu; + u32 rpm_dwrr_mtu; + u32 sdp_dwrr_mtu; + u64 rsvd[16]; /* Add reserved fields for future expansion */ }; struct nix_bandprof_alloc_req { @@ -1061,6 +1100,12 @@ struct nix_bandprof_free_req { u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC]; }; +struct nix_bandprof_get_hwinfo_rsp { + struct mbox_msghdr hdr; + u16 prof_count[BAND_PROF_NUM_LAYERS]; + u32 policer_timeunit; +}; + /* NPC mbox message structs */ #define NPC_MCAM_ENTRY_INVALID 0xFFFF @@ -1074,6 +1119,13 @@ enum npc_af_status { NPC_MCAM_ALLOC_DENIED = -702, NPC_MCAM_ALLOC_FAILED = -703, NPC_MCAM_PERM_DENIED = -704, + NPC_FLOW_INTF_INVALID = -707, + NPC_FLOW_CHAN_INVALID = -708, + NPC_FLOW_NO_NIXLF = -709, + NPC_FLOW_NOT_SUPPORTED = -710, + NPC_FLOW_VF_PERM_DENIED = -711, + NPC_FLOW_VF_NOT_INIT = -712, + NPC_FLOW_VF_OVERLAP = -713, }; struct npc_mcam_alloc_entry_req { @@ -1328,6 +1380,10 @@ struct set_vf_perm { struct lmtst_tbl_setup_req { struct mbox_msghdr hdr; + u64 dis_sched_early_comp :1; + u64 sch_ena :1; + u64 dis_line_pref :1; + u64 ssow_pf_func :13; u16 base_pcifunc; u8 use_local_lmt_region; u64 lmt_iova; @@ -1422,4 +1478,34 @@ struct cpt_rxc_time_cfg_req { u16 active_limit; }; +struct sdp_node_info { + /* Node to which this PF belongs */ + u8 node_id; + u8 max_vfs; + u8 num_pf_rings; + u8 pf_srn; +#define SDP_MAX_VFS 128 + u8 vf_rings[SDP_MAX_VFS]; +}; + +struct sdp_chan_info_msg { + struct mbox_msghdr hdr; + struct sdp_node_info info; +}; + +struct sdp_get_chan_info_msg { + struct mbox_msghdr hdr; + u16 chan_base; + u16 num_chan; +}; + +/* CGX mailbox error codes + * Range 1101 - 1200. + */ +enum cgx_af_status { + LMAC_AF_ERR_INVALID_PARAM = -1101, + LMAC_AF_ERR_PF_NOT_MAPPED = -1102, + LMAC_AF_ERR_PERM_DENIED = -1103, +}; + #endif /* MBOX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 243cf8070e77..3a819b24accc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation.
*/ #ifndef NPC_H @@ -172,6 +169,8 @@ enum key_fields { NPC_DMAC, NPC_SMAC, NPC_ETYPE, + NPC_VLAN_ETYPE_CTAG, /* 0x8100 */ + NPC_VLAN_ETYPE_STAG, /* 0x88A8 */ NPC_OUTER_VID, NPC_TOS, NPC_SIP_IPV4, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index fee655cc7523..588822a0cf21 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef NPC_PROFILE_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index 1ee37853f338..9b8e59f4c206 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Marvell PTP driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. + * */ #include <linux/bitfield.h> @@ -19,12 +20,11 @@ #define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100 #define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200 #define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300 -#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400 +#define PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP 0xB400 #define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500 -#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900 -#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00 -#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00 +#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600 #define PCI_DEVID_OCTEONTX2_RST 0xA085 +#define PCI_DEVID_CN10K_PTP 0xA09E #define PCI_PTP_BAR_NO 0 #define PCI_RST_BAR_NO 0 @@ -39,6 +39,9 @@ #define RST_MUL_BITS GENMASK_ULL(38, 33) #define CLOCK_BASE_RATE 50000000ULL +static struct ptp *first_ptp_block; +static const struct pci_device_id ptp_id_table[]; + static u64 get_clock_rate(void) { u64 cfg, ret = CLOCK_BASE_RATE * 16; @@ -74,23 +77,14 @@ error: struct ptp *ptp_get(void) { - struct pci_dev *pdev; - struct ptp *ptp; + struct ptp *ptp = first_ptp_block; - /* If the PTP pci device is found on the system and ptp - * driver is bound to it then the PTP pci device is returned - * to the caller(rvu driver). - */ - pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, - PCI_DEVID_OCTEONTX2_PTP, NULL); - if (!pdev) + /* Check PTP block is present in hardware */ + if (!pci_dev_present(ptp_id_table)) return ERR_PTR(-ENODEV); - - ptp = pci_get_drvdata(pdev); + /* Check driver is bound to PTP block */ if (!ptp) ptp = ERR_PTR(-EPROBE_DEFER); - if (IS_ERR(ptp)) - pci_dev_put(pdev); return ptp; } @@ -190,6 +184,8 @@ static int ptp_probe(struct pci_dev *pdev, writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP); pci_set_drvdata(pdev, ptp); + if (!first_ptp_block) + first_ptp_block = ptp; return 0; @@ -204,6 +200,9 @@ error: * `dev->driver_data`. 
*/ pci_set_drvdata(pdev, ERR_PTR(err)); + if (!first_ptp_block) + first_ptp_block = ERR_PTR(err); + return 0; } @@ -233,19 +232,14 @@ static const struct pci_device_id ptp_id_table[] = { PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, - PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP) }, + PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, - PCI_SUBSYS_DEVID_CN10K_A_PTP) }, - { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, - PCI_VENDOR_ID_CAVIUM, - PCI_SUBSYS_DEVID_CNF10K_A_PTP) }, - { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, - PCI_VENDOR_ID_CAVIUM, - PCI_SUBSYS_DEVID_CNF10K_B_PTP) }, + PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP) }, + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_PTP) }, { 0, } }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h index 878bc395d28f..76d404b24552 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Marvell PTP driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. + * */ #ifndef PTP_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index a91ccdc59403..07b0eafccad8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RPM driver +/* Marvell CN10K RPM driver * * Copyright (C) 2020 Marvell. * diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h index d32e74bd5964..f0b069442dcc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RPM driver +/* Marvell CN10K RPM driver * * Copyright (C) 2020 Marvell. * diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 5fe277e354f7..ce647e037f4d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/module.h> @@ -70,18 +67,21 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu) hw->cap.nix_shaping = true; hw->cap.nix_tx_link_bp = true; hw->cap.nix_rx_multicast = true; + hw->cap.nix_shaper_toggle_wait = false; hw->rvu = rvu; - if (is_rvu_96xx_B0(rvu)) { + if (is_rvu_pre_96xx_C0(rvu)) { hw->cap.nix_fixed_txschq_mapping = true; hw->cap.nix_txsch_per_cgx_lmac = 4; hw->cap.nix_txsch_per_lbk_lmac = 132; hw->cap.nix_txsch_per_sdp_lmac = 76; hw->cap.nix_shaping = false; hw->cap.nix_tx_link_bp = false; - if (is_rvu_96xx_A0(rvu)) + if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu)) hw->cap.nix_rx_multicast = false; } + if (!is_rvu_pre_96xx_C0(rvu)) + hw->cap.nix_shaper_toggle_wait = true; if (!is_rvu_otx2(rvu)) hw->cap.per_pf_mbox_regs = true; @@ -498,12 +498,15 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf) static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg) { struct rvu_block *block = &rvu->hw->block[blkaddr]; + int err; if (!block->implemented) return; rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0)); - rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true); + err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true); + if (err) + dev_err(rvu->dev, "HW block:%d reset failed\n", blkaddr); } static void rvu_reset_all_blocks(struct rvu *rvu) @@ -924,16 +927,26 @@ static int rvu_setup_hw_resources(struct rvu *rvu) block->lfreset_reg = NPA_AF_LF_RST; sprintf(block->name, "NPA"); err = rvu_alloc_bitmap(&block->lf); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate NPA LF bitmap\n", __func__); return err; + } nix: err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate NIX0 LFs bitmap\n", __func__); return err; + } + err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate NIX1 LFs bitmap\n", __func__); return err; + } /* Init SSO group's bitmap */ block = &hw->block[BLKADDR_SSO]; @@ -953,8 +966,11 @@ nix: block->lfreset_reg = SSO_AF_LF_HWGRP_RST; sprintf(block->name, "SSO GROUP"); err = rvu_alloc_bitmap(&block->lf); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate SSO LF bitmap\n", __func__); return err; + } ssow: /* Init SSO workslot's bitmap */ @@ -974,8 +990,11 @@ ssow: block->lfreset_reg = SSOW_AF_LF_HWS_RST; sprintf(block->name, "SSOWS"); err = rvu_alloc_bitmap(&block->lf); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate SSOW LF bitmap\n", __func__); return err; + } tim: /* Init TIM LF's bitmap */ @@ -996,35 +1015,53 @@ tim: block->lfreset_reg = TIM_AF_LF_RST; sprintf(block->name, "TIM"); err = rvu_alloc_bitmap(&block->lf); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate TIM LF bitmap\n", __func__); return err; + } cpt: err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate CPT0 LF bitmap\n", __func__); return err; + } err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate CPT1 LF bitmap\n", __func__); return err; + } /* Allocate memory for PFVF data */ rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs, sizeof(struct rvu_pfvf), GFP_KERNEL); - if (!rvu->pf) + if (!rvu->pf) { + dev_err(rvu->dev, + "%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__); return -ENOMEM; + } rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs, sizeof(struct rvu_pfvf), GFP_KERNEL); - if (!rvu->hwvf) 
+ if (!rvu->hwvf) { + dev_err(rvu->dev, + "%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__); return -ENOMEM; + } mutex_init(&rvu->rsrc_lock); rvu_fwdata_init(rvu); err = rvu_setup_msix_resources(rvu); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to setup MSIX resources\n", __func__); return err; + } for (blkid = 0; blkid < BLK_COUNT; blkid++) { block = &hw->block[blkid]; @@ -1050,25 +1087,39 @@ cpt: goto msix_err; err = rvu_npc_init(rvu); - if (err) + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__); goto npc_err; + } err = rvu_cgx_init(rvu); - if (err) + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__); goto cgx_err; + } /* Assign MACs for CGX mapped functions */ rvu_setup_pfvf_macaddress(rvu); err = rvu_npa_init(rvu); - if (err) + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__); goto npa_err; + } rvu_get_lbk_bufsize(rvu); err = rvu_nix_init(rvu); - if (err) + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__); goto nix_err; + } + + err = rvu_sdp_init(rvu); + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__); + goto nix_err; + } rvu_program_channels(rvu); @@ -1322,9 +1373,10 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) int blkaddr = BLKADDR_NIX0, vf; struct rvu_pfvf *pf; + pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); + /* All CGX mapped PFs are set with assigned NIX block during init */ if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { - pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); blkaddr = pf->nix_blkaddr; } else if (is_afvf(pcifunc)) { vf = pcifunc - 1; @@ -1337,6 +1389,10 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) blkaddr = BLKADDR_NIX0; } + /* if SDP1 then the blkaddr is NIX1 */ + if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1) + blkaddr = BLKADDR_NIX1; + switch (blkaddr) { case BLKADDR_NIX1: pfvf->nix_blkaddr = BLKADDR_NIX1; @@ -1737,6 +1793,99 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, return 0; } +int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req, + struct free_rsrcs_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + + mutex_lock(&rvu->rsrc_lock); + + block = &hw->block[BLKADDR_NPA]; + rsp->npa = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_NIX0]; + rsp->nix = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_NIX1]; + rsp->nix1 = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_SSO]; + rsp->sso = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_SSOW]; + rsp->ssow = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_TIM]; + rsp->tim = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_CPT0]; + rsp->cpt = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_CPT1]; + rsp->cpt1 = rvu_rsrc_free_count(&block->lf); + + if (rvu->hw->cap.nix_fixed_txschq_mapping) { + rsp->schq[NIX_TXSCH_LVL_SMQ] = 1; + rsp->schq[NIX_TXSCH_LVL_TL4] = 1; + rsp->schq[NIX_TXSCH_LVL_TL3] = 1; + rsp->schq[NIX_TXSCH_LVL_TL2] = 1; + /* NIX1 */ + if (!is_block_implemented(rvu->hw, BLKADDR_NIX1)) + goto out; + rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1; + rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1; + rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1; + rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1; + } else { + nix_hw = get_nix_hw(hw, BLKADDR_NIX0); + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; + 
rsp->schq[NIX_TXSCH_LVL_SMQ] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4]; + rsp->schq[NIX_TXSCH_LVL_TL4] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3]; + rsp->schq[NIX_TXSCH_LVL_TL3] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; + rsp->schq[NIX_TXSCH_LVL_TL2] = + rvu_rsrc_free_count(&txsch->schq); + + if (!is_block_implemented(rvu->hw, BLKADDR_NIX1)) + goto out; + + nix_hw = get_nix_hw(hw, BLKADDR_NIX1); + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; + rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4]; + rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3]; + rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; + rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = + rvu_rsrc_free_count(&txsch->schq); + } + + rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1; +out: + rsp->schq[NIX_TXSCH_LVL_TL1] = 1; + mutex_unlock(&rvu->rsrc_lock); + + return 0; +} + int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { @@ -2402,11 +2551,12 @@ static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs) for (vf = 0; vf < numvfs; vf++) { if (!(intr & BIT_ULL(vf))) continue; - dev = vf + start_vf + rvu->hw->total_pfs; - queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work); /* Clear and disable the interrupt */ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf)); + + dev = vf + start_vf + rvu->hw->total_pfs; + queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work); } } @@ -2422,14 +2572,14 @@ static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq) for (pf = 0; pf < rvu->hw->total_pfs; pf++) { if (intr & (1ULL << pf)) { - /* PF is already dead do only AF related operations */ - queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work); /* clear interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT, BIT_ULL(pf)); /* Disable the interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, BIT_ULL(pf)); + /* PF is already dead do only AF related operations */ + queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work); } } @@ -2984,27 +3134,37 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF, rvu->hw->total_pfs, rvu_afpf_mbox_handler, rvu_afpf_mbox_up_handler); - if (err) + if (err) { + dev_err(dev, "%s: Failed to initialize mbox\n", __func__); goto err_hwsetup; + } err = rvu_flr_init(rvu); - if (err) + if (err) { + dev_err(dev, "%s: Failed to initialize flr\n", __func__); goto err_mbox; + } err = rvu_register_interrupts(rvu); - if (err) + if (err) { + dev_err(dev, "%s: Failed to register interrupts\n", __func__); goto err_flr; + } err = rvu_register_dl(rvu); - if (err) + if (err) { + dev_err(dev, "%s: Failed to register devlink\n", __func__); goto err_irq; + } rvu_setup_rvum_blk_revid(rvu); /* Enable AF's VFs (if any) */ err = rvu_enable_sriov(rvu); - if (err) + if (err) { + dev_err(dev, "%s: Failed to enable sriov\n", __func__); goto err_dl; + } /* Initialize debugfs */ rvu_dbg_init(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 91503fb2762c..d38e5c980c30 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef RVU_H @@ -243,8 +240,11 @@ struct rvu_pfvf { u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ + u8 lbkid; /* NIX0/1 lbk link ID */ u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */ + u64 lmt_map_ent_w1; /* Preserving the word1 of lmtst map table entry */ unsigned long flags; + struct sdp_node_info *sdp_info; }; enum rvu_pfvf_flags { @@ -314,6 +314,7 @@ struct nix_hw { struct nix_lso lso; struct nix_txvlan txvlan; struct nix_ipolicer *ipolicer; + u64 *tx_credits; }; /* RVU block's capabilities or functionality, @@ -327,8 +328,10 @@ struct hw_cap { u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */ bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ bool nix_shaping; /* Is shaping and coloring supported */ + bool nix_shaper_toggle_wait; /* Shaping toggle needs poll/wait */ bool nix_tx_link_bp; /* Can link backpressure TL queues ? */ bool nix_rx_multicast; /* Rx packet replication support */ + bool nix_common_dwrr_mtu; /* Common DWRR MTU for quantum config */ bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */ bool programmable_chans; /* Channels programmable ? */ bool ipolicer; @@ -355,6 +358,7 @@ struct rvu_hwinfo { u16 npc_counters; /* No of match stats counters */ u32 lbk_bufsize; /* FIFO size supported by LBK */ bool npc_ext_set; /* Extended register set */ + u64 npc_stat_ena; /* Match stats enable bit */ struct hw_cap cap; struct rvu_block block[BLK_COUNT]; /* Block info */ @@ -514,20 +518,34 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset) } /* Silicon revisions */ +static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + /* 96XX A0/B0, 95XX A0/A1/B0 chips */ + return ((pdev->revision == 0x00) || (pdev->revision == 0x01) || + (pdev->revision == 0x10) || (pdev->revision == 0x11) || + (pdev->revision == 0x14)); +} + static inline bool is_rvu_96xx_A0(struct rvu *rvu) { struct pci_dev *pdev = rvu->pdev; - return (pdev->revision == 0x00) && - (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX); + return (pdev->revision == 0x00); } static inline bool is_rvu_96xx_B0(struct rvu *rvu) { struct pci_dev *pdev = rvu->pdev; - return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) && - (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX); + return (pdev->revision == 0x00) || (pdev->revision == 0x01); +} + +static inline bool is_rvu_95xx_A0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + return (pdev->revision == 0x10) || (pdev->revision == 0x11); } /* REVID for PCIe devices.
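The revision helpers above stop qualifying the check with the 96XX subsystem ID and decode the raw PCI revision byte instead: the upper nibble names the part (the PCI_REVISION_ID_* values in the next hunk) and the lower nibble the stepping. A hypothetical helper in the same style, shown only to illustrate the decoding:

/* Illustrative only: match a silicon family by its upper revision
 * nibble; 0xE0 corresponds to PCI_REVISION_ID_95XXO below. */
static inline bool is_rvu_95xxo(struct rvu *rvu)
{
	return (rvu->pdev->revision & 0xF0) == 0xE0;
}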
@@ -536,9 +554,10 @@ static inline bool is_rvu_96xx_B0(struct rvu *rvu) */ #define PCI_REVISION_ID_96XX 0x00 #define PCI_REVISION_ID_95XX 0x10 -#define PCI_REVISION_ID_LOKI 0x20 +#define PCI_REVISION_ID_95XXN 0x20 #define PCI_REVISION_ID_98XX 0x30 #define PCI_REVISION_ID_95XXMM 0x40 +#define PCI_REVISION_ID_95XXO 0xE0 static inline bool is_rvu_otx2(struct rvu *rvu) { @@ -547,8 +566,8 @@ static inline bool is_rvu_otx2(struct rvu *rvu) u8 midr = pdev->revision & 0xF0; return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX || - midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX || - midr == PCI_REVISION_ID_95XXMM); + midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX || + midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO); } static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid, @@ -578,6 +597,16 @@ static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid, return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan; } +static inline u16 rvu_nix_chan_sdp(struct rvu *rvu, u8 chan) +{ + struct rvu_hwinfo *hw = rvu->hw; + + if (!hw->cap.programmable_chans) + return NIX_CHAN_SDP_CHX(chan); + + return hw->sdp_chan_base + chan; +} + static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan) { return rvu->hw->cpt_chan_base + chan; @@ -640,10 +669,17 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, int qsize, int inst_size, int res_size); void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq); +/* SDP APIs */ +int rvu_sdp_init(struct rvu *rvu); +bool is_sdp_pfvf(u16 pcifunc); +bool is_sdp_pf(u16 pcifunc); +bool is_sdp_vf(u16 pcifunc); + /* CGX APIs */ static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) { - return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs); + return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) && + !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT); } static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) @@ -706,6 +742,8 @@ int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, struct nix_cn10k_aq_enq_rsp *aq_rsp, u16 pcifunc, u8 ctype, u32 qidx); int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc); +u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu); +u32 convert_bytes_to_dwrr_mtu(u32 bytes); /* NPC APIs */ int rvu_npc_init(struct rvu *rvu); @@ -745,7 +783,6 @@ bool is_npc_intf_tx(u8 intf); bool is_npc_intf_rx(u8 intf); bool is_npc_interface_valid(struct rvu *rvu, u8 intf); int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena); -int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel); int npc_flow_steering_init(struct rvu *rvu, int blkaddr); const char *npc_get_field_name(u8 hdr); int npc_get_bank(struct npc_mcam *mcam, int index); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index fe99ac4a4dd8..81e8ea9ee30e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/types.h> @@ -448,7 +445,7 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, pcifunc)) - return -EPERM; + return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); @@ -507,7 +504,7 @@ static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req, void *cgxd; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) - return -ENODEV; + return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); cgxd = rvu_cgx_pdata(cgx_idx, rvu); @@ -561,7 +558,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu, void *cgxd; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) - return -EPERM; + return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); cgxd = rvu_cgx_pdata(cgx_idx, rvu); @@ -888,7 +885,7 @@ int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req, u8 cgx_id, lmac_id; if (!is_pf_cgxmapped(rvu, pf)) - return -EPERM; + return LMAC_AF_ERR_PF_NOT_MAPPED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id); @@ -1046,7 +1043,7 @@ int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req, u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) - return -EPERM; + return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); return cgx_lmac_addr_reset(cgx_id, lmac_id); @@ -1060,7 +1057,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) - return -EPERM; + return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 8d48b64485c6..46a41cfff575 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell RPM CN10K driver +/* Marvell RPM CN10K driver * * Copyright (C) 2020 Marvell. 
*/ @@ -49,6 +49,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, return 0; } +#define LMT_MAP_TBL_W1_OFF 8 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) { return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + @@ -82,10 +83,10 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val); return -EIO; } - /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21] + /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18] * PA[11:0] = IOVA[11:0] */ - pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21; + pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18; pa &= GENMASK_ULL(39, 0); *lmt_addr = (pa << 12) | (iova & 0xFFF); @@ -131,9 +132,11 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, struct lmtst_tbl_setup_req *req, struct msg_rsp *rsp) { - u64 lmt_addr, val; - u32 pri_tbl_idx; + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + u32 pri_tbl_idx, tbl_idx; + u64 lmt_addr; int err = 0; + u64 val; /* Check if PF_FUNC wants to use its own local memory as LMTLINE * region, if so, convert that IOVA to physical address and @@ -170,7 +173,7 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, dev_err(rvu->dev, "Failed to read LMT map table: index 0x%x err %d\n", pri_tbl_idx, err); - return err; + goto error; } /* Update the base lmt addr of secondary with primary's base @@ -181,7 +184,53 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, return err; } - return 0; + /* This mailbox can also be used to update word1 of APR_LMT_MAP_ENTRY_S + * like enabling scheduled LMTST, disable LMTLINE prefetch, disable + * early completion for ordered LMTST. + */ + if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) { + tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc); + err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF, + &val, LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + tbl_idx + LMT_MAP_TBL_W1_OFF, err); + goto error; + } + + /* Storing lmt map table entry word1 default value as this needs + * to be reverted in FLR. Also making sure this default value + * doesn't get overwritten on multiple calls to this mailbox. + */ + if (!pfvf->lmt_map_ent_w1) + pfvf->lmt_map_ent_w1 = val; + + /* Disable early completion for Ordered LMTSTs. */ + if (req->dis_sched_early_comp) + val |= (req->dis_sched_early_comp << + APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT); + /* Enable scheduled LMTST */ + if (req->sch_ena) + val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) | + req->ssow_pf_func; + /* Disables LMTLINE prefetch before receiving store data. */ + if (req->dis_line_pref) + val |= (req->dis_line_pref << + APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT); + + err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF, + &val, LMT_TBL_OP_WRITE); + if (err) { + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx + LMT_MAP_TBL_W1_OFF, err); + goto error; + } + } + +error: + return err; } /* Resetting the lmtst map table to original base addresses */ @@ -194,27 +243,45 @@ void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc) if (is_rvu_otx2(rvu)) return; - if (pfvf->lmt_base_addr) { + if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) { /* This corresponds to lmt map table index */ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); /* Reverting back original lmt base addr for respective * pcifunc.
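The word1 handling added above is a read-modify-write of the second 64-bit word of an APR_LMT_MAP_ENTRY_S, with the first-seen value cached in pfvf->lmt_map_ent_w1 so FLR can restore it. A rough userspace model of that flow follows; the ENT_* shift positions are invented placeholders (the real APR_LMT_MAP_ENT_* values live in headers outside this hunk), so treat this strictly as a sketch of the idiom.

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions, NOT the hardware's real layout. */
#define ENT_SCH_ENA_SHIFT       16
#define ENT_DIS_SCH_CMP_SHIFT   17
#define ENT_DIS_LINE_PREF_SHIFT 18

static uint64_t update_word1(uint64_t word1, uint64_t *saved_w1,
                             int sch_ena, uint16_t ssow_pf_func,
                             int dis_sched_early_comp, int dis_line_pref)
{
    if (!*saved_w1)      /* remember the default once, for later restore */
        *saved_w1 = word1;

    if (dis_sched_early_comp)
        word1 |= 1ULL << ENT_DIS_SCH_CMP_SHIFT;
    if (sch_ena)         /* scheduled LMTST also records the SSOW PF_FUNC */
        word1 |= (1ULL << ENT_SCH_ENA_SHIFT) | ssow_pf_func;
    if (dis_line_pref)
        word1 |= 1ULL << ENT_DIS_LINE_PREF_SHIFT;

    return word1;
}

int main(void)
{
    uint64_t saved = 0;
    uint64_t w1 = update_word1(0, &saved, 1, 0x400, 0, 1);

    printf("word1=%#llx saved=%#llx\n",
           (unsigned long long)w1, (unsigned long long)saved);
    return 0;
}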
*/ - err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr, - LMT_TBL_OP_WRITE); - if (err) - dev_err(rvu->dev, - "Failed to update LMT map table: index 0x%x err %d\n", - tbl_idx, err); - pfvf->lmt_base_addr = 0; + if (pfvf->lmt_base_addr) { + err = lmtst_map_table_ops(rvu, tbl_idx, + &pfvf->lmt_base_addr, + LMT_TBL_OP_WRITE); + if (err) + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx, err); + pfvf->lmt_base_addr = 0; + } + /* Reverting back to original word1 val of lmtst map table entry + * which underwent changes. + */ + if (pfvf->lmt_map_ent_w1) { + err = lmtst_map_table_ops(rvu, + tbl_idx + LMT_MAP_TBL_W1_OFF, + &pfvf->lmt_map_ent_w1, + LMT_TBL_OP_WRITE); + if (err) + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx + LMT_MAP_TBL_W1_OFF, err); + pfvf->lmt_map_ent_w1 = 0; + } } } int rvu_set_channels_base(struct rvu *rvu) { + u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans; + u16 sdp_chan_base, cgx_chan_base, cpt_chan_base; struct rvu_hwinfo *hw = rvu->hw; - u16 cpt_chan_base; - u64 nix_const; + u64 nix_const, nix_const1; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); @@ -222,6 +289,7 @@ int rvu_set_channels_base(struct rvu *rvu) return blkaddr; nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST); + nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); hw->cgx = (nix_const >> 12) & 0xFULL; hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL; @@ -244,14 +312,24 @@ int rvu_set_channels_base(struct rvu *rvu) * channels such that all channel numbers are contiguous * leaving no holes. This way the new CPT channels can be * accommodated. The order of channel numbers assigned is - * LBK, SDP, CGX and CPT. + * LBK, SDP, CGX and CPT. Also the base channel number + * of a block must be multiple of number of channels + * of the block.
*/ - hw->sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * - ((nix_const >> 16) & 0xFFULL); - hw->cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * SDP_CHANNELS; + nr_lbk_chans = (nix_const >> 16) & 0xFFULL; + nr_sdp_chans = nix_const1 & 0xFFFULL; + nr_cgx_chans = nix_const & 0xFFULL; + nr_cpt_chans = (nix_const >> 32) & 0xFFFULL; + + sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans; + /* Round up base channel to multiple of number of channels */ + hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans); + + cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans; + hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans); - cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * - (nix_const & 0xFFULL); + cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans; + hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans); /* Out of 4096 channels start CPT from 2048 so * that MSB for CPT channels is always set @@ -355,6 +433,7 @@ err_put: static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr) { + u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST); u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans; struct rvu_hwinfo *hw = rvu->hw; @@ -364,7 +443,7 @@ static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr) cgx_chans = nix_const & 0xFFULL; lbk_chans = (nix_const >> 16) & 0xFFULL; - sdp_chans = SDP_CHANNELS; + sdp_chans = nix_const1 & 0xFFFULL; cpt_chans = (nix_const >> 32) & 0xFFFULL; start = hw->cgx_chan_base; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 89253f7bdadb..1f90a7403392 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -1,5 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (C) 2020 Marvell. */ +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2020 Marvell. + * + */ #include <linux/bitfield.h> #include <linux/pci.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 9b2dfbf90e51..9338765da048 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2019 Marvell International Ltd. + * Copyright (C) 2019 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 2688186066d9..274d3abe30eb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Devlink +/* Marvell RVU Admin Function Devlink * * Copyright (C) 2020 Marvell. 
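The rvu_set_channels_base() rework above reduces to: walk the blocks in LBK, SDP, CGX, CPT order and round each base up to a multiple of that block's channel count, so the low bits of any channel number stay a pure per-block index. A standalone sketch with invented counts (the driver reads the real ones from NIX_AF_CONST and NIX_AF_CONST1); the arithmetic round-up here matches the kernel's power-of-two ALIGN() for the power-of-two counts the hardware reports.

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
    /* Illustrative link/channel counts, not real silicon values. */
    uint16_t lbk_links = 4, sdp_links = 1, cgx_links = 12;
    uint16_t nr_lbk = 64, nr_sdp = 256, nr_cgx = 16, nr_cpt = 64;
    uint16_t lbk_base = 0, sdp_base, cgx_base, cpt_base;

    sdp_base = ALIGN_UP(lbk_base + lbk_links * nr_lbk, nr_sdp);
    cgx_base = ALIGN_UP(sdp_base + sdp_links * nr_sdp, nr_cgx);
    cpt_base = ALIGN_UP(cgx_base + cgx_links * nr_cgx, nr_cpt);

    printf("SDP base %u, CGX base %u, CPT base %u\n",
           sdp_base, cgx_base, cpt_base);
    return 0;
}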
* @@ -1364,6 +1364,89 @@ static void rvu_health_reporters_destroy(struct rvu *rvu) rvu_nix_health_reporters_destroy(rvu_dl); } +/* Devlink Params APIs */ +static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + int dwrr_mtu = val.vu32; + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + + if (!rvu->hw->cap.nix_common_dwrr_mtu) { + NL_SET_ERR_MSG_MOD(extack, + "Setting DWRR_MTU is not supported on this silicon"); + return -EOPNOTSUPP; + } + + if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) && + (dwrr_mtu != 9728 && dwrr_mtu != 10240)) { + NL_SET_ERR_MSG_MOD(extack, + "Invalid, supported MTUs are 0,2,4,8.16,32,64....4K,8K,32K,64K and 9728, 10240"); + return -EINVAL; + } + + nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0); + if (!nix_hw) + return -ENODEV; + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; + if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) { + NL_SET_ERR_MSG_MOD(extack, + "Changing DWRR MTU is not supported when there are active NIXLFs"); + NL_SET_ERR_MSG_MOD(extack, + "Make sure none of the PF/VF interfaces are initialized and retry"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + u64 dwrr_mtu; + + dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32); + rvu_write64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU, dwrr_mtu); + + return 0; +} + +static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + u64 dwrr_mtu; + + if (!rvu->hw->cap.nix_common_dwrr_mtu) + return -EOPNOTSUPP; + + dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU); + ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu); + + return 0; +} + +enum rvu_af_dl_param_id { + RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, +}; + +static const struct devlink_param rvu_af_dl_params[] = { + DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, + "dwrr_mtu", DEVLINK_PARAM_TYPE_U32, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set, + rvu_af_dl_dwrr_mtu_validate), +}; + +/* Devlink switch mode */ static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); @@ -1420,13 +1503,14 @@ int rvu_register_dl(struct rvu *rvu) struct devlink *dl; int err; - dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink)); + dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink), + rvu->dev); if (!dl) { dev_warn(rvu->dev, "devlink_alloc failed\n"); return -ENOMEM; } - err = devlink_register(dl, rvu->dev); + err = devlink_register(dl); if (err) { dev_err(rvu->dev, "devlink register failed with error %d\n", err); devlink_free(dl); @@ -1438,7 +1522,30 @@ int rvu_register_dl(struct rvu *rvu) rvu_dl->rvu = rvu; rvu->rvu_dl = rvu_dl; - return rvu_health_reporters_create(rvu); + err = rvu_health_reporters_create(rvu); + if (err) { + dev_err(rvu->dev, + "devlink health reporter creation failed with error %d\n", err); + goto err_dl_health; + } + + err = devlink_params_register(dl, rvu_af_dl_params, + ARRAY_SIZE(rvu_af_dl_params)); + if (err) { + dev_err(rvu->dev, + "devlink 
params register failed with error %d", err); + goto err_dl_health; + } + + devlink_params_publish(dl); + + return 0; + +err_dl_health: + rvu_health_reporters_destroy(rvu); + devlink_unregister(dl); + devlink_free(dl); + return err; } void rvu_unregister_dl(struct rvu *rvu) @@ -1449,6 +1556,8 @@ void rvu_unregister_dl(struct rvu *rvu) if (!dl) return; + devlink_params_unregister(dl, rvu_af_dl_params, + ARRAY_SIZE(rvu_af_dl_params)); rvu_health_reporters_destroy(rvu); devlink_unregister(dl); devlink_free(dl); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h index 471e57dedb20..51efe88dce11 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Devlink +/* Marvell RVU Admin Function Devlink * * Copyright (C) 2020 Marvell. * diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 4bfbbdf38770..9ef4e942e31e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/module.h> @@ -25,7 +22,7 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, int type, bool add); static int nix_setup_ipolicers(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr); -static void nix_ipolicer_freemem(struct nix_hw *nix_hw); +static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw); static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, struct nix_hw *nix_hw, u16 pcifunc); static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); @@ -192,6 +189,47 @@ struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) return NULL; } +u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu) +{ + dwrr_mtu &= 0x1FULL; + + /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. + * Value of 4 is reserved for MTU value of 9728 bytes. + * Value of 5 is reserved for MTU value of 10240 bytes. + */ + switch (dwrr_mtu) { + case 4: + return 9728; + case 5: + return 10240; + default: + return BIT_ULL(dwrr_mtu); + } + + return 0; +} + +u32 convert_bytes_to_dwrr_mtu(u32 bytes) +{ + /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. + * Value of 4 is reserved for MTU value of 9728 bytes. + * Value of 5 is reserved for MTU value of 10240 bytes. 
+ */ + if (bytes > BIT_ULL(16)) + return 0; + + switch (bytes) { + case 9728: + return 4; + case 10240: + return 5; + default: + return ilog2(bytes); + } + + return 0; +} + static void nix_rx_sync(struct rvu *rvu, int blkaddr) { int err; @@ -249,16 +287,22 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, return true; } -static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) +static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, + struct nix_lf_alloc_rsp *rsp, bool loop) { - struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc); + u16 req_chan_base, req_chan_end, req_chan_cnt; + struct rvu_hwinfo *hw = rvu->hw; + struct sdp_node_info *sdp_info; + int pkind, pf, vf, lbkid, vfid; struct mac_ops *mac_ops; - int pkind, pf, vf, lbkid; u8 cgx_id, lmac_id; + bool from_vf; int err; pf = rvu_get_pf(pcifunc); - if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && + type != NIX_INTF_TYPE_SDP) return 0; switch (type) { @@ -276,10 +320,13 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) pfvf->tx_chan_base = pfvf->rx_chan_base; pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; + rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id; + cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); rvu_npc_set_pkind(rvu, pkind, pfvf); mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu)); + /* By default we enable pause frames */ if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0) mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id, @@ -299,6 +346,25 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) if (rvu->hw->lbk_links > 1) lbkid = vf & 0x1 ? 0 : 1; + /* By default NIX0 is configured to send packet on lbk link 1 + * (which corresponds to LBK1), same packet will receive on + * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0 + * (which corresponds to LBK2) packet will receive on NIX0 lbk + * link 1. + * But if lbk links for NIX0 and NIX1 are negated, i.e. NIX0 + * transmits and receives on lbk link 0, which corresponds + * to LBK1 block, back to back connectivity between NIX and + * LBK can be achieved (which is similar to 96xx) + * + * RX TX + * NIX0 lbk link 1 (LBK2) 1 (LBK1) + * NIX0 lbk link 0 (LBK0) 0 (LBK0) + * NIX1 lbk link 0 (LBK1) 0 (LBK2) + * NIX1 lbk link 1 (LBK3) 1 (LBK3) + */ + if (loop) + lbkid = !lbkid; + /* Note that AF's VFs work in pairs and talk over consecutive * loopback channels. Therefore if odd number of AF VFs are * enabled then the last VF remains with no pair.
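For reference, the two DWRR helpers above implement a small log2-style encoding: the register field holds log2 of the byte count for powers of two up to 64K, with codes 4 and 5 reserved for the non-power-of-two jumbo sizes 9728 and 10240. The userspace re-implementation below round-trips that encoding; compiler builtins stand in for the kernel's ilog2()/BIT_ULL(), and a zero-byte guard is added since __builtin_clz(0) is undefined.

#include <stdint.h>
#include <stdio.h>

static uint32_t dwrr_mtu_to_bytes(uint8_t dwrr_mtu)
{
    dwrr_mtu &= 0x1F;
    switch (dwrr_mtu) {
    case 4:  return 9728;
    case 5:  return 10240;
    default: return 1U << dwrr_mtu;
    }
}

static uint32_t bytes_to_dwrr_mtu(uint32_t bytes)
{
    if (!bytes || bytes > (1U << 16))
        return 0;
    switch (bytes) {
    case 9728:  return 4;
    case 10240: return 5;
    default:    return 31 - __builtin_clz(bytes); /* ilog2() stand-in */
    }
}

int main(void)
{
    uint32_t sizes[] = { 8192, 9728, 10240, 65536 };

    for (unsigned int i = 0; i < 4; i++)
        printf("%u bytes -> enc %u -> %u bytes\n", sizes[i],
               bytes_to_dwrr_mtu(sizes[i]),
               dwrr_mtu_to_bytes(bytes_to_dwrr_mtu(sizes[i])));
    return 0;
}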
@@ -309,10 +375,51 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) rvu_nix_chan_lbk(rvu, lbkid, vf + 1); pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; + rsp->tx_link = hw->cgx_links + lbkid; + pfvf->lbkid = lbkid; rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf); rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, pfvf->rx_chan_cnt); + + break; + case NIX_INTF_TYPE_SDP: + from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); + parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; + sdp_info = parent_pf->sdp_info; + if (!sdp_info) { + dev_err(rvu->dev, "Invalid sdp_info pointer\n"); + return -EINVAL; + } + if (from_vf) { + req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn + + sdp_info->num_pf_rings; + vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; + for (vfid = 0; vfid < vf; vfid++) + req_chan_base += sdp_info->vf_rings[vfid]; + req_chan_cnt = sdp_info->vf_rings[vf]; + req_chan_end = req_chan_base + req_chan_cnt - 1; + if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) || + req_chan_end > rvu_nix_chan_sdp(rvu, 255)) { + dev_err(rvu->dev, + "PF_Func 0x%x: Invalid channel base and count\n", + pcifunc); + return -EINVAL; + } + } else { + req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn; + req_chan_cnt = sdp_info->num_pf_rings; + } + + pfvf->rx_chan_base = req_chan_base; + pfvf->rx_chan_cnt = req_chan_cnt; + pfvf->tx_chan_base = pfvf->rx_chan_base; + pfvf->tx_chan_cnt = pfvf->rx_chan_cnt; + + rsp->tx_link = hw->cgx_links + hw->lbk_links; + rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, + pfvf->rx_chan_cnt); break; } @@ -393,9 +500,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, int type, int chan_id) { - int bpid, blkaddr, lmac_chan_cnt; + int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt; + u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt; struct rvu_hwinfo *hw = rvu->hw; - u16 cgx_bpid_cnt, lbk_bpid_cnt; struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; u64 cfg; @@ -404,8 +511,12 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); lmac_chan_cnt = cfg & 0xFF; + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + sdp_chan_cnt = cfg & 0xFFF; + cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); + sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt; pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); @@ -443,6 +554,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) return -EINVAL; break; + case NIX_INTF_TYPE_SDP: + if ((req->chan_base + req->chan_cnt) > 255) + return -EINVAL; + + bpid = sdp_bpid_cnt + req->chan_base; + if (req->bpid_per_chan) + bpid += chan_id; + + if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt)) + return -EINVAL; + break; default: return -EINVAL; } @@ -462,9 +584,12 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, pf = rvu_get_pf(pcifunc); type = is_afvf(pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + if (is_sdp_pfvf(pcifunc)) + type = NIX_INTF_TYPE_SDP; - /* Enable backpressure only for CGX mapped PFs and LBK interface */ - if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) + /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && + type != NIX_INTF_TYPE_SDP) return 0; pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -481,8 +606,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, } cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); + cfg &= ~GENMASK_ULL(8, 0); rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), - cfg | (bpid & 0xFF) | BIT_ULL(16)); + cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); chan_id++; bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); } @@ -630,9 +756,10 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, struct rvu_pfvf *pfvf, int nixlf, int rss_sz, int rss_grps, int hwctx_size, - u64 way_mask) + u64 way_mask, bool tag_lsb_as_adder) { int err, grp, num_indices; + u64 val; /* RSS is not requested for this NIXLF */ if (!rss_sz) @@ -648,10 +775,13 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, (u64)pfvf->rss_ctx->iova); /* Config full RSS table size, enable RSS and caching */ - rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), - BIT_ULL(36) | BIT_ULL(4) | - ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) | - way_mask << 20); + val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 | + ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE); + + if (tag_lsb_as_adder) + val |= BIT_ULL(5); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val); /* Config RSS group offset and sizes */ for (grp = 0; grp < rss_grps; grp++) rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), @@ -943,7 +1073,7 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); } @@ -1200,7 +1330,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, /* Initialize receive side scaling (RSS) */ hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, - req->rss_grps, hwctx_size, req->way_mask); + req->rss_grps, hwctx_size, req->way_mask, + !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER)); if (err) goto free_mem; @@ -1258,7 +1389,11 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); intf = is_afvf(pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; - err = nix_interface_init(rvu, pcifunc, intf, nixlf); + if (is_sdp_pfvf(pcifunc)) + intf = NIX_INTF_TYPE_SDP; + + err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, + !!(req->flags & NIX_LF_LBK_BLK_SEL)); if (err) goto free_mem; @@ -1364,7 +1499,7 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; cfg = (((u32)req->offset & 0x7) << 16) | (((u32)req->y_mask & 0xF) << 12) | @@ -1382,12 +1517,104 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, return 0; } +/* Handle shaper update specially for few revisions */ +static bool +handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf, + int lvl, u64 reg, u64 regval) +{ + u64 regbase, oldval, sw_xoff = 0; + u64 dbgval, md_debug0 = 0; + unsigned long poll_tmo; + bool rate_reg = 0; + u32 schq; + + regbase = reg & 0xFFFF; + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + + /* Check for rate register */ + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL1X_SW_XOFF(schq); + + rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0)); + break; + case NIX_TXSCH_LVL_TL2: + md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL2X_SW_XOFF(schq); + + rate_reg = (regbase == NIX_AF_TL2X_CIR(0) || + regbase == NIX_AF_TL2X_PIR(0)); + break; + case NIX_TXSCH_LVL_TL3: + md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL3X_SW_XOFF(schq); + + rate_reg = (regbase == NIX_AF_TL3X_CIR(0) || + regbase == NIX_AF_TL3X_PIR(0)); + break; + case NIX_TXSCH_LVL_TL4: + md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL4X_SW_XOFF(schq); + + rate_reg = (regbase == NIX_AF_TL4X_CIR(0) || + regbase == NIX_AF_TL4X_PIR(0)); + break; + case NIX_TXSCH_LVL_MDQ: + sw_xoff = NIX_AF_MDQX_SW_XOFF(schq); + rate_reg = (regbase == NIX_AF_MDQX_CIR(0) || + regbase == NIX_AF_MDQX_PIR(0)); + break; + } + + if (!rate_reg) + return false; + + /* Nothing special to do when state is not toggled */ + oldval = rvu_read64(rvu, blkaddr, reg); + if ((oldval & 0x1) == (regval & 0x1)) { + rvu_write64(rvu, blkaddr, reg, regval); + return true; + } + + /* PIR/CIR disable */ + if (!(regval & 0x1)) { + rvu_write64(rvu, blkaddr, sw_xoff, 1); + rvu_write64(rvu, blkaddr, reg, 0); + udelay(4); + rvu_write64(rvu, blkaddr, sw_xoff, 0); + return true; + } + + /* PIR/CIR enable */ + rvu_write64(rvu, blkaddr, sw_xoff, 1); + if (md_debug0) { + poll_tmo = jiffies + usecs_to_jiffies(10000); + /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */ + do { + if (time_after(jiffies, poll_tmo)) { + dev_err(rvu->dev, + "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n", + nixlf, schq, lvl); + goto exit; + } + usleep_range(1, 5); + dbgval = rvu_read64(rvu, blkaddr, md_debug0); + } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48))); + } + rvu_write64(rvu, blkaddr, reg, regval); +exit: + rvu_write64(rvu, blkaddr, sw_xoff, 0); + return true; +} + /* Disable shaping of pkts by a scheduler queue * at a given scheduler level. 
*/ static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, - int lvl, int schq) + int nixlf, int lvl, int schq) { + struct rvu_hwinfo *hw = rvu->hw; u64 cir_reg = 0, pir_reg = 0; u64 cfg; @@ -1408,6 +1635,21 @@ static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, cir_reg = NIX_AF_TL4X_CIR(schq); pir_reg = NIX_AF_TL4X_PIR(schq); break; + case NIX_TXSCH_LVL_MDQ: + cir_reg = NIX_AF_MDQX_CIR(schq); + pir_reg = NIX_AF_MDQX_PIR(schq); + break; + } + + /* Shaper state toggle needs wait/poll */ + if (hw->cap.nix_shaper_toggle_wait) { + if (cir_reg) + handle_txschq_shaper_update(rvu, blkaddr, nixlf, + lvl, cir_reg, 0); + if (pir_reg) + handle_txschq_shaper_update(rvu, blkaddr, nixlf, + lvl, pir_reg, 0); + return; } if (!cir_reg) @@ -1425,6 +1667,7 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, int lvl, int schq) { struct rvu_hwinfo *hw = rvu->hw; + int link_level; int link; if (lvl >= hw->cap.nix_tx_aggr_lvl) @@ -1434,7 +1677,9 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, if (lvl == NIX_TXSCH_LVL_TL4) rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); - if (lvl != NIX_TXSCH_LVL_TL2) + link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? + NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; + if (lvl != link_level) return; /* Reset TL2's CGX or LBK link config */ @@ -1443,6 +1688,40 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); } +static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, + int lvl, int schq) +{ + struct rvu_hwinfo *hw = rvu->hw; + u64 reg; + + /* Skip this if shaping is not supported */ + if (!hw->cap.nix_shaping) + return; + + /* Clear level specific SW_XOFF */ + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + reg = NIX_AF_TL1X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_TL2: + reg = NIX_AF_TL2X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_TL3: + reg = NIX_AF_TL3X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_TL4: + reg = NIX_AF_TL4X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_MDQ: + reg = NIX_AF_MDQX_SW_XOFF(schq); + break; + default: + return; + } + + rvu_write64(rvu, blkaddr, reg, 0x0); +} + static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) { struct rvu_hwinfo *hw = rvu->hw; @@ -1620,19 +1899,18 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, int link, blkaddr, rc = 0; int lvl, idx, start, end; struct nix_txsch *txsch; - struct rvu_pfvf *pfvf; struct nix_hw *nix_hw; u32 *pfvf_map; + int nixlf; u16 schq; - pfvf = rvu_get_pfvf(rvu, pcifunc); - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (!pfvf->nixlf || blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; + rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (rc) + return rc; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; mutex_lock(&rvu->rsrc_lock); @@ -1677,7 +1955,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, NIX_TXSCHQ_CFG_DONE)) pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); - nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); } for (idx = 0; idx < req->schq[lvl]; idx++) { @@ -1686,7 +1964,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, NIX_TXSCHQ_CFG_DONE)) pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); - nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); } } @@ -1703,8 +1981,8 @@ exit: return 
rc; } -static void nix_smq_flush(struct rvu *rvu, int blkaddr, - int smq, u16 pcifunc, int nixlf) +static int nix_smq_flush(struct rvu *rvu, int blkaddr, + int smq, u16 pcifunc, int nixlf) { int pf = rvu_get_pf(pcifunc); u8 cgx_id = 0, lmac_id = 0; @@ -1739,6 +2017,7 @@ static void nix_smq_flush(struct rvu *rvu, int blkaddr, /* restore cgx tx state */ if (restore_tx_en) cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); + return err; } static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) @@ -1747,6 +2026,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) struct rvu_hwinfo *hw = rvu->hw; struct nix_txsch *txsch; struct nix_hw *nix_hw; + u16 map_func; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -1754,25 +2034,42 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; - /* Disable TL2/3 queue links before SMQ flush*/ + /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/ mutex_lock(&rvu->rsrc_lock); - for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) { - if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4) + for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + + if (lvl >= hw->cap.nix_tx_aggr_lvl) continue; - txsch = &nix_hw->txsch[lvl]; for (schq = 0; schq < txsch->schq.max; schq++) { if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); } } + nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1, + nix_get_tx_link(rvu, pcifunc)); + + /* On PF cleanup, clear cfg done flag as + * PF would have changed default config. + */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; + schq = nix_get_tx_link(rvu, pcifunc); + /* Do not clear pcifunc in txsch->pfvf_map[schq] because + * VF might be using this TL1 queue + */ + map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); + txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0); + } /* Flush SMQs */ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; @@ -1818,6 +2115,7 @@ static int nix_txschq_free_one(struct rvu *rvu, struct nix_txsch *txsch; struct nix_hw *nix_hw; u32 *pfvf_map; + int rc; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -1825,7 +2123,7 @@ static int nix_txschq_free_one(struct rvu *rvu, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); if (nixlf < 0) @@ -1842,15 +2140,24 @@ static int nix_txschq_free_one(struct rvu *rvu, mutex_lock(&rvu->rsrc_lock); if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { - mutex_unlock(&rvu->rsrc_lock); + rc = NIX_AF_ERR_TLX_INVALID; goto err; } + /* Clear SW_XOFF of this resource only. + * For SMQ level, all path XOFF's + * need to be made clear by user + */ + nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); + /* Flush if it is a SMQ. 
Onus of disabling * TL2/3 queue links before SMQ flush is on user */ - if (lvl == NIX_TXSCH_LVL_SMQ) - nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); + if (lvl == NIX_TXSCH_LVL_SMQ && + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) { + rc = NIX_AF_SMQ_FLUSH_FAILED; + goto err; + } /* Free the resource */ rvu_free_rsrc(&txsch->schq, schq); @@ -1858,7 +2165,8 @@ static int nix_txschq_free_one(struct rvu *rvu, mutex_unlock(&rvu->rsrc_lock); return 0; err: - return NIX_AF_ERR_TLX_INVALID; + mutex_unlock(&rvu->rsrc_lock); + return rc; } int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, @@ -1941,6 +2249,11 @@ static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) regbase == NIX_AF_TL4X_PIR(0)) return false; break; + case NIX_TXSCH_LVL_MDQ: + if (regbase == NIX_AF_MDQX_CIR(0) || + regbase == NIX_AF_MDQX_PIR(0)) + return false; + break; } return true; } @@ -1958,12 +2271,48 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, return; rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), (TXSCH_TL1_DFLT_RR_PRIO << 1)); - rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), - TXSCH_TL1_DFLT_RR_QTM); + + /* On OcteonTx2 the config was in bytes and newer silcons + * it's changed to weight. + */ + if (!rvu->hw->cap.nix_common_dwrr_mtu) + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), + TXSCH_TL1_DFLT_RR_QTM); + else + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), + CN10K_MAX_DWRR_WEIGHT); + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); } +/* Register offset - [15:0] + * Scheduler Queue number - [25:16] + */ +#define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0) + +static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw, + int blkaddr, struct nix_txschq_config *req, + struct nix_txschq_config *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int idx, schq; + u64 reg; + + for (idx = 0; idx < req->num_regs; idx++) { + reg = req->reg[idx]; + reg &= NIX_TX_SCHQ_MASK; + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) || + !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq)) + return NIX_AF_INVAL_TXSCHQ_CFG; + rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg); + } + rsp->lvl = req->lvl; + rsp->num_regs = req->num_regs; + return 0; +} + static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, struct nix_txsch *txsch) { @@ -1995,11 +2344,11 @@ static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, - struct msg_rsp *rsp) + struct nix_txschq_config *rsp) { + u64 reg, val, regval, schq_regbase, val_mask; struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; - u64 reg, regval, schq_regbase; struct nix_txsch *txsch; struct nix_hw *nix_hw; int blkaddr, idx, err; @@ -2016,7 +2365,10 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; + + if (req->read) + return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp); txsch = &nix_hw->txsch[req->lvl]; pfvf_map = txsch->pfvf_map; @@ -2032,8 +2384,10 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, for (idx = 0; idx < req->num_regs; idx++) { reg = req->reg[idx]; + reg &= NIX_TX_SCHQ_MASK; regval = req->regval[idx]; schq_regbase = reg & 0xFFFF; + val_mask = req->regval_mask[idx]; if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 
txsch->lvl, reg, regval)) @@ -2043,6 +2397,15 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, if (!is_txschq_shaping_valid(hw, req->lvl, reg)) continue; + val = rvu_read64(rvu, blkaddr, reg); + regval = (val & val_mask) | (regval & ~val_mask); + + /* Handle shaping state toggle specially */ + if (hw->cap.nix_shaper_toggle_wait && + handle_txschq_shaper_update(rvu, blkaddr, nixlf, + req->lvl, reg, regval)) + continue; + /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ if (schq_regbase == NIX_AF_SMQX_CFG(0)) { nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], @@ -2083,7 +2446,6 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc, &nix_hw->txsch[NIX_TXSCH_LVL_TL2]); - return 0; } @@ -2114,8 +2476,12 @@ static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, u16 pcifunc, int index) { struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); - struct nix_txvlan *vlan = &nix_hw->txvlan; + struct nix_txvlan *vlan; + + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + vlan = &nix_hw->txvlan; if (vlan->entry2pfvf_map[index] != pcifunc) return NIX_AF_ERR_PARAM; @@ -2156,10 +2522,15 @@ static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, u64 vtag, u8 size) { struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); - struct nix_txvlan *vlan = &nix_hw->txvlan; + struct nix_txvlan *vlan; u64 regval; int index; + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + vlan = &nix_hw->txvlan; + mutex_lock(&vlan->rsrc_lock); index = rvu_alloc_rsrc(&vlan->rsrc); @@ -2184,12 +2555,16 @@ static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, struct nix_vtag_config *req) { struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); - struct nix_txvlan *vlan = &nix_hw->txvlan; u16 pcifunc = req->hdr.pcifunc; int idx0 = req->tx.vtag0_idx; int idx1 = req->tx.vtag1_idx; + struct nix_txvlan *vlan; int err = 0; + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + vlan = &nix_hw->txvlan; if (req->tx.free_vtag0 && req->tx.free_vtag1) if (vlan->entry2pfvf_map[idx0] != pcifunc || vlan->entry2pfvf_map[idx1] != pcifunc) @@ -2216,9 +2591,13 @@ static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, struct nix_vtag_config_rsp *rsp) { struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); - struct nix_txvlan *vlan = &nix_hw->txvlan; + struct nix_txvlan *vlan; u16 pcifunc = req->hdr.pcifunc; + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + vlan = &nix_hw->txvlan; if (req->tx.cfg_vtag0) { rsp->vtag0_idx = nix_tx_vtag_alloc(rvu, blkaddr, @@ -2456,14 +2835,19 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; struct nix_mce_list *mce_list; + int pf; - /* skip multicast pkt replication for AF's VFs */ - if (is_afvf(pcifunc)) + /* skip multicast pkt replication for AF's VFs & SDP links */ + if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc)) return 0; if (!hw->cap.nix_rx_multicast) return 0; + pf = rvu_get_pf(pcifunc); + if (!is_pf_cgxmapped(rvu, pf)) + return 0; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return -EINVAL; @@ -2667,6 +3051,15 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) for (schq = 0; schq < txsch->schq.max; schq++) txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); } + + /* Setup a default value of 8192 as DWRR MTU */ + if (rvu->hw->cap.nix_common_dwrr_mtu) { + rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU, + convert_bytes_to_dwrr_mtu(8192)); + rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU, + 
convert_bytes_to_dwrr_mtu(8192)); + } + return 0; } @@ -2743,6 +3136,7 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, struct nix_hw_info *rsp) { u16 pcifunc = req->hdr.pcifunc; + u64 dwrr_mtu; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); @@ -2755,6 +3149,20 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); rsp->min_mtu = NIC_HW_MIN_FRS; + + if (!rvu->hw->cap.nix_common_dwrr_mtu) { + /* Return '1' on OTx2 */ + rsp->rpm_dwrr_mtu = 1; + rsp->sdp_dwrr_mtu = 1; + return 0; + } + + dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU); + rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); + + dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU); + rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); + return 0; } @@ -3068,7 +3476,7 @@ static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) hw = get_nix_hw(rvu->hw, blkaddr); if (!hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; /* No room to add new flow hash algorithm */ if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) @@ -3108,7 +3516,7 @@ int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); /* Failed to get algo index from the existing list, reserve new */ @@ -3366,6 +3774,77 @@ static void nix_find_link_frs(struct rvu *rvu, req->minlen = minlen; } +static int +nix_config_link_credits(struct rvu *rvu, int blkaddr, int link, + u16 pcifunc, u64 tx_credits) +{ + struct rvu_hwinfo *hw = rvu->hw; + int pf = rvu_get_pf(pcifunc); + u8 cgx_id = 0, lmac_id = 0; + unsigned long poll_tmo; + bool restore_tx_en = 0; + struct nix_hw *nix_hw; + u64 cfg, sw_xoff = 0; + u32 schq = 0; + u32 credits; + int rc; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + if (tx_credits == nix_hw->tx_credits[link]) + return 0; + + /* Enable cgx tx if disabled for credits to be back */ + if (is_pf_cgxmapped(rvu, pf)) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), + lmac_id, true); + } + + mutex_lock(&rvu->rsrc_lock); + /* Disable new traffic to link */ + if (hw->cap.nix_shaping) { + schq = nix_get_tx_link(rvu, pcifunc); + sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)); + rvu_write64(rvu, blkaddr, + NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0)); + } + + rc = -EBUSY; + poll_tmo = jiffies + usecs_to_jiffies(10000); + /* Wait for credits to return */ + do { + if (time_after(jiffies, poll_tmo)) + goto exit; + usleep_range(100, 200); + + cfg = rvu_read64(rvu, blkaddr, + NIX_AF_TX_LINKX_NORM_CREDIT(link)); + credits = (cfg >> 12) & 0xFFFFFULL; + } while (credits != nix_hw->tx_credits[link]); + + cfg &= ~(0xFFFFFULL << 12); + cfg |= (tx_credits << 12); + rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); + rc = 0; + + nix_hw->tx_credits[link] = tx_credits; + +exit: + /* Enable traffic back */ + if (hw->cap.nix_shaping && !sw_xoff) + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0); + + /* Restore state of cgx tx */ + if (restore_tx_en) + cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); + + mutex_unlock(&rvu->rsrc_lock); + return rc; +} + int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, struct msg_rsp *rsp) { @@ -3376,6 +3855,7 @@
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, struct nix_txsch *txsch; u64 cfg, lmac_fifo_len; struct nix_hw *nix_hw; + struct rvu_pfvf *pfvf; u8 cgx = 0, lmac = 0; u16 max_mtu; @@ -3385,7 +3865,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; if (is_afvf(pcifunc)) rvu_get_lbk_link_max_frs(rvu, &max_mtu); @@ -3432,7 +3912,8 @@ rx_frscfg: link = (cgx * hw->lmac_per_cgx) + lmac; } else if (pf == 0) { /* For VFs of PF0 ingress is LBK port, so config LBK link */ - link = hw->cgx_links; + pfvf = rvu_get_pfvf(rvu, pcifunc); + link = hw->cgx_links + pfvf->lbkid; } if (link < 0) @@ -3454,11 +3935,8 @@ linkcfg: lmac_fifo_len = rvu_cgx_get_fifolen(rvu) / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); - cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); - cfg &= ~(0xFFFFFULL << 12); - cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; - rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); - return 0; + return nix_config_link_credits(rvu, blkaddr, link, pcifunc, + (lmac_fifo_len - req->maxlen) / 16); } int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, @@ -3502,12 +3980,13 @@ static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ } -static void nix_link_config(struct rvu *rvu, int blkaddr) +static void nix_link_config(struct rvu *rvu, int blkaddr, + struct nix_hw *nix_hw) { struct rvu_hwinfo *hw = rvu->hw; int cgx, lmac_cnt, slink, link; u16 lbk_max_frs, lmac_max_frs; - u64 tx_credits; + u64 tx_credits, cfg; rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); @@ -3538,15 +4017,18 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) */ for (cgx = 0; cgx < hw->cgx; cgx++) { lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + /* Skip when cgx is not available or lmac cnt is zero */ + if (lmac_cnt <= 0) + continue; tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) - lmac_max_frs) / 16; /* Enable credits and set credit pkt count to max allowed */ - tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); + cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); slink = cgx * hw->lmac_per_cgx; for (link = slink; link < (slink + lmac_cnt); link++) { + nix_hw->tx_credits[link] = tx_credits; rvu_write64(rvu, blkaddr, - NIX_AF_TX_LINKX_NORM_CREDIT(link), - tx_credits); + NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); } } @@ -3554,6 +4036,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) slink = hw->cgx_links; for (link = slink; link < (slink + hw->lbk_links); link++) { tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); + nix_hw->tx_credits[link] = tx_credits; /* Enable credits and set credit pkt count to max allowed */ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); rvu_write64(rvu, blkaddr, @@ -3647,6 +4130,28 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) return 0; } +static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr) +{ + struct rvu_hwinfo *hw = rvu->hw; + u64 hw_const; + + hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + + /* On OcteonTx2 DWRR quantum is directly configured into each of + * the transmit scheduler queues. And PF/VF drivers were free to + * config any value upto 2^24. + * On CN10K, HW is modified, the quantum configuration at scheduler + * queues is in terms of weight. 
And SW needs to setup a base DWRR MTU + * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do + * 'DWRR MTU * weight' to get the quantum. + * + * Check if HW uses a common MTU for all DWRR quantum configs. + * On OcteonTx2 this register field is '0'. + */ + if (((hw_const >> 56) & 0x10) == 0x10) + hw->cap.nix_common_dwrr_mtu = true; +} + static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) { const struct npc_lt_def_cfg *ltdefs; @@ -3684,6 +4189,9 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) if (err) return err; + /* Setup capabilities of the NIX block */ + rvu_nix_setup_capabilities(rvu, blkaddr); + /* Initialize admin queue */ err = nix_aq_init(rvu, block); if (err) @@ -3692,6 +4200,9 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) /* Restore CINT timer delay to HW reset values */ rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); + /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */ + rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, 0x1ULL); + if (is_block_implemented(hw, blkaddr)) { err = nix_setup_txschq(rvu, nix_hw, blkaddr); if (err) @@ -3792,8 +4303,13 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) if (err) return err; + nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links, + sizeof(u64), GFP_KERNEL); + if (!nix_hw->tx_credits) + return -ENOMEM; + /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ - nix_link_config(rvu, blkaddr); + nix_link_config(rvu, blkaddr, nix_hw); /* Enable Channel backpressure */ rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); @@ -3849,7 +4365,9 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, kfree(txsch->schq.bmap); } - nix_ipolicer_freemem(nix_hw); + kfree(nix_hw->tx_credits); + + nix_ipolicer_freemem(rvu, nix_hw); vlan = &nix_hw->txvlan; kfree(vlan->rsrc.bmap); @@ -4027,7 +4545,7 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; /* Find existing matching LSO format, if any */ for (idx = 0; idx < nix_hw->lso.in_use; idx++) { @@ -4225,11 +4743,14 @@ static int nix_setup_ipolicers(struct rvu *rvu, return 0; } -static void nix_ipolicer_freemem(struct nix_hw *nix_hw) +static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) { struct nix_ipolicer *ipolicer; int layer; + if (!rvu->hw->cap.ipolicer) + return; + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { ipolicer = &nix_hw->ipolicer[layer]; @@ -4652,3 +5173,36 @@ static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, rvu_free_rsrc(&ipolicer->band_prof, mid_prof); } } + +int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req, + struct nix_bandprof_get_hwinfo_rsp *rsp) +{ + struct nix_ipolicer *ipolicer; + int blkaddr, layer, err; + struct nix_hw *nix_hw; + u64 tu; + + if (!rvu->hw->cap.ipolicer) + return NIX_AF_ERR_IPOLICER_NOTSUPP; + + err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + /* Return number of bandwidth profiles free at each layer */ + mutex_lock(&rvu->rsrc_lock); + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + if (layer == BAND_PROF_INVAL_LAYER) + continue; + + ipolicer = &nix_hw->ipolicer[layer]; + rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof); + } + mutex_unlock(&rvu->rsrc_lock); + + /* Set the policer timeunit in nanosec */ + tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & 
GENMASK_ULL(9, 0); + rsp->policer_timeunit = (tu + 1) * 100; + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c index 24c2bfdfec4e..70bd036ed76e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/module.h> @@ -419,6 +416,10 @@ exit: rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF; rsp->stack_pg_bytes = cfg & 0xFF; rsp->qints = (cfg >> 28) & 0xFFF; + if (!is_rvu_otx2(rvu)) { + cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL); + rsp->cache_lines = (cfg >> 1) & 0x3F; + } return rc; } @@ -478,6 +479,13 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) #endif rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); + /* For CN10K NPA BATCH DMA set 35 cache lines */ + if (!is_rvu_otx2(rvu)) { + cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL); + cfg &= ~0x7EULL; + cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1); + rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg); + } /* Result structure can be followed by Aura/Pool context at * RES + 128bytes and a write mask at RES + 256 bytes, depending on * operation type. Alloc sufficient result memory for all operations. diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 52b255426c22..5efb4174e82d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
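One formula worth calling out from the bandprof hwinfo handler above: NIX_AF_PL_TS[9:0] encodes the policer time unit in 100 ns steps, offset by one. A trivial sketch (the register value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
    uint64_t pl_ts = 0x9;                    /* made-up register read */
    uint64_t tu = pl_ts & GENMASK_ULL(9, 0);

    printf("policer timeunit = %llu ns\n",   /* 0x9 -> 1000 ns */
           (unsigned long long)((tu + 1) * 100));
    return 0;
}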
*/ #include <linux/bitfield.h> @@ -23,7 +20,7 @@ #define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */ #define NPC_PARSE_RESULT_DMAC_OFFSET 8 -#define NPC_HW_TSTAMP_OFFSET 8 +#define NPC_HW_TSTAMP_OFFSET 8ULL #define NPC_KEX_CHAN_MASK 0xFFFULL #define NPC_KEX_PF_FUNC_MASK 0xFFFFULL @@ -85,36 +82,6 @@ static int npc_mcam_verify_pf_func(struct rvu *rvu, return 0; } -int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel) -{ - int pf = rvu_get_pf(pcifunc); - u8 cgx_id, lmac_id; - int base = 0, end; - - if (is_npc_intf_tx(intf)) - return 0; - - /* return in case of AF installed rules */ - if (is_pffunc_af(pcifunc)) - return 0; - - if (is_afvf(pcifunc)) { - end = rvu_get_num_lbk_chans(); - if (end < 0) - return -EINVAL; - } else { - rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); - base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0x0); - /* CGX mapped functions has maximum of 16 channels */ - end = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0xF); - } - - if (channel < base || channel > end) - return -EINVAL; - - return 0; -} - void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) { int blkaddr; @@ -634,8 +601,8 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, struct nix_rx_action action; int blkaddr, index; - /* AF's VFs work in promiscuous mode */ - if (is_afvf(pcifunc)) + /* AF's and SDP VFs work in promiscuous mode */ + if (is_afvf(pcifunc) || is_sdp_vf(pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -724,7 +691,17 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, action.index = pfvf->promisc_mce_idx; } - req.chan_mask = 0xFFFU; + /* For cn10k the upper two bits of the channel number are + * cpt channel number. with masking out these bits in the + * mcam entry, same entry used for NIX will allow packets + * received from cpt for parsing. 
+ */ + if (!is_rvu_otx2(rvu)) { + req.chan_mask = NIX_CHAN_CPT_X2P_MASK; + } else { + req.chan_mask = 0xFFFU; + } + if (chan_cnt > 1) { if (!is_power_of_2(chan_cnt)) { dev_err(rvu->dev, @@ -853,7 +830,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u16 vf_func; /* Only CGX PF/VF can add allmulticast entry */ - if (is_afvf(pcifunc)) + if (is_afvf(pcifunc) && is_sdp_vf(pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -938,7 +915,7 @@ void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 pcifunc, u64 rx_action) { - int actindex, index, bank; + int actindex, index, bank, entry; bool enable; if (!(pcifunc & RVU_PFVF_FUNC_MASK)) @@ -949,7 +926,7 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam, if (mcam->entry2target_pffunc[index] == pcifunc) { bank = npc_get_bank(mcam, index); actindex = index; - index &= (mcam->banksize - 1); + entry = index & (mcam->banksize - 1); /* read vf flow entry enable status */ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, @@ -959,7 +936,7 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam, false); /* update 'action' */ rvu_write64(rvu, blkaddr, - NPC_AF_MCAMEX_BANKX_ACTION(index, bank), + NPC_AF_MCAMEX_BANKX_ACTION(entry, bank), rx_action); if (enable) npc_enable_mcam_entry(rvu, mcam, blkaddr, @@ -1898,9 +1875,22 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr) mcam->banks = (npc_const >> 44) & 0xFULL; mcam->banksize = (npc_const >> 28) & 0xFFFFULL; + hw->npc_stat_ena = BIT_ULL(9); /* Extended set */ if (npc_const2) { hw->npc_ext_set = true; + /* 96xx supports only match_stats and npc_counters + * reflected in NPC_AF_CONST reg. + * STAT_SEL and ENA are at [0:8] and 9 bit positions. + * 98xx has both match_stat and ext and npc_counter + * reflected in NPC_AF_CONST2 + * STAT_SEL_EXT added at [12:14] bit position. + * cn10k supports only ext and hence npc_counters in + * NPC_AF_CONST is 0 and npc_counters reflected in NPC_AF_CONST2. + * STAT_SEL bitpos incremented from [0:8] to [0:11] and ENA bit moved to 63 + */ + if (!hw->npc_counters) + hw->npc_stat_ena = BIT_ULL(63); hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL; mcam->banksize = npc_const2 & 0xFFFFULL; } @@ -1955,7 +1945,7 @@ static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr) rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(intf), ((mcam->rx_miss_act_cntr >> 9) << 12) | - BIT_ULL(9) | mcam->rx_miss_act_cntr); + hw->npc_stat_ena | mcam->rx_miss_act_cntr); } /* Configure TX interfaces */ @@ -2030,14 +2020,15 @@ int rvu_npc_init(struct rvu *rvu) /* Enable below for Rx pkts. * - Outer IPv4 header checksum validation. - * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M]. + * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2B]. + * - Detect outer L2 multicast address and set NPC_RESULT_S[L2M]. * - Inner IPv4 header checksum validation. 
* - Set non zero checksum error code value */ rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG, rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) | - BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) | - BIT_ULL(2) | BIT_ULL(1)); + ((u64)NPC_EC_OIP4_CSUM << 32) | (NPC_EC_IIP4_CSUM << 24) | + BIT_ULL(7) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1)); rvu_npc_setup_interfaces(rvu, blkaddr); @@ -2147,18 +2138,16 @@ static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 entry, u16 cntr) { u16 index = entry & (mcam->banksize - 1); - u16 bank = npc_get_bank(mcam, entry); + u32 bank = npc_get_bank(mcam, entry); + struct rvu_hwinfo *hw = rvu->hw; /* Set mapping and increment counter's refcnt */ mcam->entry2cntr_map[entry] = cntr; mcam->cntr_refcnt[cntr]++; - /* Enable stats - * NPC_AF_MCAMEX_BANKX_STAT_ACT[14:12] - counter[11:9] - * NPC_AF_MCAMEX_BANKX_STAT_ACT[8:0] - counter[8:0] - */ + /* Enable stats */ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), - ((cntr >> 9) << 12) | BIT_ULL(9) | cntr); + ((cntr >> 9) << 12) | hw->npc_stat_ena | cntr); } static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu, @@ -2166,7 +2155,7 @@ static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu, int blkaddr, u16 entry, u16 cntr) { u16 index = entry & (mcam->banksize - 1); - u16 bank = npc_get_bank(mcam, entry); + u32 bank = npc_get_bank(mcam, entry); /* Remove mapping and reduce counter's refcnt */ mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP; @@ -2414,6 +2403,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, goto alloc; } + /* For a VF base MCAM match rule is set by its PF. And all the + * further MCAM rules installed by VF on its own are + * concatenated with the base rule set by its PF. Hence PF entries + * should be at lower priority compared to VF entries. Otherwise + * base rule is hit always and rules installed by VF will be of + * no use. Hence if the request is from PF and NOT a priority + * allocation request then allocate low priority entries. + */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) + goto lprio_alloc; + /* Find out the search range for non-priority allocation request * * Get MCAM free entry count in middle zone. @@ -2439,6 +2439,7 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, /* Not enough free entries, search all entries in reverse, * so that low priority ones will get used up. 
*/ +lprio_alloc: reverse = true; start = 0; end = mcam->bmap_entries; @@ -2673,7 +2674,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; - u16 channel, chan_mask; int blkaddr, rc; u8 nix_intf; @@ -2681,10 +2681,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; - chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; - channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK; - channel &= chan_mask; - mutex_lock(&mcam->lock); rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); if (rc) @@ -2707,12 +2703,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, nix_intf = pfvf->nix_rx_intf; if (!is_pffunc_af(pcifunc) && - npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { - rc = NPC_MCAM_INVALID_REQ; - goto exit; - } - - if (!is_pffunc_af(pcifunc) && npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { rc = NPC_MCAM_INVALID_REQ; goto exit; @@ -2788,8 +2778,8 @@ int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu, struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; u16 old_entry, new_entry; + int blkaddr, rc = 0; u16 index, cntr; - int blkaddr, rc; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) @@ -2990,10 +2980,11 @@ int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); if (index >= mcam->bmap_entries) break; + entry = index + 1; + if (mcam->entry2cntr_map[index] != req->cntr) continue; - entry = index + 1; npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, index, req->cntr); } @@ -3058,7 +3049,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, struct npc_mcam *mcam = &rvu->hw->mcam; u16 entry = NPC_MCAM_ENTRY_INVALID; u16 cntr = NPC_MCAM_ENTRY_INVALID; - u16 channel, chan_mask; int blkaddr, rc; u8 nix_intf; @@ -3069,13 +3059,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, if (!is_npc_interface_valid(rvu, req->intf)) return NPC_MCAM_INVALID_REQ; - chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; - channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK; - channel &= chan_mask; - - if (npc_mcam_verify_channel(rvu, req->hdr.pcifunc, req->intf, channel)) - return NPC_MCAM_INVALID_REQ; - if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, req->hdr.pcifunc)) return NPC_MCAM_INVALID_REQ; @@ -3252,7 +3235,7 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, /* read MCAM entry STAT_ACT register */ regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank)); - if (!(regval & BIT_ULL(9))) { + if (!(regval & rvu->hw->npc_stat_ena)) { rsp->stat_ena = 0; mutex_unlock(&mcam->lock); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 5c01cf4a9c5b..51ddc7b81d0b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * * Copyright (C) 2020 Marvell. 
*/ @@ -20,6 +20,8 @@ static const char * const npc_flow_names[] = { [NPC_DMAC] = "dmac", [NPC_SMAC] = "smac", [NPC_ETYPE] = "ether type", + [NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag", + [NPC_VLAN_ETYPE_STAG] = "vlan ether type stag", [NPC_OUTER_VID] = "outer vlan id", [NPC_TOS] = "tos", [NPC_SIP_IPV4] = "ipv4 source ip", @@ -492,6 +494,11 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) if (*features & BIT_ULL(NPC_OUTER_VID)) if (!npc_check_field(rvu, blkaddr, NPC_LB, intf)) *features &= ~BIT_ULL(NPC_OUTER_VID); + + /* for vlan ethertypes corresponding layer type should be in the key */ + if (npc_check_field(rvu, blkaddr, NPC_LB, intf)) + *features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) | + BIT_ULL(NPC_VLAN_ETYPE_STAG); } /* Scan key extraction profile and record how fields of our interest @@ -600,7 +607,7 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf) dev_info(rvu->dev, "Unsupported flow(s):\n"); for_each_set_bit(bit, (unsigned long *)&unsupported, 64) dev_info(rvu->dev, "%s ", npc_get_field_name(bit)); - return NIX_AF_ERR_NPC_KEY_NOT_SUPP; + return -EOPNOTSUPP; } return 0; @@ -747,6 +754,28 @@ static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry, } } +static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry, + u64 features, u8 intf) +{ + bool ctag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_CTAG)); + bool stag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_STAG)); + bool vid = !!(features & BIT_ULL(NPC_OUTER_VID)); + + /* If only VLAN id is given then always match outer VLAN id */ + if (vid && !ctag && !stag) { + npc_update_entry(rvu, NPC_LB, entry, + NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0, + NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf); + return; + } + if (ctag) + npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_CTAG, 0, + ~0ULL, 0, intf); + if (stag) + npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_STAG_QINQ, 0, + ~0ULL, 0, intf); +} + static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, u64 features, struct flow_msg *pkt, struct flow_msg *mask, @@ -779,11 +808,6 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6, 0, ~0ULL, 0, intf); - if (features & BIT_ULL(NPC_OUTER_VID)) - npc_update_entry(rvu, NPC_LB, entry, - NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0, - NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf); - /* For AH, LTYPE should be present in entry */ if (features & BIT_ULL(NPC_IPPROTO_AH)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH, @@ -829,6 +853,7 @@ do { \ ntohs(mask->vlan_tci), 0); npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf); + npc_update_vlan_features(rvu, entry, features, intf); } static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, @@ -995,13 +1020,11 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_npc_mcam_rule dummy = { 0 }; struct rvu_npc_mcam_rule *rule; - bool new = false, msg_from_vf; u16 owner = req->hdr.pcifunc; struct msg_rsp write_rsp; struct mcam_entry *entry; int entry_index, err; - - msg_from_vf = !!(owner & RVU_PFVF_FUNC_MASK); + bool new = false; installed_features = req->features; features = req->features; @@ -1027,7 +1050,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, } /* update mcam entry with default unicast rule attributes */ - if (def_ucast_rule && (msg_from_vf || (req->default_rule && req->append))) { + if (def_ucast_rule && 
(req->default_rule && req->append)) { missing_features = (def_ucast_rule->features ^ features) & def_ucast_rule->features; if (missing_features) @@ -1130,6 +1153,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, struct npc_install_flow_rsp *rsp) { bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK); + struct rvu_switch *rswitch = &rvu->rswitch; int blkaddr, nixlf, err; struct rvu_pfvf *pfvf; bool pf_set_vfs_mac = false; @@ -1139,14 +1163,14 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) { dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); - return -ENODEV; + return NPC_MCAM_INVALID_REQ; } if (!is_npc_interface_valid(rvu, req->intf)) - return -EINVAL; + return NPC_FLOW_INTF_INVALID; if (from_vf && req->default_rule) - return NPC_MCAM_PERM_DENIED; + return NPC_FLOW_VF_PERM_DENIED; /* Each PF/VF info is maintained in struct rvu_pfvf. * rvu_pfvf for the target PF/VF needs to be retrieved @@ -1172,12 +1196,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, err = npc_check_unsupported_flows(rvu, req->features, req->intf); if (err) - return err; - - /* Skip channel validation if AF is installing */ - if (!is_pffunc_af(req->hdr.pcifunc) && - npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) - return -EINVAL; + return NPC_FLOW_NOT_SUPPORTED; pfvf = rvu_get_pfvf(rvu, target); @@ -1195,7 +1214,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, /* Proceed if NIXLF is attached or not for TX rules */ err = nix_get_nixlf(rvu, target, &nixlf, NULL); if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac) - return -EINVAL; + return NPC_FLOW_NO_NIXLF; /* don't enable rule when nixlf not attached or initialized */ if (!(is_nixlf_attached(rvu, target) && @@ -1211,7 +1230,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, /* Do not allow requests from uninitialized VFs */ if (from_vf && !enable) - return -EINVAL; + return NPC_FLOW_VF_NOT_INIT; /* PF sets VF mac & VF NIXLF is not attached, update the mac addr */ if (pf_set_vfs_mac && !enable) { @@ -1221,15 +1240,12 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, return 0; } - /* If message is from VF then its flow should not overlap with - * reserved unicast flow. - */ - if (from_vf && pfvf->def_ucast_rule && is_npc_intf_rx(req->intf) && - pfvf->def_ucast_rule->features & req->features) - return -EINVAL; + mutex_lock(&rswitch->switch_lock); + err = npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, + req, rsp, enable, pf_set_vfs_mac); + mutex_unlock(&rswitch->switch_lock); - return npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, req, rsp, - enable, pf_set_vfs_mac); + return err; } static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c index e266f0c49559..b3150f053291 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/module.h> @@ -33,8 +30,8 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = { {NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } }, {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18}, {0x1200, 0x12E0} } }, - {NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608}, - {0x1610, 0x1618} } }, + {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608}, + {0x1610, 0x1618}, {0x1700, 0x17B0} } }, {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } }, {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } }, }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 8b01ef6e2c99..21f1ed4e222f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef RVU_REG_H @@ -53,7 +50,7 @@ #define RVU_AF_SMMU_TXN_REQ (0x6008) #define RVU_AF_SMMU_ADDR_RSP_STS (0x6010) #define RVU_AF_SMMU_ADDR_TLN (0x6018) -#define RVU_AF_SMMU_TLN_FLIT1 (0x6030) +#define RVU_AF_SMMU_TLN_FLIT0 (0x6020) /* Admin function's privileged PF/VF registers */ #define RVU_PRIV_CONST (0x8000000) @@ -156,6 +153,7 @@ #define NPA_AF_AQ_DONE_INT_W1S (0x0688) #define NPA_AF_AQ_DONE_ENA_W1S (0x0690) #define NPA_AF_AQ_DONE_ENA_W1C (0x0698) +#define NPA_AF_BATCH_CTL (0x06a0) #define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18) #define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18) #define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18) @@ -265,10 +263,13 @@ #define NIX_AF_SDP_TX_FIFO_STATUS (0x0640) #define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660) #define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670) +#define NIX_AF_SEB_CFG (0x05F0) #define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3) #define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16) #define NIX_AF_SQM_DBG_CTL_STATUS (0x750) +#define NIX_AF_DWRR_SDP_MTU (0x790) +#define NIX_AF_DWRR_RPM_MTU (0x7A0) #define NIX_AF_PSE_CHANNEL_LEVEL (0x800) #define NIX_AF_PSE_SHAPER_CFG (0x810) #define NIX_AF_TX_EXPR_CREDIT (0x830) @@ -701,5 +702,8 @@ #define APR_AF_LMT_CFG (0x000ull) #define APR_AF_LMT_MAP_BASE (0x008ull) #define APR_AF_LMT_CTL (0x010ull) +#define APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT 23 +#define APR_LMT_MAP_ENT_SCH_ENA_SHIFT 22 +#define APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT 21 #endif /* RVU_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c new file mode 100644 index 000000000000..b04fb226f708 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2021 Marvell. 
+ * + */ + +#include <linux/pci.h> +#include "rvu.h" + +/* SDP PF device id */ +#define PCI_DEVID_OTX2_SDP_PF 0xA0F6 + +/* Maximum SDP blocks in a chip */ +#define MAX_SDP 2 + +/* SDP PF number */ +static int sdp_pf_num[MAX_SDP] = {-1, -1}; + +bool is_sdp_pfvf(u16 pcifunc) +{ + u16 pf = rvu_get_pf(pcifunc); + u32 found = 0, i = 0; + + while (i < MAX_SDP) { + if (pf == sdp_pf_num[i]) + found = 1; + i++; + } + + if (!found) + return false; + + return true; +} + +bool is_sdp_pf(u16 pcifunc) +{ + return (is_sdp_pfvf(pcifunc) && + !(pcifunc & RVU_PFVF_FUNC_MASK)); +} + +bool is_sdp_vf(u16 pcifunc) +{ + return (is_sdp_pfvf(pcifunc) && + !!(pcifunc & RVU_PFVF_FUNC_MASK)); +} + +int rvu_sdp_init(struct rvu *rvu) +{ + struct pci_dev *pdev = NULL; + struct rvu_pfvf *pfvf; + u32 i = 0; + + while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OTX2_SDP_PF, + pdev)) != NULL) { + /* The RVU PF number is one less than bus number */ + sdp_pf_num[i] = pdev->bus->number - 1; + pfvf = &rvu->pf[sdp_pf_num[i]]; + + pfvf->sdp_info = devm_kzalloc(rvu->dev, + sizeof(struct sdp_node_info), + GFP_KERNEL); + if (!pfvf->sdp_info) + return -ENOMEM; + + dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]); + + put_device(&pdev->dev); + i++; + } + + return 0; +} + +int +rvu_mbox_handler_set_sdp_chan_info(struct rvu *rvu, + struct sdp_chan_info_msg *req, + struct msg_rsp *rsp) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + + memcpy(pfvf->sdp_info, &req->info, sizeof(struct sdp_node_info)); + dev_info(rvu->dev, "AF: SDP%d max_vfs %d num_pf_rings %d pf_srn %d\n", + req->info.node_id, req->info.max_vfs, req->info.num_pf_rings, + req->info.pf_srn); + return 0; +} + +int +rvu_mbox_handler_get_sdp_chan_info(struct rvu *rvu, struct msg_req *req, + struct sdp_get_chan_info_msg *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + int blkaddr; + + if (!hw->cap.programmable_chans) { + rsp->chan_base = NIX_CHAN_SDP_CH_START; + rsp->num_chan = NIX_CHAN_SDP_NUM_CHANS; + } else { + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + rsp->chan_base = hw->sdp_chan_base; + rsp->num_chan = rvu_read64(rvu, blkaddr, NIX_AF_CONST1) & 0xFFFUL; + } + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 5bbe6727d11d..77ac96693f04 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef RVU_STRUCT_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c index 820adf390b8e..3392487f6b47 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * * Copyright (C) 2021 Marvell. 
+ * */ #include <linux/bitfield.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c index 56f90cf9c4c0..775fd4c35794 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver tracepoints +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2020 Marvell. * - * Copyright (C) 2020 Marvell International Ltd. */ #define CREATE_TRACE_POINTS diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h index 64aa7d350df1..28984d0e848a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver tracepoints +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2020 Marvell. * - * Copyright (C) 2020 Marvell International Ltd. */ #undef TRACE_SYSTEM @@ -14,6 +15,8 @@ #include <linux/tracepoint.h> #include <linux/pci.h> +#include "mbox.h" + TRACE_EVENT(otx2_msg_alloc, TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size), TP_ARGS(pdev, id, size), @@ -25,8 +28,8 @@ TRACE_EVENT(otx2_msg_alloc, __entry->id = id; __entry->size = size; ), - TP_printk("[%s] msg:(0x%x) size:%lld\n", __get_str(dev), - __entry->id, __entry->size) + TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev), + otx2_mbox_id2name(__entry->id), __entry->size) ); TRACE_EVENT(otx2_msg_send, @@ -88,8 +91,8 @@ TRACE_EVENT(otx2_msg_process, __entry->id = id; __entry->err = err; ), - TP_printk("[%s] msg:(0x%x) error:%d\n", __get_str(dev), - __entry->id, __entry->err) + TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev), + otx2_mbox_id2name(__entry->id), __entry->err) ); #endif /* __RVU_TRACE_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 3254b02205ca..b92c267628b8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -1,13 +1,14 @@ # SPDX-License-Identifier: GPL-2.0 # -# Makefile for Marvell's OcteonTX2 ethernet device drivers +# Makefile for Marvell's RVU Ethernet device drivers # obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o -rvu_nicvf-y := otx2_vf.o + otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ + otx2_devlink.o +rvu_nicvf-y := otx2_vf.o otx2_devlink.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 184de9466286..3cc76f14d2fd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physcial Function ethernet driver +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2021 Marvell. * - * Copyright (C) 2020 Marvell. 
*/ #include "cn10k.h" @@ -92,8 +93,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; /* Only one SMQ is allocated, map all SQ's to that SMQ */ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; - /* FIXME: set based on NIX_AF_DWRR_RPM_MTU*/ - aq->sq.smq_rr_weight = pfvf->netdev->mtu; + aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->max_frs); aq->sq.default_chan = pfvf->hw.tx_chan_base; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index 1a1ae334477d..8ae96815865e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Marvell OcteonTx2 RVU Ethernet driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2021 Marvell. * - * Copyright (C) 2020 Marvell. */ #ifndef CN10K_H @@ -9,6 +10,20 @@ #include "otx2_common.h" +static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu) +{ + u32 weight; + + /* On OTx2, since AF returns DWRR_MTU as '1', this logic + * will work on those silicons as well. + */ + weight = mtu / pfvf->hw.dwrr_mtu; + if (mtu % pfvf->hw.dwrr_mtu) + weight += 1; + + return weight; +} + void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 70fcc1fd962f..ce25c2744435 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/interrupt.h> @@ -208,7 +205,8 @@ int otx2_set_mac_address(struct net_device *netdev, void *p) if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) { memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); /* update dmac field in vlan offload rule */ - if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) + if (netif_running(netdev) && + pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) otx2_install_rxvlan_offload_flow(pfvf); /* update dmac address in ntuple and DMAC filter list */ if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) @@ -268,6 +266,7 @@ unlock: int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct nix_rss_flowkey_cfg_rsp *rsp; struct nix_rss_flowkey_cfg *req; int err; @@ -282,6 +281,18 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) req->group = DEFAULT_RSS_CONTEXT_GROUP; err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + goto fail; + + rsp = (struct nix_rss_flowkey_cfg_rsp *) + otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + err = PTR_ERR(rsp); + goto fail; + } + + pfvf->hw.flowkey_alg_idx = rsp->alg_idx; +fail: mutex_unlock(&pfvf->mbox.lock); return err; } @@ -572,30 +583,14 @@ void otx2_get_mac_from_af(struct net_device *netdev) } EXPORT_SYMBOL(otx2_get_mac_from_af); -static int otx2_get_link(struct otx2_nic *pfvf) -{ - int link = 0; - u16 map; - - /* cgx lmac link */ - if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) { - map = pfvf->hw.tx_chan_base & 0x7FF; - link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF); - } - /* LBK channel */ - if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE) { - map = pfvf->hw.tx_chan_base & 0x7FF; - link = pfvf->hw.cgx_links | ((map >> 8) & 0xF); - } - - return link; -} - int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) { struct otx2_hw *hw = &pfvf->hw; struct nix_txschq_config *req; u64 schq, parent; + u64 dwrr_val; + + dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->max_frs); req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); if (!req) @@ -621,21 +616,21 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) req->num_regs++; /* Set DWRR quantum */ req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq); - req->regval[2] = DFLT_RR_QTM; + req->regval[2] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL4) { parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0]; req->reg[0] = NIX_AF_TL4X_PARENT(schq); req->regval[0] = parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); - req->regval[1] = DFLT_RR_QTM; + req->regval[1] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL3) { parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0]; req->reg[0] = NIX_AF_TL3X_PARENT(schq); req->regval[0] = parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); - req->regval[1] = DFLT_RR_QTM; + req->regval[1] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL2) { parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0]; req->reg[0] = NIX_AF_TL2X_PARENT(schq); @@ -643,11 +638,10 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) req->num_regs++; req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); - req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM; + req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val; req->num_regs++; - req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, - otx2_get_link(pfvf)); + req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); /* Enable this queue and backpressure */ req->regval[2] = BIT_ULL(13) | BIT_ULL(12); @@ -656,7 +650,10 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) * For VF this is always ignored. 
*/ - /* Set DWRR quantum */ + /* On CN10K, if RR_WEIGHT is greater than 16384, HW will + * clip it to 16384, so configuring a 24bit max value + * will work on both OTx2 and CN10K. + */ req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq); req->regval[0] = TXSCH_TL1_DFLT_RR_QTM; @@ -803,7 +800,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; /* Only one SMQ is allocated, map all SQ's to that SMQ */ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; - aq->sq.smq_rr_quantum = DFLT_RR_QTM; + aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->max_frs); aq->sq.default_chan = pfvf->hw.tx_chan_base; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; @@ -1190,7 +1187,22 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, /* Enable backpressure for RQ aura */ if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) { aq->aura.bp_ena = 0; + /* If NIX1 LF is attached then specify NIX1_RX. + * + * Below NPA_AURA_S[BP_ENA] is set according to the + * NPA_BPINTF_E enumeration given as: + * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so + * NIX0_RX is 0x0 + 0*0x1 = 0 + * NIX1_RX is 0x0 + 1*0x1 = 1 + * But in HRM it is given that + * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to + * NIX-RX based on [BP] level. One bit per NIX-RX; index + * enumerated by NPA_BPINTF_E." + */ + if (pfvf->nix_blkaddr == BLKADDR_NIX1) + aq->aura.bp_ena = 1; aq->aura.nix0_bpid = pfvf->bpid[0]; + /* Set backpressure level for RQ's Aura */ aq->aura.bp = RQ_BP_LVL_AURA; } @@ -1577,6 +1589,7 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; pfvf->hw.cgx_links = rsp->cgx_links; pfvf->hw.lbk_links = rsp->lbk_links; + pfvf->hw.tx_link = rsp->tx_link; } EXPORT_SYMBOL(mbox_handler_nix_lf_alloc); @@ -1668,6 +1681,11 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf) * SMQ errors */ max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN; + + /* Also save DWRR MTU, needed for DWRR weight calculation */ + pfvf->hw.dwrr_mtu = rsp->rpm_dwrr_mtu; + if (!pfvf->hw.dwrr_mtu) + pfvf->hw.dwrr_mtu = 1; } out: diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 8fd58cd07f50..48227cec06ee 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #ifndef OTX2_COMMON_H @@ -19,11 +16,13 @@ #include <linux/timecounter.h> #include <linux/soc/marvell/octeontx2/asm.h> #include <net/pkt_cls.h> +#include <net/devlink.h> #include <mbox.h> #include <npc.h> #include "otx2_reg.h" #include "otx2_txrx.h" +#include "otx2_devlink.h" #include <rvu_trace.h> /* PCI device IDs */ @@ -181,6 +180,7 @@ struct otx2_hw { /* NIX */ u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; u16 matchall_ipolicer; + u32 dwrr_mtu; /* HW settings, coalescing etc */ u16 rx_chan_base; @@ -196,6 +196,9 @@ struct otx2_hw { u8 lso_udpv4_idx; u8 lso_udpv6_idx; + /* RSS */ + u8 flowkey_alg_idx; + /* MSI-X */ u8 cint_cnt; /* CQ interrupt count */ u16 npa_msixoff; /* Offset of NPA vectors */ @@ -212,6 +215,7 @@ struct otx2_hw { u64 cgx_fec_uncorr_blks; u8 cgx_links; /* No. of CGX links present in HW */ u8 lbk_links; /* No. of LBK links present in HW */ + u8 tx_link; /* Transmit channel link number */ #define HW_TSO 0 #define CN10K_MBOX 1 #define CN10K_LMTST 2 @@ -267,7 +271,6 @@ struct otx2_mac_table { }; struct otx2_flow_config { - u16 entry[NPC_MAX_NONCONTIG_ENTRIES]; u16 *flow_ent; u16 *def_ent; u16 nr_flows; @@ -278,16 +281,13 @@ struct otx2_flow_config { #define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \ OTX2_MAX_UNICAST_FLOWS + \ OTX2_MAX_VLAN_FLOWS) - u16 ntuple_offset; u16 unicast_offset; u16 rx_vlan_offset; u16 vf_vlan_offset; #define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */ #define OTX2_VF_VLAN_RX_INDEX 0 #define OTX2_VF_VLAN_TX_INDEX 1 - u16 tc_flower_offset; - u16 ntuple_max_flows; - u16 tc_max_flows; + u16 max_flows; u8 dmacflt_max_flows; u8 *bmap_to_dmacindex; unsigned long dmacflt_bmap; @@ -298,8 +298,7 @@ struct otx2_tc_info { /* hash table to store TC offloaded flows */ struct rhashtable flow_table; struct rhashtable_params flow_ht_params; - DECLARE_BITMAP(tc_entries_bitmap, OTX2_MAX_TC_FLOWS); - unsigned long num_entries; + unsigned long *tc_entries_bitmap; }; struct dev_hw_ops { @@ -352,6 +351,11 @@ struct otx2_nic { struct otx2_vf_config *vf_configs; struct cgx_link_user_info linfo; + /* NPC MCAM */ + struct otx2_flow_config *flow_cfg; + struct otx2_mac_table *mac_table; + struct otx2_tc_info tc_info; + u64 reset_count; struct work_struct reset_task; struct workqueue_struct *flr_wq; @@ -359,7 +363,6 @@ struct otx2_nic { struct refill_work *refill_wrk; struct workqueue_struct *otx2_wq; struct work_struct rx_mode_work; - struct otx2_mac_table *mac_table; /* Ethtool stuff */ u32 msg_enable; @@ -375,9 +378,10 @@ struct otx2_nic { struct otx2_ptp *ptp; struct hwtstamp_config tstamp; - struct otx2_flow_config *flow_cfg; - struct otx2_tc_info tc_info; unsigned long rq_bmap; + + /* Devlink */ + struct otx2_devlink *dl; }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) @@ -709,6 +713,11 @@ MBOX_UP_CGX_MESSAGES #define RVU_PFVF_FUNC_SHIFT 0 #define RVU_PFVF_FUNC_MASK 0x3FF +static inline bool is_otx2_vf(u16 pcifunc) +{ + return !!(pcifunc & RVU_PFVF_FUNC_MASK); +} + static inline int rvu_get_pf(u16 pcifunc) { return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; @@ -814,7 +823,8 @@ int otx2_set_real_num_queues(struct net_device *netdev, int tx_queues, int rx_queues); /* MCAM filter related APIs */ int otx2_mcam_flow_init(struct otx2_nic *pf); -int otx2_alloc_mcam_entries(struct otx2_nic *pfvf); +int otx2vf_mcam_flow_init(struct otx2_nic *pfvf); +int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count); void otx2_mcam_flow_del(struct otx2_nic *pf); int otx2_destroy_ntuple_flows(struct otx2_nic *pf); int otx2_destroy_mcam_flows(struct 
otx2_nic *pfvf); @@ -825,8 +835,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc); int otx2_remove_flow(struct otx2_nic *pfvf, u32 location); -int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, - struct npc_install_flow_req *req); +int otx2_get_maxflows(struct otx2_flow_config *flow_cfg); void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id); int otx2_del_macfilter(struct net_device *netdev, const u8 *mac); int otx2_add_macfilter(struct net_device *netdev, const u8 *mac); @@ -838,6 +847,7 @@ int otx2_init_tc(struct otx2_nic *nic); void otx2_shutdown_tc(struct otx2_nic *nic); int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); +int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic); /* CGX/RPM DMAC filters support */ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf); int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c new file mode 100644 index 000000000000..7ac3ef2fa06a --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU PF/VF Netdev Devlink + * + * Copyright (C) 2021 Marvell. + */ + +#include "otx2_common.h" + +/* Devlink Params APIs */ +static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + struct otx2_flow_config *flow_cfg; + + if (!pfvf->flow_cfg) { + NL_SET_ERR_MSG_MOD(extack, + "pfvf->flow_cfg not initialized"); + return -EINVAL; + } + + flow_cfg = pfvf->flow_cfg; + if (flow_cfg && flow_cfg->nr_flows) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot modify count when there are active rules"); + return -EINVAL; + } + + return 0; +} + +static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + + if (!pfvf->flow_cfg) + return 0; + + otx2_alloc_mcam_entries(pfvf, ctx->val.vu16); + otx2_tc_alloc_ent_bitmap(pfvf); + + return 0; +} + +static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + struct otx2_flow_config *flow_cfg; + + if (!pfvf->flow_cfg) { + ctx->val.vu16 = 0; + return 0; + } + + flow_cfg = pfvf->flow_cfg; + ctx->val.vu16 = flow_cfg->max_flows; + + return 0; +} + +enum otx2_dl_param_id { + OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + OTX2_DEVLINK_PARAM_ID_MCAM_COUNT, +}; + +static const struct devlink_param otx2_dl_params[] = { + DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_MCAM_COUNT, + "mcam_count", DEVLINK_PARAM_TYPE_U16, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + otx2_dl_mcam_count_get, otx2_dl_mcam_count_set, + otx2_dl_mcam_count_validate), +}; + +/* Devlink OPs */ +static int otx2_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + + if (is_otx2_vf(pfvf->pcifunc)) + return devlink_info_driver_name_put(req, "rvu_nicvf"); + + return devlink_info_driver_name_put(req, "rvu_nicpf"); +} 
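Taken together, the three mcam_count callbacks above form one runtime round-trip: validate rejects a resize while filter rules are installed, set re-allocates the MCAM entries and then resizes the TC bitmap, and get simply reports flow_cfg->max_flows. A minimal sketch of that flow, using the helpers declared earlier in this patch (the wrapper function itself is hypothetical, for illustration only):

static int example_mcam_count_update(struct devlink *devlink, u16 new_count)
{
	struct otx2_devlink *otx2_dl = devlink_priv(devlink);
	struct otx2_nic *pfvf = otx2_dl->pfvf;

	/* validate step: refuse a resize while ntuple/TC rules exist */
	if (!pfvf->flow_cfg || pfvf->flow_cfg->nr_flows)
		return -EINVAL;

	/* set step: re-allocate MCAM entries, then resize the TC bitmap;
	 * like the real set callback, the allocated count is not checked here
	 */
	otx2_alloc_mcam_entries(pfvf, new_count);
	return otx2_tc_alloc_ent_bitmap(pfvf);
}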
+ +static const struct devlink_ops otx2_devlink_ops = { + .info_get = otx2_devlink_info_get, +}; + +int otx2_register_dl(struct otx2_nic *pfvf) +{ + struct otx2_devlink *otx2_dl; + struct devlink *dl; + int err; + + dl = devlink_alloc(&otx2_devlink_ops, + sizeof(struct otx2_devlink), pfvf->dev); + if (!dl) { + dev_warn(pfvf->dev, "devlink_alloc failed\n"); + return -ENOMEM; + } + + err = devlink_register(dl); + if (err) { + dev_err(pfvf->dev, "devlink register failed with error %d\n", err); + devlink_free(dl); + return err; + } + + otx2_dl = devlink_priv(dl); + otx2_dl->dl = dl; + otx2_dl->pfvf = pfvf; + pfvf->dl = otx2_dl; + + err = devlink_params_register(dl, otx2_dl_params, + ARRAY_SIZE(otx2_dl_params)); + if (err) { + dev_err(pfvf->dev, + "devlink params register failed with error %d\n", err); + goto err_dl; + } + + devlink_params_publish(dl); + + return 0; + +err_dl: + devlink_unregister(dl); + devlink_free(dl); + return err; +} + +void otx2_unregister_dl(struct otx2_nic *pfvf) +{ + struct otx2_devlink *otx2_dl = pfvf->dl; + struct devlink *dl; + + if (!otx2_dl || !otx2_dl->dl) + return; + + dl = otx2_dl->dl; + + devlink_params_unregister(dl, otx2_dl_params, + ARRAY_SIZE(otx2_dl_params)); + + devlink_unregister(dl); + devlink_free(dl); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h new file mode 100644 index 000000000000..c7bd4f3c6c6b --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU PF/VF Netdev Devlink + * + * Copyright (C) 2021 Marvell. + * + */ + +#ifndef OTX2_DEVLINK_H +#define OTX2_DEVLINK_H + +struct otx2_devlink { + struct devlink *dl; + struct otx2_nic *pfvf; +}; + +/* Devlink APIs */ +int otx2_register_dl(struct otx2_nic *pfvf); +void otx2_unregister_dl(struct otx2_nic *pfvf); + +#endif /* OTX2_DEVLINK_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c index 383a6b5cb698..2ec800f741d8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physcial Function ethernet driver +/* Marvell RVU Ethernet driver * * Copyright (C) 2021 Marvell. + * */ #include "otx2_common.h" diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index b906a0eb6e0d..799486c72177 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/pci.h> @@ -33,9 +30,6 @@ struct otx2_stat { .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \ } -/* Physical link config */ -#define OTX2_ETHTOOL_SUPPORTED_MODES 0x638CCBF //110001110001100110010111111 - enum link_mode { OTX2_MODE_SUPPORTED, OTX2_MODE_ADVERTISED @@ -415,7 +409,9 @@ static int otx2_set_ringparam(struct net_device *netdev, } static int otx2_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *cmd) + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_hw *hw = &pfvf->hw; @@ -429,7 +425,9 @@ static int otx2_get_coalesce(struct net_device *netdev, } static int otx2_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_hw *hw = &pfvf->hw; @@ -645,6 +643,7 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf, static int otx2_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc, u32 *rules) { + bool ntuple = !!(dev->features & NETIF_F_NTUPLE); struct otx2_nic *pfvf = netdev_priv(dev); int ret = -EOPNOTSUPP; @@ -654,14 +653,18 @@ static int otx2_get_rxnfc(struct net_device *dev, ret = 0; break; case ETHTOOL_GRXCLSRLCNT: - nfc->rule_cnt = pfvf->flow_cfg->nr_flows; - ret = 0; + if (netif_running(dev) && ntuple) { + nfc->rule_cnt = pfvf->flow_cfg->nr_flows; + ret = 0; + } break; case ETHTOOL_GRXCLSRULE: - ret = otx2_get_flow(pfvf, nfc, nfc->fs.location); + if (netif_running(dev) && ntuple) + ret = otx2_get_flow(pfvf, nfc, nfc->fs.location); break; case ETHTOOL_GRXCLSRLALL: - ret = otx2_get_all_flows(pfvf, nfc, rules); + if (netif_running(dev) && ntuple) + ret = otx2_get_all_flows(pfvf, nfc, rules); break; case ETHTOOL_GRXFH: return otx2_get_rss_hash_opts(pfvf, nfc); @@ -696,41 +699,6 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) return ret; } -static int otx2vf_get_rxnfc(struct net_device *dev, - struct ethtool_rxnfc *nfc, u32 *rules) -{ - struct otx2_nic *pfvf = netdev_priv(dev); - int ret = -EOPNOTSUPP; - - switch (nfc->cmd) { - case ETHTOOL_GRXRINGS: - nfc->data = pfvf->hw.rx_queues; - ret = 0; - break; - case ETHTOOL_GRXFH: - return otx2_get_rss_hash_opts(pfvf, nfc); - default: - break; - } - return ret; -} - -static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) -{ - struct otx2_nic *pfvf = netdev_priv(dev); - int ret = -EOPNOTSUPP; - - switch (nfc->cmd) { - case ETHTOOL_SRXFH: - ret = otx2_set_rss_hash_opts(pfvf, nfc); - break; - default: - break; - } - - return ret; -} - static u32 otx2_get_rxfh_key_size(struct net_device *netdev) { struct otx2_nic *pfvf = netdev_priv(netdev); @@ -1116,8 +1084,6 @@ static void otx2_get_link_mode_info(u64 link_mode_bmap, }; u8 bit; - link_mode_bmap = link_mode_bmap & OTX2_ETHTOOL_SUPPORTED_MODES; - for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) { /* SGMII mode is set */ if (bit == 0) @@ -1357,8 +1323,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = { .get_sset_count = otx2vf_get_sset_count, .set_channels = otx2_set_channels, .get_channels = otx2_get_channels, - .get_rxnfc = otx2vf_get_rxnfc, - .set_rxnfc = otx2vf_set_rxnfc, + .get_rxnfc = otx2_get_rxnfc, + .set_rxnfc = otx2_set_rxnfc, .get_rxfh_key_size = otx2_get_rxfh_key_size, .get_rxfh_indir_size = otx2_get_rxfh_indir_size, .get_rxfh = otx2_get_rxfh, 
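The otx2_flows.c changes below hinge on one bookkeeping invariant: ethtool ntuple rule location N maps to MCAM entry flow_ent[N], and because the AF can satisfy one request with several non-contiguous allocations, the array must be kept sorted in ascending order (this is what the new mcam_entry_cmp()/sort() pair implements). A self-contained user-space sketch of the same idea, with made-up entry numbers:

#include <stdio.h>
#include <stdlib.h>

/* Same ordering rule as mcam_entry_cmp() in the driver. */
static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(const unsigned short *)a - *(const unsigned short *)b;
}

int main(void)
{
	/* Two hypothetical AF allocations: 224..226, then 96..98. */
	unsigned short flow_ent[] = { 224, 225, 226, 96, 97, 98 };
	size_t i, n = sizeof(flow_ent) / sizeof(flow_ent[0]);

	qsort(flow_ent, n, sizeof(flow_ent[0]), mcam_entry_cmp);

	/* After sorting, rule location i uses flow_ent[i]; lower MCAM
	 * indices are matched at higher priority in hardware.
	 */
	for (i = 0; i < n; i++)
		printf("rule location %zu -> MCAM entry %u\n", i, flow_ent[i]);
	return 0;
}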
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 4d9de525802d..77a13fb555fb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -1,15 +1,19 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physical Function ethernet driver +/* Marvell RVU Ethernet driver * * Copyright (C) 2020 Marvell. + * */ #include <net/ipv6.h> +#include <linux/sort.h> #include "otx2_common.h" #define OTX2_DEFAULT_ACTION 0x1 +static int otx2_mcam_entry_init(struct otx2_nic *pfvf); + struct otx2_flow { struct ethtool_rx_flow_spec flow_spec; struct list_head list; @@ -30,8 +34,7 @@ static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_ { devm_kfree(pfvf->dev, flow_cfg->flow_ent); flow_cfg->flow_ent = NULL; - flow_cfg->ntuple_max_flows = 0; - flow_cfg->tc_max_flows = 0; + flow_cfg->max_flows = 0; } static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf) @@ -40,11 +43,11 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf) struct npc_mcam_free_entry_req *req; int ent, err; - if (!flow_cfg->ntuple_max_flows) + if (!flow_cfg->max_flows) return 0; mutex_lock(&pfvf->mbox.lock); - for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) { + for (ent = 0; ent < flow_cfg->max_flows; ent++) { req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox); if (!req) break; @@ -61,7 +64,12 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf) return 0; } -static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count) +static int mcam_entry_cmp(const void *a, const void *b) +{ + return *(u16 *)a - *(u16 *)b; +} + +int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_mcam_alloc_entry_req *req; @@ -76,8 +84,12 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count) flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count, sizeof(u16), GFP_KERNEL); - if (!flow_cfg->flow_ent) + if (!flow_cfg->flow_ent) { + netdev_err(pfvf->netdev, + "%s: Unable to allocate memory for flow entries\n", + __func__); return -ENOMEM; + } mutex_lock(&pfvf->mbox.lock); @@ -92,8 +104,14 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count) req->contig = false; req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ? NPC_MAX_NONCONTIG_ENTRIES : count - allocated; - req->priority = NPC_MCAM_HIGHER_PRIO; - req->ref_entry = flow_cfg->def_ent[0]; + + /* Allocate higher priority entries for PFs, so that VF's entries + * will be on top of PF. + */ + if (!is_otx2_vf(pfvf->pcifunc)) { + req->priority = NPC_MCAM_HIGHER_PRIO; + req->ref_entry = flow_cfg->def_ent[0]; + } /* Send message to AF */ if (otx2_sync_mbox_msg(&pfvf->mbox)) @@ -114,22 +132,34 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count) break; } + /* Multiple MCAM entry alloc requests could result in non-sequential + * MCAM entries in the flow_ent[] array. Sort them in an ascending order, + * otherwise user installed ntuple filter index and MCAM entry index will + * not be in sync. 
+ */ + if (allocated) + sort(&flow_cfg->flow_ent[0], allocated, + sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL); + exit: mutex_unlock(&pfvf->mbox.lock); - flow_cfg->ntuple_offset = 0; - flow_cfg->ntuple_max_flows = allocated; - flow_cfg->tc_max_flows = allocated; + flow_cfg->max_flows = allocated; + + if (allocated) { + pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC; + pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT; + } if (allocated != count) netdev_info(pfvf->netdev, - "Unable to allocate %d MCAM entries for ntuple, got %d\n", + "Unable to allocate %d MCAM entries, got only %d\n", count, allocated); - return allocated; } +EXPORT_SYMBOL(otx2_alloc_mcam_entries); -int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) +static int otx2_mcam_entry_init(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_mcam_alloc_entry_req *req; @@ -189,18 +219,35 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) mutex_unlock(&pfvf->mbox.lock); /* Allocate entries for Ntuple filters */ - count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT); + count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT); if (count <= 0) { otx2_clear_ntuple_flow_info(pfvf, flow_cfg); return 0; } - pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT; pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; return 0; } +int otx2vf_mcam_flow_init(struct otx2_nic *pfvf) +{ + struct otx2_flow_config *flow_cfg; + + pfvf->flow_cfg = devm_kzalloc(pfvf->dev, + sizeof(struct otx2_flow_config), + GFP_KERNEL); + if (!pfvf->flow_cfg) + return -ENOMEM; + + flow_cfg = pfvf->flow_cfg; + INIT_LIST_HEAD(&flow_cfg->flow_list); + flow_cfg->max_flows = 0; + + return 0; +} +EXPORT_SYMBOL(otx2vf_mcam_flow_init); + int otx2_mcam_flow_init(struct otx2_nic *pf) { int err; @@ -212,7 +259,10 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) INIT_LIST_HEAD(&pf->flow_cfg->flow_list); - err = otx2_alloc_mcam_entries(pf); + /* Allocate bare minimum number of MCAM entries needed for + * unicast and ntuple filters. 
+ */ + err = otx2_mcam_entry_init(pf); if (err) return err; @@ -248,6 +298,7 @@ void otx2_mcam_flow_del(struct otx2_nic *pf) { otx2_destroy_mcam_flows(pf); } +EXPORT_SYMBOL(otx2_mcam_flow_del); /* On success adds mcam entry * On failure enable promisous mode @@ -379,15 +430,19 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow) list_add(&flow->list, head); } -static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg) +int otx2_get_maxflows(struct otx2_flow_config *flow_cfg) { - if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows || + if (!flow_cfg) + return 0; + + if (flow_cfg->nr_flows == flow_cfg->max_flows || bitmap_weight(&flow_cfg->dmacflt_bmap, flow_cfg->dmacflt_max_flows)) - return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows; + return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows; else - return flow_cfg->ntuple_max_flows; + return flow_cfg->max_flows; } +EXPORT_SYMBOL(otx2_get_maxflows); int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, u32 location) @@ -708,7 +763,7 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp, return 0; } -int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, +static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, struct npc_install_flow_req *req) { struct ethhdr *eth_mask = &fsp->m_u.ether_spec; @@ -732,7 +787,7 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, ether_addr_copy(pmask->dmac, eth_mask->h_dest); req->features |= BIT_ULL(NPC_DMAC); } - if (eth_mask->h_proto) { + if (eth_hdr->h_proto) { memcpy(&pkt->etype, ð_hdr->h_proto, sizeof(pkt->etype)); memcpy(&pmask->etype, ð_mask->h_proto, @@ -764,14 +819,31 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, return -EOPNOTSUPP; } if (fsp->flow_type & FLOW_EXT) { - if (fsp->m_ext.vlan_etype) - return -EINVAL; - if (fsp->m_ext.vlan_tci) { - if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)) + u16 vlan_etype; + + if (fsp->m_ext.vlan_etype) { + /* Partial masks not supported */ + if (be16_to_cpu(fsp->m_ext.vlan_etype) != 0xFFFF) return -EINVAL; - if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID) + + vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype); + /* Only ETH_P_8021Q and ETH_P_802AD types supported */ + if (vlan_etype != ETH_P_8021Q && + vlan_etype != ETH_P_8021AD) return -EINVAL; + memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype, + sizeof(pkt->vlan_etype)); + memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype, + sizeof(pmask->vlan_etype)); + + if (vlan_etype == ETH_P_8021Q) + req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG); + else + req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG); + } + + if (fsp->m_ext.vlan_tci) { memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci, sizeof(pkt->vlan_tci)); memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci, @@ -858,6 +930,7 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) if (flow->flow_spec.flow_type & FLOW_RSS) { req->op = NIX_RX_ACTIONOP_RSS; req->index = flow->rss_ctx_id; + req->flow_key_alg = pfvf->hw.flowkey_alg_idx; } else { req->op = NIX_RX_ACTIONOP_UCAST; req->index = ethtool_get_flow_spec_ring(ring_cookie); @@ -894,7 +967,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf, pf_mac->entry = 0; pf_mac->dmac_filter = true; - pf_mac->location = pfvf->flow_cfg->ntuple_max_flows; + pf_mac->location = pfvf->flow_cfg->max_flows; memcpy(&pf_mac->flow_spec, &flow->flow_spec, sizeof(struct ethtool_rx_flow_spec)); pf_mac->flow_spec.location = pf_mac->location; @@ -923,6 +996,12 @@ int 
otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) int err = 0; u32 ring; + if (!flow_cfg->max_flows) { + netdev_err(pfvf->netdev, + "Ntuple rule count is 0, allocate and retry\n"); + return -EINVAL; + } + ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) return -ENOMEM; @@ -939,6 +1018,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (!flow) return -ENOMEM; flow->location = fsp->location; + flow->entry = flow_cfg->flow_ent[flow->location]; new = true; } /* struct copy */ @@ -975,7 +1055,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) flow->dmac_filter = true; flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap, flow_cfg->dmacflt_max_flows); - fsp->location = flow_cfg->ntuple_max_flows + flow->entry; + fsp->location = flow_cfg->max_flows + flow->entry; flow->flow_spec.location = fsp->location; flow->location = fsp->location; @@ -983,19 +1063,20 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry); } else { - if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) { + if (flow->location >= pfvf->flow_cfg->max_flows) { netdev_warn(pfvf->netdev, "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n", flow->location, - flow_cfg->ntuple_max_flows - 1); + flow_cfg->max_flows - 1); err = -EINVAL; } else { - flow->entry = flow_cfg->flow_ent[flow->location]; err = otx2_add_flow_msg(pfvf, flow); } } if (err) { + if (err == MBOX_MSG_INVALID) + err = -EINVAL; if (new) kfree(flow); return err; @@ -1132,6 +1213,9 @@ int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf) if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) return 0; + if (!flow_cfg->max_flows) + return 0; + mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox); if (!req) { @@ -1140,7 +1224,7 @@ int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf) } req->start = flow_cfg->flow_ent[0]; - req->end = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1]; + req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1]; err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 2c24944a4dba..2f2e8a3d7924 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physical Function ethernet driver +/* Marvell RVU Physical Function ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/module.h> @@ -1787,17 +1784,10 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) static netdev_features_t otx2_fix_features(struct net_device *dev, netdev_features_t features) { - /* check if n-tuple filters are ON */ - if ((features & NETIF_F_HW_TC) && (dev->features & NETIF_F_NTUPLE)) { - netdev_info(dev, "Disabling n-tuple filters\n"); - features &= ~NETIF_F_NTUPLE; - } - - /* check if tc hw offload is ON */ - if ((features & NETIF_F_NTUPLE) && (dev->features & NETIF_F_HW_TC)) { - netdev_info(dev, "Disabling TC hardware offload\n"); - features &= ~NETIF_F_HW_TC; - } + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_STAG_RX; + else + features &= ~NETIF_F_HW_VLAN_STAG_RX; return features; } @@ -1854,6 +1844,7 @@ static int otx2_set_features(struct net_device *netdev, netdev_features_t changed = features ^ netdev->features; bool ntuple = !!(features & NETIF_F_NTUPLE); struct otx2_nic *pf = netdev_priv(netdev); + bool tc = !!(features & NETIF_F_HW_TC); if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev)) return otx2_cgx_config_loopback(pf, @@ -1866,12 +1857,42 @@ static int otx2_set_features(struct net_device *netdev, if ((changed & NETIF_F_NTUPLE) && !ntuple) otx2_destroy_ntuple_flows(pf); - if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && - pf->tc_info.num_entries) { + if ((changed & NETIF_F_NTUPLE) && ntuple) { + if (!pf->flow_cfg->max_flows) { + netdev_err(netdev, + "Can't enable NTUPLE, MCAM entries not allocated\n"); + return -EINVAL; + } + } + + if ((changed & NETIF_F_HW_TC) && tc) { + if (!pf->flow_cfg->max_flows) { + netdev_err(netdev, + "Can't enable TC, MCAM entries not allocated\n"); + return -EINVAL; + } + } + + if ((changed & NETIF_F_HW_TC) && !tc && + pf->flow_cfg && pf->flow_cfg->nr_flows) { netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n"); return -EBUSY; } + if ((changed & NETIF_F_NTUPLE) && ntuple && + (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) { + netdev_err(netdev, + "Can't enable NTUPLE when TC is active, disable TC and retry\n"); + return -EINVAL; + } + + if ((changed & NETIF_F_HW_TC) && tc && + (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) { + netdev_err(netdev, + "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n"); + return -EINVAL; + } + return 0; } @@ -2331,7 +2352,7 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_set_features = otx2_set_features, .ndo_tx_timeout = otx2_tx_timeout, .ndo_get_stats64 = otx2_get_stats64, - .ndo_do_ioctl = otx2_ioctl, + .ndo_eth_ioctl = otx2_ioctl, .ndo_set_vf_mac = otx2_set_vf_mac, .ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_get_vf_config = otx2_get_vf_config, @@ -2569,8 +2590,6 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) NETIF_F_GSO_UDP_L4); netdev->features |= netdev->hw_features; - netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL; - err = otx2_mcam_flow_init(pf); if (err) goto err_ptp_destroy; @@ -2594,12 +2613,13 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT) netdev->hw_features |= NETIF_F_HW_TC; + netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL; + netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2_netdev_ops; - /* MTU range: 64 - 9190 */ netdev->min_mtu = OTX2_MIN_MTU; netdev->max_mtu = otx2_get_max_mtu(pf); @@ -2619,6 +2639,10 @@ static int 
otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_mcam_flow_del; + err = otx2_register_dl(pf); + if (err) + goto err_mcam_flow_del; + /* Initialize SR-IOV resources */ err = otx2_sriov_vfcfg_init(pf); if (err) @@ -2776,6 +2800,7 @@ static void otx2_remove(struct pci_dev *pdev) /* Disable link notifications */ otx2_cgx_config_linkevents(pf, false); + otx2_unregister_dl(pf); unregister_netdev(netdev); otx2_sriov_disable(pf->pdev); otx2_sriov_vfcfg_cleanup(pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 56390a664517..ec9e49985c2c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 PTP support for ethernet driver +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2020 Marvell. * - * Copyright (C) 2020 Marvell International Ltd. */ #include "otx2_common.h" diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h index 706d63a43ae1..6ff284211d7b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h @@ -1,5 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 PTP support for ethernet driver */ +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2020 Marvell. + * + */ #ifndef OTX2_PTP_H #define OTX2_PTP_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h index f4fd72ee9a25..1b967eaf948b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef OTX2_REG_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h index 1f49b3caf5d4..4bbd12ff26e6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef OTX2_STRUCT_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 972b202b9884..626961a41089 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -1,8 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physcial Function ethernet driver +/* Marvell RVU Ethernet driver * * Copyright (C) 2021 Marvell. 
+ * */ + #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/inetdevice.h> @@ -52,6 +54,29 @@ struct otx2_tc_flow { bool is_act_police; }; +int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic) +{ + struct otx2_tc_info *tc = &nic->tc_info; + + if (!nic->flow_cfg->max_flows || is_otx2_vf(nic->pcifunc)) + return 0; + + /* Max flows changed, free the existing bitmap */ + kfree(tc->tc_entries_bitmap); + + tc->tc_entries_bitmap = + kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows), + sizeof(long), GFP_KERNEL); + if (!tc->tc_entries_bitmap) { + netdev_err(nic->netdev, + "Unable to alloc TC flow entries bitmap\n"); + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap); + static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp, u32 *burst_mantissa) { @@ -485,8 +510,8 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, match.key->vlan_priority << 13; vlan_tci_mask = match.mask->vlan_id | - match.key->vlan_dei << 12 | - match.key->vlan_priority << 13; + match.mask->vlan_dei << 12 | + match.mask->vlan_priority << 13; flow_spec->vlan_tci = htons(vlan_tci); flow_mask->vlan_tci = htons(vlan_tci_mask); @@ -596,6 +621,7 @@ static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry) static int otx2_tc_del_flow(struct otx2_nic *nic, struct flow_cls_offload *tc_flow_cmd) { + struct otx2_flow_config *flow_cfg = nic->flow_cfg; struct otx2_tc_info *tc_info = &nic->tc_info; struct otx2_tc_flow *flow_node; int err; @@ -638,7 +664,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, kfree_rcu(flow_node, rcu); clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap); - tc_info->num_entries--; + flow_cfg->nr_flows--; return 0; } @@ -647,6 +673,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, struct flow_cls_offload *tc_flow_cmd) { struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; + struct otx2_flow_config *flow_cfg = nic->flow_cfg; struct otx2_tc_info *tc_info = &nic->tc_info; struct otx2_tc_flow *new_node, *old_node; struct npc_install_flow_req *req, dummy; @@ -655,9 +682,9 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) return -ENOMEM; - if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) { + if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) { NL_SET_ERR_MSG_MOD(extack, - "Not enough MCAM space to add the flow"); + "Free MCAM entry not available to add the flow"); return -ENOMEM; } @@ -695,10 +722,9 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap, - nic->flow_cfg->tc_max_flows); + flow_cfg->max_flows); req->channel = nic->hw.rx_chan_base; - req->entry = nic->flow_cfg->flow_ent[nic->flow_cfg->tc_flower_offset + - nic->flow_cfg->tc_max_flows - new_node->bitpos]; + req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1]; req->intf = NIX_INTF_RX; req->set_cntr = 1; new_node->entry = req->entry; @@ -723,7 +749,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, } set_bit(new_node->bitpos, tc_info->tc_entries_bitmap); - tc_info->num_entries++; + flow_cfg->nr_flows++; return 0; @@ -1008,10 +1034,21 @@ static const struct rhashtable_params tc_flow_ht_params = { int otx2_init_tc(struct otx2_nic *nic) { struct otx2_tc_info *tc = &nic->tc_info; + int err; /* Exclude receive queue 0 being used for police action */ set_bit(0, &nic->rq_bmap); + if (!nic->flow_cfg) { + 
netdev_err(nic->netdev, + "Can't init TC, nic->flow_cfg is not setup\n"); + return -EINVAL; + } + + err = otx2_tc_alloc_ent_bitmap(nic); + if (err) + return err; + tc->flow_ht_params = tc_flow_ht_params; return rhashtable_init(&tc->flow_table, &tc->flow_ht_params); } @@ -1020,5 +1057,6 @@ void otx2_shutdown_tc(struct otx2_nic *nic) { struct otx2_tc_info *tc = &nic->tc_info; + kfree(tc->tc_entries_bitmap); rhashtable_destroy(&tc->flow_table); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 22ec03a618b1..f42b1d4e0c67 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/etherdevice.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 2f144e2cf436..869de5f59e73 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef OTX2_TXRX_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index a8bee5aefec1..03b4ec630432 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -1,5 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Virtual Function ethernet driver */ +/* Marvell RVU Virtual Function ethernet driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ #include <linux/etherdevice.h> #include <linux/module.h> @@ -464,6 +468,28 @@ static void otx2vf_reset_task(struct work_struct *work) rtnl_unlock(); } +static int otx2vf_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = features ^ netdev->features; + bool ntuple_enabled = !!(features & NETIF_F_NTUPLE); + struct otx2_nic *vf = netdev_priv(netdev); + + if (changed & NETIF_F_NTUPLE) { + if (!ntuple_enabled) { + otx2_mcam_flow_del(vf); + return 0; + } + + if (!otx2_get_maxflows(vf->flow_cfg)) { + netdev_err(netdev, + "Can't enable NTUPLE, MCAM entries not allocated\n"); + return -EINVAL; + } + } + return 0; +} + static const struct net_device_ops otx2vf_netdev_ops = { .ndo_open = otx2vf_open, .ndo_stop = otx2vf_stop, @@ -471,6 +497,7 @@ static const struct net_device_ops otx2vf_netdev_ops = { .ndo_set_rx_mode = otx2vf_set_rx_mode, .ndo_set_mac_address = otx2_set_mac_address, .ndo_change_mtu = otx2vf_change_mtu, + .ndo_set_features = otx2vf_set_features, .ndo_get_stats64 = otx2_get_stats64, .ndo_tx_timeout = otx2_tx_timeout, }; @@ -627,12 +654,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) NETIF_F_HW_VLAN_STAG_TX; netdev->features |= netdev->hw_features; + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->hw_features |= NETIF_F_RXALL; + netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2vf_netdev_ops; - /* MTU range: 68 - 9190 */ netdev->min_mtu = OTX2_MIN_MTU; netdev->max_mtu = otx2_get_max_mtu(vf); @@ -658,6 +687,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) otx2vf_set_ethtool_ops(netdev); + err = otx2vf_mcam_flow_init(vf); + if (err) + goto err_unreg_netdev; + + err = otx2_register_dl(vf); + if (err) + goto err_unreg_netdev; + /* Enable pause frames by default */ vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED; vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED; @@ -695,6 +732,7 @@ static void otx2vf_remove(struct pci_dev *pdev) vf = netdev_priv(netdev); cancel_work_sync(&vf->reset_task); + otx2_unregister_dl(vf); unregister_netdev(netdev); if (vf->otx2_wq) destroy_workqueue(vf->otx2_wq); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c index fa7a0682ad1e..68b442eb6d69 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c @@ -390,11 +390,12 @@ static const struct devlink_ops prestera_dl_ops = { .trap_drop_counter_get = prestera_drop_counter_get, }; -struct prestera_switch *prestera_devlink_alloc(void) +struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev) { struct devlink *dl; - dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch)); + dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch), + dev->dev); return devlink_priv(dl); } @@ -411,7 +412,7 @@ int prestera_devlink_register(struct prestera_switch *sw) struct devlink *dl = priv_to_devlink(sw); int err; - err = devlink_register(dl, sw->dev->dev); + err = devlink_register(dl); if (err) { dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err); return err; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h index 5d73aa9db897..cc34c3db13a2 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h @@ -6,7 
+6,7 @@ #include "prestera.h" -struct prestera_switch *prestera_devlink_alloc(void); +struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev); void prestera_devlink_free(struct prestera_switch *sw); int prestera_devlink_register(struct prestera_switch *sw); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 226f4ff29f6e..44c670807fb3 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -746,7 +746,8 @@ static int prestera_netdev_port_event(struct net_device *lower, case NETDEV_CHANGEUPPER: if (netif_is_bridge_master(upper)) { if (info->linking) - return prestera_bridge_port_join(upper, port); + return prestera_bridge_port_join(upper, port, + extack); else prestera_bridge_port_leave(upper, port); } else if (netif_is_lag_master(upper)) { @@ -904,7 +905,7 @@ int prestera_device_register(struct prestera_device *dev) struct prestera_switch *sw; int err; - sw = prestera_devlink_alloc(); + sw = prestera_devlink_alloc(dev); if (!sw) return -ENOMEM; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c index 9a309169dbae..3ce6ccd0f539 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c @@ -480,7 +480,8 @@ err_port_flood_set: } int prestera_bridge_port_join(struct net_device *br_dev, - struct prestera_port *port) + struct prestera_port *port, + struct netlink_ext_ack *extack) { struct prestera_switchdev *swdev = port->sw->swdev; struct prestera_bridge_port *br_port; @@ -500,6 +501,11 @@ int prestera_bridge_port_join(struct net_device *br_dev, goto err_brport_create; } + err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL, + NULL, NULL, false, extack); + if (err) + goto err_switchdev_offload; + if (bridge->vlan_enabled) return 0; @@ -510,6 +516,8 @@ int prestera_bridge_port_join(struct net_device *br_dev, return 0; err_port_join: + switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL); +err_switchdev_offload: prestera_bridge_port_put(br_port); err_brport_create: prestera_bridge_put(bridge); @@ -584,6 +592,8 @@ void prestera_bridge_port_leave(struct net_device *br_dev, else prestera_bridge_1d_port_leave(br_port); + switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL); + prestera_hw_port_learning_set(port, false); prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD, 0); prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h index a91bc35d235f..0e93fda3d9a5 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h @@ -8,7 +8,8 @@ int prestera_switchdev_init(struct prestera_switch *sw); void prestera_switchdev_fini(struct prestera_switch *sw); int prestera_bridge_port_join(struct net_device *br_dev, - struct prestera_port *port); + struct prestera_port *port, + struct netlink_ext_ack *extack); void prestera_bridge_port_leave(struct net_device *br_dev, struct prestera_port *port); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 9b48ae4bac39..fab53c9b8380 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ 
b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1377,7 +1377,7 @@ static const struct net_device_ops pxa168_eth_netdev_ops = { .ndo_set_rx_mode = pxa168_eth_set_rx_mode, .ndo_set_mac_address = pxa168_eth_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_change_mtu = pxa168_eth_change_mtu, .ndo_tx_timeout = pxa168_eth_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index d4bb27ba1419..051dd3fb5b03 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -615,7 +615,9 @@ static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec) } static int skge_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; @@ -639,7 +641,9 @@ static int skge_get_coalesce(struct net_device *dev, /* Note: interrupt timer is per board, but can turn on/off per port */ static int skge_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; @@ -3787,7 +3791,7 @@ static const struct net_device_ops skge_netdev_ops = { .ndo_open = skge_up, .ndo_stop = skge_down, .ndo_start_xmit = skge_xmit_frame, - .ndo_do_ioctl = skge_ioctl, + .ndo_eth_ioctl = skge_ioctl, .ndo_get_stats = skge_get_stats, .ndo_tx_timeout = skge_tx_timeout, .ndo_change_mtu = skge_change_mtu, diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 8b8bff59c8fe..e9fc74e54b22 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4052,7 +4052,9 @@ static int sky2_set_pauseparam(struct net_device *dev, } static int sky2_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; @@ -4087,7 +4089,9 @@ static int sky2_get_coalesce(struct net_device *dev, /* Note: this affect both ports */ static int sky2_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; @@ -4693,7 +4697,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = { .ndo_open = sky2_open, .ndo_stop = sky2_close, .ndo_start_xmit = sky2_xmit_frame, - .ndo_do_ioctl = sky2_ioctl, + .ndo_eth_ioctl = sky2_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = sky2_set_mac_address, .ndo_set_rx_mode = sky2_set_multicast, @@ -4710,7 +4714,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = { .ndo_open = sky2_open, .ndo_stop = sky2_close, .ndo_start_xmit = sky2_xmit_frame, - .ndo_do_ioctl = sky2_ioctl, + .ndo_eth_ioctl = sky2_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = sky2_set_mac_address, .ndo_set_rx_mode = sky2_set_multicast, @@ -4884,7 +4888,7 @@ static int sky2_test_msi(struct sky2_hw *hw) /* This driver supports yukon2 chipset only */ static const char *sky2_name(u8 
chipid, char *buf, int sz) { - const char *name[] = { + static const char *const name[] = { "XL", /* 0xb3 */ "EC Ultra", /* 0xb4 */ "Extreme", /* 0xb5 */ diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 64adfd24e134..398c23cec815 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2933,7 +2933,7 @@ static const struct net_device_ops mtk_netdev_ops = { .ndo_start_xmit = mtk_start_xmit, .ndo_set_mac_address = mtk_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = mtk_do_ioctl, + .ndo_eth_ioctl = mtk_do_ioctl, .ndo_change_mtu = mtk_change_mtu, .ndo_tx_timeout = mtk_tx_timeout, .ndo_get_stats64 = mtk_get_stats64, diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 96d2891f1675..1d5dd2015453 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -1162,7 +1162,7 @@ static const struct net_device_ops mtk_star_netdev_ops = { .ndo_start_xmit = mtk_star_netdev_start_xmit, .ndo_get_stats64 = mtk_star_netdev_get_stats64, .ndo_set_rx_mode = mtk_star_set_rx_mode, - .ndo_do_ioctl = mtk_star_netdev_ioctl, + .ndo_eth_ioctl = mtk_star_netdev_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 400e611ba041..1b4b1f642317 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -6,8 +6,8 @@ config MLX4_EN tristate "Mellanox Technologies 1/10/40Gbit Ethernet support" depends on PCI && NETDEVICES && ETHERNET && INET + depends on PTP_1588_CLOCK_OPTIONAL select MLX4_CORE - imply PTP_1588_CLOCK help This driver supports Mellanox Technologies ConnectX Ethernet devices. 
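A recurring pattern in the hunks above and below: drivers drop .ndo_do_ioctl in favor of .ndo_eth_ioctl, and the ethtool get/set_coalesce callbacks grow two parameters. A minimal sketch of the converted shape, assuming a hypothetical driver "foo" (struct foo_priv, the foo_* functions and the single rx_usecs field are illustrative, not taken from any driver in this series):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct foo_priv {
	u32 rx_usecs;	/* illustrative: one stored moderation value */
};

/* New 4-argument coalesce signature: the two extra parameters carry
 * CQE-mode coalescing state and a netlink extack for error reporting;
 * drivers that use neither can leave them untouched. */
static int foo_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ecmd,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	ecmd->rx_coalesce_usecs = priv->rx_usecs;
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_coalesce		   = foo_get_coalesce,
};

/* MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) and the hardware
 * timestamping ioctls now reach the driver through ndo_eth_ioctl rather
 * than ndo_do_ioctl, which is left for non-ethernet uses. */
static int foo_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_eth_ioctl = foo_ioctl,
};

The ethtool core screens requests against supported_coalesce_params before calling into the driver, so the callback only has to handle the fields it declares.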
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 3616b77caa0a..ef518b1040f7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -998,7 +998,9 @@ mlx4_en_set_link_ksettings(struct net_device *dev, } static int mlx4_en_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -1020,7 +1022,9 @@ static int mlx4_en_get_coalesce(struct net_device *dev, } static int mlx4_en_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx4_en_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 5d0c9c62382d..a2f61a87cef8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2828,7 +2828,7 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_set_mac_address = mlx4_en_set_mac, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = mlx4_en_change_mtu, - .ndo_do_ioctl = mlx4_en_ioctl, + .ndo_eth_ioctl = mlx4_en_ioctl, .ndo_tx_timeout = mlx4_en_tx_timeout, .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 442991d91c15..7f6d3b82c29b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -991,7 +991,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) * expense of more costly truesize accounting */ priv->frag_info[0].frag_stride = PAGE_SIZE; - priv->dma_dir = PCI_DMA_BIDIRECTIONAL; + priv->dma_dir = DMA_BIDIRECTIONAL; priv->rx_headroom = XDP_PACKET_HEADROOM; i = 1; } else { @@ -1021,7 +1021,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) buf_size += frag_size; i++; } - priv->dma_dir = PCI_DMA_FROMDEVICE; + priv->dma_dir = DMA_FROM_DEVICE; priv->rx_headroom = 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 31b74bddb7cd..c56b9dba4c71 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -297,12 +297,12 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, dma_unmap_single(priv->ddev, tx_info->map0_dma, tx_info->map0_byte_count, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); else dma_unmap_page(priv->ddev, tx_info->map0_dma, tx_info->map0_byte_count, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); /* Optimize the common case when there are no wraparounds */ if (likely((void *)tx_desc + (tx_info->nr_txbb << LOG_TXBB_SIZE) <= end)) { @@ -311,7 +311,7 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, dma_unmap_page(priv->ddev, (dma_addr_t)be64_to_cpu(data->addr), be32_to_cpu(data->byte_count), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } } else { if ((void *)data >= end) @@ -325,7 +325,7 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, dma_unmap_page(priv->ddev, (dma_addr_t)be64_to_cpu(data->addr), be32_to_cpu(data->byte_count), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } } } @@ -831,7 +831,7 @@ static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv, dma = dma_map_single(ddev, skb->data + 
lso_header_size, byte_count, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(ddev, dma)) goto tx_drop_unmap; @@ -853,7 +853,7 @@ tx_drop_unmap: ++data; dma_unmap_page(ddev, (dma_addr_t)be64_to_cpu(data->addr), be32_to_cpu(data->byte_count), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } return false; @@ -1170,7 +1170,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN); dma_sync_single_range_for_device(priv->ddev, dma, frame->page_offset, - length, PCI_DMA_TODEVICE); + length, DMA_TO_DEVICE); data->addr = cpu_to_be64(dma + frame->page_offset); dma_wmb(); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 28ac4693da3c..5a6b0fcaf7f8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3806,24 +3806,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); goto err_release_regions; } } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); - goto err_release_regions; - } - } /* Allow large DMA segments, up to the firmware limit of 1 GB */ dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); @@ -4005,7 +3996,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) printk_once(KERN_INFO "%s", mlx4_version); - devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv)); + devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev); if (!devlink) return -ENOMEM; priv = devlink_priv(devlink); @@ -4024,7 +4015,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&dev->persist->interface_state_mutex); mutex_init(&dev->persist->pci_status_mutex); - ret = devlink_register(devlink, &pdev->dev); + ret = devlink_register(devlink); if (ret) goto err_persist_free; ret = devlink_params_register(devlink, mlx4_devlink_params, diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 427e7a31862c..b149e601f673 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -739,7 +739,7 @@ static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev) int i; for (i = 0; - i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]); + i < ARRAY_SIZE(qp_table->zones_uids); i++) { struct mlx4_bitmap *bitmap = mlx4_zone_get_bitmap(qp_table->zones, @@ -917,7 +917,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, { int err; int i; - enum mlx4_qp_state states[] = { + static const enum mlx4_qp_state states[] = { MLX4_QP_STATE_RST, MLX4_QP_STATE_INIT, MLX4_QP_STATE_RTR, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index e1a5a79e27c7..92056452a9e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -10,7 +10,7 @@ config MLX5_CORE select NET_DEVLINK depends on VXLAN || !VXLAN depends on MLXFW || !MLXFW - depends on PTP_1588_CLOCK || !PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE help Core driver for low level functionality of the ConnectX-4 and diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index b5072a3a2585..63032cd6efb1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -15,14 +15,15 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o alloc.o port.o mr.o pd.o \ transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \ fs_counters.o fs_ft_pool.o rl.o lag.o dev.o events.o wq.o lib/gid.o \ - lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \ + lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \ fw_reset.o qos.o # # Netdev basic # -mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ +mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \ + en/channels.o en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ en_selftest.o en/port.o en/monitor_stats.o en/health.o \ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \ @@ -43,19 +44,22 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \ lib/fs_chains.o en/tc_tun.o \ esw/indir_table.o en/tc_tun_encap.o \ en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \ - en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o + en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \ + en/tc/post_act.o mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o +mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o # # Core extra # mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \ - ecpf.o rdma.o esw/legacy.o + ecpf.o rdma.o esw/legacy.o \ + esw/devlink_port.o esw/vporttbl.o esw/qos.o + mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \ esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \ - esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \ - esw/devlink_port.o esw/vporttbl.o -mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += esw/sample.o + esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o + mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o en/rep/bridge.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 9d79c5ec31e9..db5dfff585c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -877,7 +877,7 @@ static void cb_timeout_handler(struct work_struct *work) ent->ret = -ETIMEDOUT; mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. 
Will cause a leak of a command resource\n", ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); out: cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */ @@ -994,7 +994,7 @@ static void cmd_work_handler(struct work_struct *work) MLX5_SET(mbox_out, ent->out, status, status); MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); return; } @@ -1008,7 +1008,7 @@ static void cmd_work_handler(struct work_struct *work) poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ rmb(); - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT)); + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT)); } } @@ -1068,7 +1068,7 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); ent->ret = -ETIMEDOUT; - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); } static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 360e093874d4..cf97985628ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -89,7 +89,8 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq, int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen, u32 *out, int outlen) { - int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn); + int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), + c_eqn_or_apu_element); u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {}; struct mlx5_eq_comp *eq; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index def2156e50ee..e8093c4e09d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -53,7 +53,7 @@ static bool is_eth_rep_supported(struct mlx5_core_dev *dev) return true; } -static bool is_eth_supported(struct mlx5_core_dev *dev) +bool mlx5_eth_supported(struct mlx5_core_dev *dev) { if (!IS_ENABLED(CONFIG_MLX5_CORE_EN)) return false; @@ -105,7 +105,18 @@ static bool is_eth_supported(struct mlx5_core_dev *dev) return true; } -static bool is_vnet_supported(struct mlx5_core_dev *dev) +static bool is_eth_enabled(struct mlx5_core_dev *dev) +{ + union devlink_param_value val; + int err; + + err = devlink_param_driverinit_value_get(priv_to_devlink(dev), + DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH, + &val); + return err ? false : val.vbool; +} + +bool mlx5_vnet_supported(struct mlx5_core_dev *dev) { if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET)) return false; @@ -127,6 +138,17 @@ static bool is_vnet_supported(struct mlx5_core_dev *dev) return true; } +static bool is_vnet_enabled(struct mlx5_core_dev *dev) +{ + union devlink_param_value val; + int err; + + err = devlink_param_driverinit_value_get(priv_to_devlink(dev), + DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET, + &val); + return err ? 
false : val.vbool; +} + static bool is_ib_rep_supported(struct mlx5_core_dev *dev) { if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) @@ -170,7 +192,7 @@ static bool is_mp_supported(struct mlx5_core_dev *dev) return true; } -static bool is_ib_supported(struct mlx5_core_dev *dev) +bool mlx5_rdma_supported(struct mlx5_core_dev *dev) { if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) return false; @@ -187,6 +209,17 @@ static bool is_ib_supported(struct mlx5_core_dev *dev) return true; } +static bool is_ib_enabled(struct mlx5_core_dev *dev) +{ + union devlink_param_value val; + int err; + + err = devlink_param_driverinit_value_get(priv_to_devlink(dev), + DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, + &val); + return err ? false : val.vbool; +} + enum { MLX5_INTERFACE_PROTOCOL_ETH, MLX5_INTERFACE_PROTOCOL_ETH_REP, @@ -201,13 +234,17 @@ enum { static const struct mlx5_adev_device { const char *suffix; bool (*is_supported)(struct mlx5_core_dev *dev); + bool (*is_enabled)(struct mlx5_core_dev *dev); } mlx5_adev_devices[] = { [MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet", - .is_supported = &is_vnet_supported }, + .is_supported = &mlx5_vnet_supported, + .is_enabled = &is_vnet_enabled }, [MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma", - .is_supported = &is_ib_supported }, + .is_supported = &mlx5_rdma_supported, + .is_enabled = &is_ib_enabled }, [MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth", - .is_supported = &is_eth_supported }, + .is_supported = &mlx5_eth_supported, + .is_enabled = &is_eth_enabled }, [MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep", .is_supported = &is_eth_rep_supported }, [MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep", @@ -308,6 +345,14 @@ int mlx5_attach_device(struct mlx5_core_dev *dev) if (!priv->adev[i]) { bool is_supported = false; + if (mlx5_adev_devices[i].is_enabled) { + bool enabled; + + enabled = mlx5_adev_devices[i].is_enabled(dev); + if (!enabled) + continue; + } + if (mlx5_adev_devices[i].is_supported) is_supported = mlx5_adev_devices[i].is_supported(dev); @@ -360,6 +405,14 @@ void mlx5_detach_device(struct mlx5_core_dev *dev) if (!priv->adev[i]) continue; + if (mlx5_adev_devices[i].is_enabled) { + bool enabled; + + enabled = mlx5_adev_devices[i].is_enabled(dev); + if (!enabled) + goto skip_suspend; + } + adev = &priv->adev[i]->adev; /* Auxiliary driver was unbind manually through sysfs */ if (!adev->dev.driver) @@ -397,7 +450,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev) void mlx5_unregister_device(struct mlx5_core_dev *dev) { mutex_lock(&mlx5_intf_mutex); - dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; + dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; mlx5_rescan_drivers_locked(dev); mutex_unlock(&mlx5_intf_mutex); } @@ -447,12 +500,21 @@ static void delete_drivers(struct mlx5_core_dev *dev) if (!priv->adev[i]) continue; + if (mlx5_adev_devices[i].is_enabled) { + bool enabled; + + enabled = mlx5_adev_devices[i].is_enabled(dev); + if (!enabled) + goto del_adev; + } + if (mlx5_adev_devices[i].is_supported && !delete_all) is_supported = mlx5_adev_devices[i].is_supported(dev); if (is_supported) continue; +del_adev: del_adev(&priv->adev[i]->adev); priv->adev[i] = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index d791d351b489..e84287ffc7ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -7,6 +7,7 @@ #include "fw_reset.h" #include "fs_core.h" #include "eswitch.h" +#include "esw/qos.h" 
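/* esw/qos.h is included for the eswitch QoS handlers wired into
 * mlx5_devlink_ops just below (rate_leaf_tx_share_set, rate_node_new and
 * friends). They back the generic devlink-rate object API: tx_share/tx_max
 * limits on rate leafs (eswitch vports) and on user-created rate nodes
 * (vport groups), with rate_leaf_parent_set attaching a leaf to a node. */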
#include "sf/dev/dev.h" #include "sf/sf.h" @@ -292,6 +293,13 @@ static const struct devlink_ops mlx5_devlink_ops = { .eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get, .port_function_hw_addr_get = mlx5_devlink_port_function_hw_addr_get, .port_function_hw_addr_set = mlx5_devlink_port_function_hw_addr_set, + .rate_leaf_tx_share_set = mlx5_esw_devlink_rate_leaf_tx_share_set, + .rate_leaf_tx_max_set = mlx5_esw_devlink_rate_leaf_tx_max_set, + .rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set, + .rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set, + .rate_node_new = mlx5_esw_devlink_rate_node_new, + .rate_node_del = mlx5_esw_devlink_rate_node_del, + .rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set, #endif #ifdef CONFIG_MLX5_SF_MANAGER .port_new = mlx5_devlink_sf_port_new, @@ -359,9 +367,10 @@ int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id, return 0; } -struct devlink *mlx5_devlink_alloc(void) +struct devlink *mlx5_devlink_alloc(struct device *dev) { - return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev)); + return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev), + dev); } void mlx5_devlink_free(struct devlink *devlink) @@ -595,6 +604,157 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink) #endif } +static const struct devlink_param enable_eth_param = + DEVLINK_PARAM_GENERIC(ENABLE_ETH, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL); + +static int mlx5_devlink_eth_param_register(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + union devlink_param_value value; + int err; + + if (!mlx5_eth_supported(dev)) + return 0; + + err = devlink_param_register(devlink, &enable_eth_param); + if (err) + return err; + + value.vbool = true; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH, + value); + devlink_param_publish(devlink, &enable_eth_param); + return 0; +} + +static void mlx5_devlink_eth_param_unregister(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + if (!mlx5_eth_supported(dev)) + return; + + devlink_param_unpublish(devlink, &enable_eth_param); + devlink_param_unregister(devlink, &enable_eth_param); +} + +static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + bool new_state = val.vbool; + + if (new_state && !mlx5_rdma_supported(dev)) + return -EOPNOTSUPP; + return 0; +} + +static const struct devlink_param enable_rdma_param = + DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, mlx5_devlink_enable_rdma_validate); + +static int mlx5_devlink_rdma_param_register(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + union devlink_param_value value; + int err; + + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev)) + return 0; + + err = devlink_param_register(devlink, &enable_rdma_param); + if (err) + return err; + + value.vbool = true; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, + value); + devlink_param_publish(devlink, &enable_rdma_param); + return 0; +} + +static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev)) + return; + + 
devlink_param_unpublish(devlink, &enable_rdma_param); + devlink_param_unregister(devlink, &enable_rdma_param); +} + +static const struct devlink_param enable_vnet_param = + DEVLINK_PARAM_GENERIC(ENABLE_VNET, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL); + +static int mlx5_devlink_vnet_param_register(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + union devlink_param_value value; + int err; + + if (!mlx5_vnet_supported(dev)) + return 0; + + err = devlink_param_register(devlink, &enable_vnet_param); + if (err) + return err; + + value.vbool = true; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET, + value); + devlink_param_publish(devlink, &enable_vnet_param); + return 0; +} + +static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + if (!mlx5_vnet_supported(dev)) + return; + + devlink_param_unpublish(devlink, &enable_vnet_param); + devlink_param_unregister(devlink, &enable_vnet_param); +} + +static int mlx5_devlink_auxdev_params_register(struct devlink *devlink) +{ + int err; + + err = mlx5_devlink_eth_param_register(devlink); + if (err) + return err; + + err = mlx5_devlink_rdma_param_register(devlink); + if (err) + goto rdma_err; + + err = mlx5_devlink_vnet_param_register(devlink); + if (err) + goto vnet_err; + return 0; + +vnet_err: + mlx5_devlink_rdma_param_unregister(devlink); +rdma_err: + mlx5_devlink_eth_param_unregister(devlink); + return err; +} + +static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink) +{ + mlx5_devlink_vnet_param_unregister(devlink); + mlx5_devlink_rdma_param_unregister(devlink); + mlx5_devlink_eth_param_unregister(devlink); +} + #define MLX5_TRAP_DROP(_id, _group_id) \ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ @@ -638,11 +798,11 @@ static void mlx5_devlink_traps_unregister(struct devlink *devlink) ARRAY_SIZE(mlx5_trap_groups_arr)); } -int mlx5_devlink_register(struct devlink *devlink, struct device *dev) +int mlx5_devlink_register(struct devlink *devlink) { int err; - err = devlink_register(devlink, dev); + err = devlink_register(devlink); if (err) return err; @@ -653,6 +813,10 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev) mlx5_devlink_set_params_init_values(devlink); devlink_params_publish(devlink); + err = mlx5_devlink_auxdev_params_register(devlink); + if (err) + goto auxdev_reg_err; + err = mlx5_devlink_traps_register(devlink); if (err) goto traps_reg_err; @@ -660,6 +824,8 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev) return 0; traps_reg_err: + mlx5_devlink_auxdev_params_unregister(devlink); +auxdev_reg_err: devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); params_reg_err: @@ -670,6 +836,8 @@ params_reg_err: void mlx5_devlink_unregister(struct devlink *devlink) { mlx5_devlink_traps_unregister(devlink); + mlx5_devlink_auxdev_params_unregister(devlink); + devlink_params_unpublish(devlink); devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); devlink_unregister(devlink); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h index 7318d44b774b..30bf4882779b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h @@ -31,9 +31,9 @@ int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev); 
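/* ENABLE_ETH/ENABLE_RDMA/ENABLE_VNET registered above are generic
 * driverinit devlink parameters; a plausible flow for turning off the RDMA
 * auxiliary device (PCI address illustrative) would be:
 *
 *   devlink dev param set pci/0000:08:00.0 name enable_rdma value false cmode driverinit
 *   devlink dev reload pci/0000:08:00.0
 *
 * After the reload, is_eth_enabled()/is_ib_enabled()/is_vnet_enabled() in
 * dev.c read the values back and the disabled auxiliary devices are never
 * created. */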
int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id, enum devlink_trap_action *action); -struct devlink *mlx5_devlink_alloc(void); +struct devlink *mlx5_devlink_alloc(struct device *dev); void mlx5_devlink_free(struct devlink *devlink); -int mlx5_devlink_register(struct devlink *devlink, struct device *dev); +int mlx5_devlink_register(struct devlink *devlink); void mlx5_devlink_unregister(struct devlink *devlink); #endif /* __MLX5_DEVLINK_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index b1b51bbba054..669a75f3537a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -58,6 +58,7 @@ #include "en/qos.h" #include "lib/hv_vhca.h" #include "lib/clock.h" +#include "en/rx_res.h" extern const struct net_device_ops mlx5e_netdev_ops; struct page_pool; @@ -65,14 +66,13 @@ struct page_pool; #define MLX5E_METADATA_ETHER_TYPE (0x8CE4) #define MLX5E_METADATA_ETHER_LEN 8 -#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) - #define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) #define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu)) #define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu)) #define MLX5E_MAX_NUM_TC 8 +#define MLX5E_MAX_NUM_MQPRIO_CH_TC TC_QOPT_MAX_QUEUE #define MLX5_RX_HEADROOM NET_SKB_PAD #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ @@ -126,7 +126,6 @@ struct page_pool; #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 -#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_DEFAULT_LRO_TIMEOUT 32 #define MLX5E_LRO_TIMEOUT_ARR_SIZE 4 @@ -139,8 +138,6 @@ struct page_pool; #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2 -#define MLX5E_LOG_INDIR_RQT_SIZE 0x8 -#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) #define MLX5E_MIN_NUM_CHANNELS 0x1 #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2) #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) @@ -252,7 +249,10 @@ struct mlx5e_params { u8 rq_wq_type; u8 log_rq_mtu_frames; u16 num_channels; - u8 num_tc; + struct { + u16 mode; + u8 num_tc; + } mqprio; bool rx_cqe_compress_def; bool tunneled_offload_en; struct dim_cq_moder rx_cq_moderation; @@ -272,6 +272,12 @@ struct mlx5e_params { bool ptp_rx; }; +static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params) +{ + return params->mqprio.mode == TC_MQPRIO_MODE_DCB ? 
+ params->mqprio.num_tc : 1; +} + enum { MLX5E_RQ_STATE_ENABLED, MLX5E_RQ_STATE_RECOVERING, @@ -745,29 +751,11 @@ enum { MLX5E_STATE_XDP_ACTIVE, }; -struct mlx5e_rqt { - u32 rqtn; - bool enabled; -}; - -struct mlx5e_tir { - u32 tirn; - struct mlx5e_rqt rqt; - struct list_head list; -}; - enum { MLX5E_TC_PRIO = 0, MLX5E_NIC_PRIO }; -struct mlx5e_rss_params { - u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; - u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS]; - u8 toeplitz_hash_key[40]; - u8 hfunc; -}; - struct mlx5e_modify_sq_param { int curr_state; int next_state; @@ -837,13 +825,7 @@ struct mlx5e_priv { struct mlx5e_channels channels; u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC]; - struct mlx5e_rqt indir_rqt; - struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS]; - struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS]; - struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; - struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS]; - struct mlx5e_tir ptp_tir; - struct mlx5e_rss_params rss_params; + struct mlx5e_rx_res *rx_res; u32 tx_rates[MLX5E_MAX_NUM_SQS]; struct mlx5e_flow_steering fs; @@ -948,25 +930,6 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); void mlx5e_timestamp_init(struct mlx5e_priv *priv); -struct mlx5e_redirect_rqt_param { - bool is_rss; - union { - u32 rqn; /* Direct RQN (Non-RSS) */ - struct { - u8 hfunc; - struct mlx5e_channels *channels; - } rss; /* RSS data */ - }; -}; - -int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, - struct mlx5e_redirect_rqt_param rrp); -void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params, - const struct mlx5e_tirc_config *ttconfig, - void *tirc, bool inner); -void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in); -struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt); - struct mlx5e_xsk_param; struct mlx5e_rq_param; @@ -1028,9 +991,6 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx); -void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, - int num_channels); - int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state); void mlx5e_activate_rq(struct mlx5e_rq *rq); void mlx5e_deactivate_rq(struct mlx5e_rq *rq); @@ -1065,10 +1025,6 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) extern const struct ethtool_ops mlx5e_ethtool_ops; -int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, - u32 *in); -void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, - struct mlx5e_tir *tir); int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, @@ -1084,17 +1040,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq); int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node); void mlx5e_free_di_list(struct mlx5e_rq *rq); -int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); - -int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); -void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv); - -int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n); -void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n); -int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n); -void mlx5e_destroy_direct_tirs(struct mlx5e_priv 
*priv, struct mlx5e_tir *tirs, int n); -void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); - int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn); void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); @@ -1106,7 +1051,6 @@ int mlx5e_close(struct net_device *netdev); int mlx5e_open(struct net_device *netdev); void mlx5e_queue_update_stats(struct mlx5e_priv *priv); -int mlx5e_bits_invert(unsigned long a, int size); int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv); int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context); @@ -1183,8 +1127,6 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv); void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv); void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu); -void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, - u16 num_channels); void mlx5e_rx_dim_work(struct work_struct *work); void mlx5e_tx_dim_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c new file mode 100644 index 000000000000..e7c14c0de0a7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */ + +#include "channels.h" +#include "en.h" +#include "en/ptp.h" + +unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs) +{ + return chs->num; +} + +void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn) +{ + struct mlx5e_channel *c; + + WARN_ON(ix >= mlx5e_channels_get_num(chs)); + c = chs->c[ix]; + + *rqn = c->rq.rqn; +} + +bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn) +{ + struct mlx5e_channel *c; + + WARN_ON(ix >= mlx5e_channels_get_num(chs)); + c = chs->c[ix]; + + if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) + return false; + + *rqn = c->xskrq.rqn; + return true; +} + +bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn) +{ + struct mlx5e_ptp *c = chs->ptp; + + if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) + return false; + + *rqn = c->rq.rqn; + return true; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h new file mode 100644 index 000000000000..ca00cbc827cb --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. 
*/ + +#ifndef __MLX5_EN_CHANNELS_H__ +#define __MLX5_EN_CHANNELS_H__ + +#include <linux/kernel.h> + +struct mlx5e_channels; + +unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs); +void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn); +bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn); +bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn); + +#endif /* __MLX5_EN_CHANNELS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c index bc33eaada3b9..86e079310ac3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c @@ -55,19 +55,15 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv) { struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); - if (dl_port->registered) - devlink_port_unregister(dl_port); + devlink_port_unregister(dl_port); } struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); - struct devlink_port *port; if (!netif_device_present(dev)) return NULL; - port = mlx5e_devlink_get_dl_port(priv); - if (port->registered) - return port; - return NULL; + + return mlx5e_devlink_get_dl_port(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 1d5ce07b83f4..41684a6c44e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -5,6 +5,9 @@ #define __MLX5E_FLOW_STEER_H__ #include "mod_hdr.h" +#include "lib/fs_ttc.h" + +struct mlx5e_post_act; enum { MLX5E_TC_FT_LEVEL = 0, @@ -18,6 +21,7 @@ struct mlx5e_tc_table { struct mutex t_lock; struct mlx5_flow_table *t; struct mlx5_fs_chains *chains; + struct mlx5e_post_act *post_act; struct rhashtable ht; @@ -67,27 +71,7 @@ struct mlx5e_l2_table { bool promisc_enabled; }; -enum mlx5e_traffic_types { - MLX5E_TT_IPV4_TCP, - MLX5E_TT_IPV6_TCP, - MLX5E_TT_IPV4_UDP, - MLX5E_TT_IPV6_UDP, - MLX5E_TT_IPV4_IPSEC_AH, - MLX5E_TT_IPV6_IPSEC_AH, - MLX5E_TT_IPV4_IPSEC_ESP, - MLX5E_TT_IPV6_IPSEC_ESP, - MLX5E_TT_IPV4, - MLX5E_TT_IPV6, - MLX5E_TT_ANY, - MLX5E_NUM_TT, - MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY, -}; - -struct mlx5e_tirc_config { - u8 l3_prot_type; - u8 l4_prot_type; - u32 rx_hash_fields; -}; +#define MLX5E_NUM_INDIR_TIRS (MLX5_NUM_TT - 1) #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP) @@ -99,30 +83,6 @@ struct mlx5e_tirc_config { MLX5_HASH_FIELD_SEL_DST_IP |\ MLX5_HASH_FIELD_SEL_IPSEC_SPI) -enum mlx5e_tunnel_types { - MLX5E_TT_IPV4_GRE, - MLX5E_TT_IPV6_GRE, - MLX5E_TT_IPV4_IPIP, - MLX5E_TT_IPV6_IPIP, - MLX5E_TT_IPV4_IPV6, - MLX5E_TT_IPV6_IPV6, - MLX5E_NUM_TUNNEL_TT, -}; - -bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev); - -struct mlx5e_ttc_rule { - struct mlx5_flow_handle *rule; - struct mlx5_flow_destination default_dest; -}; - -/* L3/L4 traffic type classifier */ -struct mlx5e_ttc_table { - struct mlx5e_flow_table ft; - struct mlx5e_ttc_rule rules[MLX5E_NUM_TT]; - struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT]; -}; - /* NIC prio FTS */ enum { MLX5E_PROMISC_FT_LEVEL, @@ -144,21 +104,7 @@ enum { #endif }; -#define MLX5E_TTC_NUM_GROUPS 3 -#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) -#define MLX5E_TTC_GROUP2_SIZE BIT(1) -#define MLX5E_TTC_GROUP3_SIZE BIT(0) -#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ - 
MLX5E_TTC_GROUP2_SIZE +\ - MLX5E_TTC_GROUP3_SIZE) - -#define MLX5E_INNER_TTC_NUM_GROUPS 3 -#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) -#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) -#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) -#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ - MLX5E_INNER_TTC_GROUP2_SIZE +\ - MLX5E_INNER_TTC_GROUP3_SIZE) +struct mlx5e_priv; #ifdef CONFIG_MLX5_EN_RXNFC @@ -226,8 +172,8 @@ struct mlx5e_flow_steering { struct mlx5e_promisc_table promisc; struct mlx5e_vlan_table *vlan; struct mlx5e_l2_table l2; - struct mlx5e_ttc_table ttc; - struct mlx5e_ttc_table inner_ttc; + struct mlx5_ttc_table *ttc; + struct mlx5_ttc_table *inner_ttc; #ifdef CONFIG_MLX5_EN_ARFS struct mlx5e_arfs_tables *arfs; #endif @@ -239,33 +185,13 @@ struct mlx5e_flow_steering { struct mlx5e_ptp_fs *ptp_fs; }; -struct ttc_params { - struct mlx5_flow_table_attr ft_attr; - u32 any_tt_tirn; - u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; - struct mlx5e_ttc_table *inner_ttc; -}; - -void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params); -void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params); -void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params); - -int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, - struct mlx5e_ttc_table *ttc); -void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv, - struct mlx5e_ttc_table *ttc); +void mlx5e_set_ttc_params(struct mlx5e_priv *priv, + struct ttc_params *ttc_params, bool tunnel); -int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, - struct mlx5e_ttc_table *ttc); -void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv, - struct mlx5e_ttc_table *ttc); +void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv); +int mlx5e_create_ttc_table(struct mlx5e_priv *priv); void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); -int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type, - struct mlx5_flow_destination *new_dest); -struct mlx5_flow_destination -mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type); -int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type); void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv); void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); @@ -273,7 +199,6 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); -u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt); int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num); void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv); int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c index 909faa6c89d7..7aa25a5e29d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c @@ -33,22 +33,22 @@ static char *fs_udp_type2str(enum fs_udp_type i) } } -static enum mlx5e_traffic_types fs_udp2tt(enum fs_udp_type i) +static enum mlx5_traffic_types fs_udp2tt(enum fs_udp_type i) { switch (i) { case FS_IPV4_UDP: - return MLX5E_TT_IPV4_UDP; + return MLX5_TT_IPV4_UDP; default: /* FS_IPV6_UDP */ - return MLX5E_TT_IPV6_UDP; + return MLX5_TT_IPV6_UDP; } } -static enum fs_udp_type tt2fs_udp(enum mlx5e_traffic_types i) +static enum 
fs_udp_type tt2fs_udp(enum mlx5_traffic_types i) { switch (i) { - case MLX5E_TT_IPV4_UDP: + case MLX5_TT_IPV4_UDP: return FS_IPV4_UDP; - case MLX5E_TT_IPV6_UDP: + case MLX5_TT_IPV6_UDP: return FS_IPV6_UDP; default: return FS_UDP_NUM_TYPES; @@ -75,7 +75,7 @@ static void fs_udp_set_dport_flow(struct mlx5_flow_spec *spec, enum fs_udp_type struct mlx5_flow_handle * mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv, - enum mlx5e_traffic_types ttc_type, + enum mlx5_traffic_types ttc_type, u32 tir_num, u16 d_port) { enum fs_udp_type type = tt2fs_udp(ttc_type); @@ -124,7 +124,7 @@ static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type typ fs_udp = priv->fs.udp; fs_udp_t = &fs_udp->tables[type]; - dest = mlx5e_ttc_get_default_dest(priv, fs_udp2tt(type)); + dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_udp2tt(type)); rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -259,7 +259,7 @@ static int fs_udp_disable(struct mlx5e_priv *priv) for (i = 0; i < FS_UDP_NUM_TYPES; i++) { /* Modify ttc rules destination to point back to the indir TIRs */ - err = mlx5e_ttc_fwd_default_dest(priv, fs_udp2tt(i)); + err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_udp2tt(i)); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] default destination failed, err(%d)\n", @@ -281,7 +281,7 @@ static int fs_udp_enable(struct mlx5e_priv *priv) dest.ft = priv->fs.udp->tables[i].t; /* Modify ttc rules destination to point on the accel_fs FTs */ - err = mlx5e_ttc_fwd_dest(priv, fs_udp2tt(i), &dest); + err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_udp2tt(i), &dest); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] destination to accel failed, err(%d)\n", @@ -401,7 +401,7 @@ static int fs_any_add_default_rule(struct mlx5e_priv *priv) fs_any = priv->fs.any; fs_any_t = &fs_any->table; - dest = mlx5e_ttc_get_default_dest(priv, MLX5E_TT_ANY); + dest = mlx5_ttc_get_default_dest(priv->fs.ttc, MLX5_TT_ANY); rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -514,11 +514,11 @@ static int fs_any_disable(struct mlx5e_priv *priv) int err; /* Modify ttc rules destination to point back to the indir TIRs */ - err = mlx5e_ttc_fwd_default_dest(priv, MLX5E_TT_ANY); + err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, MLX5_TT_ANY); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] default destination failed, err(%d)\n", - __func__, MLX5E_TT_ANY, err); + __func__, MLX5_TT_ANY, err); return err; } return 0; @@ -533,11 +533,11 @@ static int fs_any_enable(struct mlx5e_priv *priv) dest.ft = priv->fs.any->table.t; /* Modify ttc rules destination to point on the accel_fs FTs */ - err = mlx5e_ttc_fwd_dest(priv, MLX5E_TT_ANY, &dest); + err = mlx5_ttc_fwd_dest(priv->fs.ttc, MLX5_TT_ANY, &dest); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] destination to accel failed, err(%d)\n", - __func__, MLX5E_TT_ANY, err); + __func__, MLX5_TT_ANY, err); return err; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h index 8385df24eb99..7a70c4f38fda 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h @@ -12,7 +12,7 @@ void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule); /* UDP traffic type redirect */ struct mlx5_flow_handle * mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv, - enum mlx5e_traffic_types 
ttc_type, + enum mlx5_traffic_types ttc_type, u32 tir_num, u16 d_port); void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv); int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c index ea321e528749..4e72ca8070e2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c @@ -5,11 +5,15 @@ #include <linux/slab.h> #include <linux/xarray.h> #include <linux/hashtable.h> +#include <linux/refcount.h> #include "mapping.h" #define MAPPING_GRACE_PERIOD 2000 +static LIST_HEAD(shared_ctx_list); +static DEFINE_MUTEX(shared_ctx_lock); + struct mapping_ctx { struct xarray xarray; DECLARE_HASHTABLE(ht, 8); @@ -20,6 +24,10 @@ struct mapping_ctx { struct delayed_work dwork; struct list_head pending_list; spinlock_t pending_list_lock; /* Guards pending list */ + u64 id; + u8 type; + struct list_head list; + refcount_t refcount; }; struct mapping_item { @@ -205,11 +213,48 @@ mapping_create(size_t data_size, u32 max_id, bool delayed_removal) mutex_init(&ctx->lock); xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1); + refcount_set(&ctx->refcount, 1); + INIT_LIST_HEAD(&ctx->list); + + return ctx; +} + +struct mapping_ctx * +mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal) +{ + struct mapping_ctx *ctx; + + mutex_lock(&shared_ctx_lock); + list_for_each_entry(ctx, &shared_ctx_list, list) { + if (ctx->id == id && ctx->type == type) { + if (refcount_inc_not_zero(&ctx->refcount)) + goto unlock; + break; + } + } + + ctx = mapping_create(data_size, max_id, delayed_removal); + if (IS_ERR(ctx)) + goto unlock; + + ctx->id = id; + ctx->type = type; + list_add(&ctx->list, &shared_ctx_list); + +unlock: + mutex_unlock(&shared_ctx_lock); return ctx; } void mapping_destroy(struct mapping_ctx *ctx) { + if (!refcount_dec_and_test(&ctx->refcount)) + return; + + mutex_lock(&shared_ctx_lock); + list_del(&ctx->list); + mutex_unlock(&shared_ctx_lock); + mapping_flush_work(ctx); xa_destroy(&ctx->xarray); mutex_destroy(&ctx->lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h index 285525cc5470..4e2119f0f4c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h @@ -24,4 +24,9 @@ struct mapping_ctx *mapping_create(size_t data_size, u32 max_id, bool delayed_removal); void mapping_destroy(struct mapping_ctx *ctx); +/* Adds a mapping with the given id, or gets an existing mapping with the same id and type. + */ +struct mapping_ctx * +mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal); + #endif /* __MLX5_MAPPING_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 2cbf18c967f7..3cbb596821e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -167,6 +167,18 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, return is_linear_skb ?
mlx5e_get_linear_rq_headroom(params, xsk) : 0; } +struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params) +{ + struct mlx5e_lro_param lro_param; + + lro_param = (struct mlx5e_lro_param) { + .enabled = params->lro_en, + .timeout = params->lro_timeout, + }; + + return lro_param; +} + u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index e9593f5f0661..879ad46d754e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -11,6 +11,11 @@ struct mlx5e_xsk_param { u16 chunk_size; }; +struct mlx5e_lro_param { + bool enabled; + u32 timeout; +}; + struct mlx5e_cq_param { u32 cqc[MLX5_ST_SZ_DW(cqc)]; struct mlx5_wq_param wq; @@ -120,6 +125,7 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); +struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params); /* Build queue parameters */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index efef4adce086..ee688dec67a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -326,13 +326,14 @@ static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c, struct mlx5e_ptp_params *cparams) { struct mlx5e_params *params = &cparams->params; + u8 num_tc = mlx5e_get_dcb_num_tc(params); int ix_base; int err; int tc; - ix_base = params->num_tc * params->num_channels; + ix_base = num_tc * params->num_channels; - for (tc = 0; tc < params->num_tc; tc++) { + for (tc = 0; tc < num_tc; tc++) { int txq_ix = ix_base + tc; err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, @@ -365,9 +366,12 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c, struct mlx5e_create_cq_param ccp = {}; struct dim_cq_moder ptp_moder = {}; struct mlx5e_cq_param *cq_param; + u8 num_tc; int err; int tc; + num_tc = mlx5e_get_dcb_num_tc(params); + ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev)); ccp.ch_stats = c->stats; ccp.napi = &c->napi; @@ -375,7 +379,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c, cq_param = &cparams->txq_sq_param.cqp; - for (tc = 0; tc < params->num_tc; tc++) { + for (tc = 0; tc < num_tc; tc++) { struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq; err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq); @@ -383,7 +387,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c, goto out_err_txqsq_cq; } - for (tc = 0; tc < params->num_tc; tc++) { + for (tc = 0; tc < num_tc; tc++) { struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq; struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc]; @@ -399,7 +403,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c, out_err_ts_cq: for (--tc; tc >= 0; tc--) mlx5e_close_cq(&c->ptpsq[tc].ts_cq); - tc = params->num_tc; + tc = num_tc; out_err_txqsq_cq: for (--tc; tc >= 0; tc--) mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq); @@ -475,7 +479,7 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c, params->num_channels = orig->num_channels; params->hard_mtu = orig->hard_mtu; params->sw_mtu = orig->sw_mtu; - params->num_tc = orig->num_tc; + params->mqprio = orig->mqprio; /* SQ */ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { @@ -605,9 +609,9 @@ static void 
mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv) static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv) { + u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res); struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs; struct mlx5_flow_handle *rule; - u32 tirn = priv->ptp_tir.tirn; int err; if (ptp_fs->valid) @@ -617,7 +621,7 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv) if (err) goto out_free; - rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV4_UDP, + rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV4_UDP, tirn, PTP_EV_PORT); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -625,7 +629,7 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv) } ptp_fs->udp_v4_rule = rule; - rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV6_UDP, + rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV6_UDP, tirn, PTP_EV_PORT); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -680,7 +684,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); - c->num_tc = params->num_tc; + c->num_tc = mlx5e_get_dcb_num_tc(params); c->stats = &priv->ptp_stats.ch; c->lag_port = lag_port; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index 5efe3278b0f6..e8a8d78e3e4d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -132,7 +132,7 @@ static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid) */ bool is_ptp = MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS); - return (chs->params.num_channels + is_ptp) * chs->params.num_tc + qid; + return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid; } int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid) @@ -733,8 +733,8 @@ static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid) spin_unlock_bh(qdisc_lock(qdisc)); } -int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid, - u16 *new_qid, struct netlink_ext_ack *extack) +int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid, + struct netlink_ext_ack *extack) { struct mlx5e_qos_node *node; struct netdev_queue *txq; @@ -742,11 +742,9 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid, bool opened; int err; - qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", classid); - - *old_qid = *new_qid = 0; + qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid); - node = mlx5e_sw_node_find(priv, classid); + node = mlx5e_sw_node_find(priv, *classid); if (!node) return -ENOENT; @@ -764,7 +762,7 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid, err = mlx5_qos_destroy_node(priv->mdev, node->hw_id); if (err) /* Not fatal. 
*/ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n", - node->hw_id, classid, err); + node->hw_id, *classid, err); mlx5e_sw_node_delete(priv, node); @@ -826,8 +824,7 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid, if (opened) mlx5e_reactivate_qos_sq(priv, moved_qid, txq); - *old_qid = mlx5e_qid_from_qos(&priv->channels, moved_qid); - *new_qid = mlx5e_qid_from_qos(&priv->channels, qid); + *classid = node->classid; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h index 5af7991fcd19..757682b7c0e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h @@ -34,8 +34,8 @@ int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid, struct netlink_ext_ack *extack); int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid, u64 rate, u64 ceil, struct netlink_ext_ack *extack); -int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid, - u16 *new_qid, struct netlink_ext_ack *extack); +int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid, + struct netlink_ext_ack *extack); int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force, struct netlink_ext_ack *extack); int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index 3c0032c9647c..0c38c2e319be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -15,9 +15,116 @@ struct mlx5_bridge_switchdev_fdb_work { struct work_struct work; struct switchdev_notifier_fdb_info fdb_info; struct net_device *dev; + struct mlx5_esw_bridge_offloads *br_offloads; bool add; }; +static bool mlx5_esw_bridge_dev_same_esw(struct net_device *dev, struct mlx5_eswitch *esw) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + return esw == priv->mdev->priv.eswitch; +} + +static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswitch *esw) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev, *esw_mdev; + u64 system_guid, esw_system_guid; + + mdev = priv->mdev; + esw_mdev = esw->dev; + + system_guid = mlx5_query_nic_system_image_guid(mdev); + esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev); + + return system_guid == esw_system_guid; +} + +static struct net_device * +mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw) +{ + struct net_device *lower; + struct list_head *iter; + + netdev_for_each_lower_dev(dev, lower, iter) { + struct mlx5_core_dev *mdev; + struct mlx5e_priv *priv; + + if (!mlx5e_eswitch_rep(lower)) + continue; + + priv = netdev_priv(lower); + mdev = priv->mdev; + if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw)) + return lower; + } + + return NULL; +} + +static struct net_device * +mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw, + u16 *vport_num, u16 *esw_owner_vhca_id) +{ + struct mlx5e_rep_priv *rpriv; + struct mlx5e_priv *priv; + + if (netif_is_lag_master(dev)) + dev = mlx5_esw_bridge_lag_rep_get(dev, esw); + + if (!dev || !mlx5e_eswitch_rep(dev) || !mlx5_esw_bridge_dev_same_hw(dev, esw)) + return NULL; + + priv = netdev_priv(dev); + rpriv = priv->ppriv; + *vport_num = rpriv->rep->vport; + 
*esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id); + return dev; +} + +static struct net_device * +mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw, + u16 *vport_num, u16 *esw_owner_vhca_id) +{ + struct net_device *lower_dev; + struct list_head *iter; + + if (netif_is_lag_master(dev) || mlx5e_eswitch_rep(dev)) + return mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, vport_num, + esw_owner_vhca_id); + + netdev_for_each_lower_dev(dev, lower_dev, iter) { + struct net_device *rep; + + if (netif_is_bridge_master(lower_dev)) + continue; + + rep = mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(lower_dev, esw, vport_num, + esw_owner_vhca_id); + if (rep) + return rep; + } + + return NULL; +} + +static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *rep, + struct mlx5_eswitch *esw) +{ + struct mlx5_core_dev *mdev; + struct mlx5e_priv *priv; + + if (!mlx5_esw_bridge_dev_same_esw(rep, esw)) + return false; + + priv = netdev_priv(rep); + mdev = priv->mdev; + if (netif_is_lag_master(dev)) + return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev); + return true; +} + static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr) { struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb, @@ -25,37 +132,36 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr netdev_nb); struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct netdev_notifier_changeupper_info *info = ptr; + struct net_device *upper = info->upper_dev, *rep; + struct mlx5_eswitch *esw = br_offloads->esw; + u16 vport_num, esw_owner_vhca_id; struct netlink_ext_ack *extack; - struct mlx5e_rep_priv *rpriv; - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - struct net_device *upper; - struct mlx5e_priv *priv; - u16 vport_num; - - if (!mlx5e_eswitch_rep(dev)) - return 0; + int ifindex = upper->ifindex; + int err; - upper = info->upper_dev; if (!netif_is_bridge_master(upper)) return 0; - esw = br_offloads->esw; - priv = netdev_priv(dev); - if (esw != priv->mdev->priv.eswitch) + rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id); + if (!rep) return 0; - rpriv = priv->ppriv; - vport_num = rpriv->rep->vport; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return PTR_ERR(vport); - extack = netdev_notifier_info_to_extack(&info->info); - return info->linking ? - mlx5_esw_bridge_vport_link(upper->ifindex, br_offloads, vport, extack) : - mlx5_esw_bridge_vport_unlink(upper->ifindex, br_offloads, vport, extack); + if (mlx5_esw_bridge_is_local(dev, rep, esw)) + err = info->linking ? + mlx5_esw_bridge_vport_link(ifindex, vport_num, esw_owner_vhca_id, + br_offloads, extack) : + mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id, + br_offloads, extack); + else if (mlx5_esw_bridge_dev_same_hw(rep, esw)) + err = info->linking ? 
+ mlx5_esw_bridge_vport_peer_link(ifindex, vport_num, esw_owner_vhca_id, + br_offloads, extack) : + mlx5_esw_bridge_vport_peer_unlink(ifindex, vport_num, esw_owner_vhca_id, + br_offloads, extack); + + return err; } static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb, @@ -75,31 +181,28 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb, return notifier_from_errno(err); } -static int mlx5_esw_bridge_port_obj_add(struct net_device *dev, - const void *ctx, - const struct switchdev_obj *obj, - struct netlink_ext_ack *extack) +static int +mlx5_esw_bridge_port_obj_add(struct net_device *dev, + struct switchdev_notifier_port_obj_info *port_obj_info, + struct mlx5_esw_bridge_offloads *br_offloads) { + struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_obj_info->info); + const struct switchdev_obj *obj = port_obj_info->obj; const struct switchdev_obj_port_vlan *vlan; - struct mlx5e_rep_priv *rpriv; - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - struct mlx5e_priv *priv; - u16 vport_num; - int err = 0; + u16 vport_num, esw_owner_vhca_id; + int err; - priv = netdev_priv(dev); - rpriv = priv->ppriv; - vport_num = rpriv->rep->vport; - esw = priv->mdev->priv.eswitch; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return PTR_ERR(vport); + if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num, + &esw_owner_vhca_id)) + return 0; + + port_obj_info->handled = true; switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); - err = mlx5_esw_bridge_port_vlan_add(vlan->vid, vlan->flags, esw, vport, extack); + err = mlx5_esw_bridge_port_vlan_add(vport_num, esw_owner_vhca_id, vlan->vid, + vlan->flags, br_offloads, extack); break; default: return -EOPNOTSUPP; @@ -107,29 +210,25 @@ static int mlx5_esw_bridge_port_obj_add(struct net_device *dev, return err; } -static int mlx5_esw_bridge_port_obj_del(struct net_device *dev, - const void *ctx, - const struct switchdev_obj *obj) +static int +mlx5_esw_bridge_port_obj_del(struct net_device *dev, + struct switchdev_notifier_port_obj_info *port_obj_info, + struct mlx5_esw_bridge_offloads *br_offloads) { + const struct switchdev_obj *obj = port_obj_info->obj; const struct switchdev_obj_port_vlan *vlan; - struct mlx5e_rep_priv *rpriv; - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - struct mlx5e_priv *priv; - u16 vport_num; + u16 vport_num, esw_owner_vhca_id; - priv = netdev_priv(dev); - rpriv = priv->ppriv; - vport_num = rpriv->rep->vport; - esw = priv->mdev->priv.eswitch; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return PTR_ERR(vport); + if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num, + &esw_owner_vhca_id)) + return 0; + + port_obj_info->handled = true; switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); - mlx5_esw_bridge_port_vlan_del(vlan->vid, esw, vport); + mlx5_esw_bridge_port_vlan_del(vport_num, esw_owner_vhca_id, vlan->vid, br_offloads); break; default: return -EOPNOTSUPP; @@ -137,25 +236,21 @@ static int mlx5_esw_bridge_port_obj_del(struct net_device *dev, return 0; } -static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev, - const void *ctx, - const struct switchdev_attr *attr, - struct netlink_ext_ack *extack) +static int +mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev, + struct switchdev_notifier_port_attr_info *port_attr_info, + struct mlx5_esw_bridge_offloads 
*br_offloads) { - struct mlx5e_rep_priv *rpriv; - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - struct mlx5e_priv *priv; - u16 vport_num; - int err = 0; + struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info); + const struct switchdev_attr *attr = port_attr_info->attr; + u16 vport_num, esw_owner_vhca_id; + int err; - priv = netdev_priv(dev); - rpriv = priv->ppriv; - vport_num = rpriv->rep->vport; - esw = priv->mdev->priv.eswitch; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return PTR_ERR(vport); + if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num, + &esw_owner_vhca_id)) + return 0; + + port_attr_info->handled = true; switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: @@ -167,10 +262,12 @@ static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev, case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: break; case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: - err = mlx5_esw_bridge_ageing_time_set(attr->u.ageing_time, esw, vport); + err = mlx5_esw_bridge_ageing_time_set(vport_num, esw_owner_vhca_id, + attr->u.ageing_time, br_offloads); break; case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: - err = mlx5_esw_bridge_vlan_filtering_set(attr->u.vlan_filtering, esw, vport); + err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id, + attr->u.vlan_filtering, br_offloads); break; default: err = -EOPNOTSUPP; @@ -179,27 +276,24 @@ static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev, return err; } -static int mlx5_esw_bridge_event_blocking(struct notifier_block *unused, +static int mlx5_esw_bridge_event_blocking(struct notifier_block *nb, unsigned long event, void *ptr) { + struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb, + struct mlx5_esw_bridge_offloads, + nb_blk); struct net_device *dev = switchdev_notifier_info_to_dev(ptr); int err; switch (event) { case SWITCHDEV_PORT_OBJ_ADD: - err = switchdev_handle_port_obj_add(dev, ptr, - mlx5e_eswitch_rep, - mlx5_esw_bridge_port_obj_add); + err = mlx5_esw_bridge_port_obj_add(dev, ptr, br_offloads); break; case SWITCHDEV_PORT_OBJ_DEL: - err = switchdev_handle_port_obj_del(dev, ptr, - mlx5e_eswitch_rep, - mlx5_esw_bridge_port_obj_del); + err = mlx5_esw_bridge_port_obj_del(dev, ptr, br_offloads); break; case SWITCHDEV_PORT_ATTR_SET: - err = switchdev_handle_port_attr_set(dev, ptr, - mlx5e_eswitch_rep, - mlx5_esw_bridge_port_obj_attr_set); + err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads); break; default: err = 0; @@ -222,27 +316,23 @@ static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work) container_of(work, struct mlx5_bridge_switchdev_fdb_work, work); struct switchdev_notifier_fdb_info *fdb_info = &fdb_work->fdb_info; + struct mlx5_esw_bridge_offloads *br_offloads = + fdb_work->br_offloads; struct net_device *dev = fdb_work->dev; - struct mlx5e_rep_priv *rpriv; - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - struct mlx5e_priv *priv; - u16 vport_num; + u16 vport_num, esw_owner_vhca_id; rtnl_lock(); - priv = netdev_priv(dev); - rpriv = priv->ppriv; - vport_num = rpriv->rep->vport; - esw = priv->mdev->priv.eswitch; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) + if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num, + &esw_owner_vhca_id)) goto out; if (fdb_work->add) - mlx5_esw_bridge_fdb_create(dev, esw, vport, fdb_info); + mlx5_esw_bridge_fdb_create(dev, vport_num, esw_owner_vhca_id, br_offloads, + 
fdb_info); else - mlx5_esw_bridge_fdb_remove(dev, esw, vport, fdb_info); + mlx5_esw_bridge_fdb_remove(dev, vport_num, esw_owner_vhca_id, br_offloads, + fdb_info); out: rtnl_unlock(); @@ -251,7 +341,8 @@ out: static struct mlx5_bridge_switchdev_fdb_work * mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add, - struct switchdev_notifier_fdb_info *fdb_info) + struct switchdev_notifier_fdb_info *fdb_info, + struct mlx5_esw_bridge_offloads *br_offloads) { struct mlx5_bridge_switchdev_fdb_work *work; u8 *addr; @@ -273,6 +364,7 @@ mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add, dev_hold(dev); work->dev = dev; + work->br_offloads = br_offloads; work->add = add; return work; } @@ -286,20 +378,14 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb, struct net_device *dev = switchdev_notifier_info_to_dev(ptr); struct switchdev_notifier_fdb_info *fdb_info; struct mlx5_bridge_switchdev_fdb_work *work; + struct mlx5_eswitch *esw = br_offloads->esw; struct switchdev_notifier_info *info = ptr; - struct net_device *upper; - struct mlx5e_priv *priv; - - if (!mlx5e_eswitch_rep(dev)) - return NOTIFY_DONE; - priv = netdev_priv(dev); - if (priv->mdev->priv.eswitch != br_offloads->esw) - return NOTIFY_DONE; + u16 vport_num, esw_owner_vhca_id; + struct net_device *upper, *rep; if (event == SWITCHDEV_PORT_ATTR_SET) { - int err = switchdev_handle_port_attr_set(dev, ptr, - mlx5e_eswitch_rep, - mlx5_esw_bridge_port_obj_attr_set); + int err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads); + return notifier_from_errno(err); } @@ -309,7 +395,27 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb, if (!netif_is_bridge_master(upper)) return NOTIFY_DONE; + rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id); + if (!rep) + return NOTIFY_DONE; + switch (event) { + case SWITCHDEV_FDB_ADD_TO_BRIDGE: + /* only handle the event on the native eswitch of the representor */ + if (!mlx5_esw_bridge_is_local(dev, rep, esw)) + break; + + fdb_info = container_of(info, + struct switchdev_notifier_fdb_info, + info); + mlx5_esw_bridge_fdb_update_used(dev, vport_num, esw_owner_vhca_id, br_offloads, + fdb_info); + break; + case SWITCHDEV_FDB_DEL_TO_BRIDGE: + /* only handle the event on peers */ + if (mlx5_esw_bridge_is_local(dev, rep, esw)) + break; + fallthrough; case SWITCHDEV_FDB_ADD_TO_DEVICE: case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = container_of(info, @@ -318,7 +424,8 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb, work = mlx5_esw_bridge_init_switchdev_fdb_work(dev, event == SWITCHDEV_FDB_ADD_TO_DEVICE, - fdb_info); + fdb_info, + br_offloads); if (IS_ERR(work)) { WARN_ONCE(1, "Failed to init switchdev work, err=%ld", PTR_ERR(work)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index 059799e4f483..51a4d80f7fa3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -17,7 +17,7 @@ #include "en/mapping.h" #include "en/tc_tun.h" #include "lib/port_tun.h" -#include "esw/sample.h" +#include "en/tc/sample.h" struct mlx5e_rep_indr_block_priv { struct net_device *netdev; @@ -516,7 +516,6 @@ void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv) mlx5e_rep_indr_block_unbind); } -#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, struct mlx5e_tc_update_priv *tc_priv,
u32 tunnel_id) @@ -609,12 +608,13 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, return true; } -static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1, - struct mlx5e_tc_update_priv *tc_priv) +static bool mlx5e_restore_skb_chain(struct sk_buff *skb, u32 chain, u32 reg_c1, + struct mlx5e_tc_update_priv *tc_priv) { struct mlx5e_priv *priv = netdev_priv(skb->dev); u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK; +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) if (chain) { struct mlx5_rep_uplink_priv *uplink_priv; struct mlx5e_rep_priv *uplink_rpriv; @@ -636,9 +636,25 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1, zone_restore_id)) return false; } +#endif /* CONFIG_NET_TC_SKB_EXT */ + return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id); } -#endif /* CONFIG_NET_TC_SKB_EXT */ + +static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb, + struct mlx5_mapped_obj *mapped_obj, + struct mlx5e_tc_update_priv *tc_priv) +{ + if (!mlx5e_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) { + netdev_dbg(priv->netdev, + "Failed to restore tunnel info for sampled packet\n"); + return; + } +#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) + mlx5e_tc_sample_skb(skb, mapped_obj); +#endif /* CONFIG_MLX5_TC_SAMPLE */ + mlx5_rep_tc_post_napi_receive(tc_priv); +} bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb, @@ -647,7 +663,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, struct mlx5_mapped_obj mapped_obj; struct mlx5_eswitch *esw; struct mlx5e_priv *priv; - u32 reg_c0, reg_c1; + u32 reg_c0; int err; reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK); @@ -659,8 +675,6 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, */ skb->mark = 0; - reg_c1 = be32_to_cpu(cqe->ft_metadata); - priv = netdev_priv(skb->dev); esw = priv->mdev->priv.eswitch; err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj); @@ -671,18 +685,14 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, return false; } -#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) - if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) - return mlx5e_restore_skb(skb, mapped_obj.chain, reg_c1, tc_priv); -#endif /* CONFIG_NET_TC_SKB_EXT */ -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) - if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) { - mlx5_esw_sample_skb(skb, &mapped_obj); + if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) { + u32 reg_c1 = be32_to_cpu(cqe->ft_metadata); + + return mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, tc_priv); + } else if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) { + mlx5e_restore_skb_sample(priv, skb, &mapped_obj, tc_priv); return false; - } -#endif /* CONFIG_MLX5_TC_SAMPLE */ - if (mapped_obj.type != MLX5_MAPPED_OBJ_SAMPLE && - mapped_obj.type != MLX5_MAPPED_OBJ_CHAIN) { + } else { netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type); return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 9d361efd5ff7..bb682fd751c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -372,7 +372,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; - for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + for (tc = 0; tc < 
mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { struct mlx5e_txqsq *sq = &c->sq[tc]; err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc); @@ -384,7 +384,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) goto close_sqs_nest; - for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg, &ptp_ch->ptpsq[tc], tc); @@ -494,7 +494,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; - for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { struct mlx5e_txqsq *sq = &c->sq[tc]; err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ"); @@ -504,7 +504,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, } if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) { - for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq; err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c new file mode 100644 index 000000000000..b915fb29dd2c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */ + +#include "rqt.h" +#include <linux/mlx5/transobj.h> + +void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir, + unsigned int num_channels) +{ + unsigned int i; + + for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) + indir->table[i] = i % num_channels; +} + +static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, + u16 max_size, u32 *init_rqns, u16 init_size) +{ + void *rqtc; + int inlen; + int err; + u32 *in; + int i; + + rqt->mdev = mdev; + rqt->size = max_size; + + inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * init_size; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); + + MLX5_SET(rqtc, rqtc, rqt_max_size, rqt->size); + + MLX5_SET(rqtc, rqtc, rqt_actual_size, init_size); + for (i = 0; i < init_size; i++) + MLX5_SET(rqtc, rqtc, rq_num[i], init_rqns[i]); + + err = mlx5_core_create_rqt(rqt->mdev, in, inlen, &rqt->rqtn); + + kvfree(in); + return err; +} + +int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, + bool indir_enabled, u32 init_rqn) +{ + u16 max_size = indir_enabled ? MLX5E_INDIR_RQT_SIZE : 1; + + return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1); +} + +static int mlx5e_bits_invert(unsigned long a, int size) +{ + int inv = 0; + int i; + + for (i = 0; i < size; i++) + inv |= (test_bit(size - i - 1, &a) ? 
1 : 0) << i; + + return inv; +} + +static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns, + u8 hfunc, struct mlx5e_rss_params_indir *indir) +{ + unsigned int i; + + for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) { + unsigned int ix = i; + + if (hfunc == ETH_RSS_HASH_XOR) + ix = mlx5e_bits_invert(ix, ilog2(MLX5E_INDIR_RQT_SIZE)); + + ix = indir->table[ix]; + + if (WARN_ON(ix >= num_rqns)) + /* Could be a bug in the driver or in the kernel part of + * ethtool: indir table refers to non-existent RQs. + */ + return -EINVAL; + rss_rqns[i] = rqns[ix]; + } + + return 0; +} + +int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, + u32 *rqns, unsigned int num_rqns, + u8 hfunc, struct mlx5e_rss_params_indir *indir) +{ + u32 *rss_rqns; + int err; + + rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL); + if (!rss_rqns) + return -ENOMEM; + + err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir); + if (err) + goto out; + + err = mlx5e_rqt_init(rqt, mdev, MLX5E_INDIR_RQT_SIZE, rss_rqns, MLX5E_INDIR_RQT_SIZE); + +out: + kvfree(rss_rqns); + return err; +} + +void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt) +{ + mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn); +} + +static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int size) +{ + unsigned int i; + void *rqtc; + int inlen; + u32 *in; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * size; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); + + MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); + MLX5_SET(rqtc, rqtc, rqt_actual_size, size); + for (i = 0; i < size; i++) + MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]); + + err = mlx5_core_modify_rqt(rqt->mdev, rqt->rqtn, in, inlen); + + kvfree(in); + return err; +} + +int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn) +{ + return mlx5e_rqt_redirect(rqt, &rqn, 1); +} + +int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns, + u8 hfunc, struct mlx5e_rss_params_indir *indir) +{ + u32 *rss_rqns; + int err; + + if (WARN_ON(rqt->size != MLX5E_INDIR_RQT_SIZE)) + return -EINVAL; + + rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL); + if (!rss_rqns) + return -ENOMEM; + + err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir); + if (err) + goto out; + + err = mlx5e_rqt_redirect(rqt, rss_rqns, MLX5E_INDIR_RQT_SIZE); + +out: + kvfree(rss_rqns); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h new file mode 100644 index 000000000000..60c985a12f24 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. 
*/ + +#ifndef __MLX5_EN_RQT_H__ +#define __MLX5_EN_RQT_H__ + +#include <linux/kernel.h> + +#define MLX5E_INDIR_RQT_SIZE (1 << 8) + +struct mlx5_core_dev; + +struct mlx5e_rss_params_indir { + u32 table[MLX5E_INDIR_RQT_SIZE]; +}; + +void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir, + unsigned int num_channels); + +struct mlx5e_rqt { + struct mlx5_core_dev *mdev; + u32 rqtn; + u16 size; +}; + +int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, + bool indir_enabled, u32 init_rqn); +int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, + u32 *rqns, unsigned int num_rqns, + u8 hfunc, struct mlx5e_rss_params_indir *indir); +void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt); + +static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt) +{ + return rqt->rqtn; +} + +int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn); +int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns, + u8 hfunc, struct mlx5e_rss_params_indir *indir); + +#endif /* __MLX5_EN_RQT_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c new file mode 100644 index 000000000000..625cd49ef96c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c @@ -0,0 +1,588 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. + +#include "rss.h" + +#define mlx5e_rss_warn(__dev, format, ...) \ + dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +static const struct mlx5e_rss_params_traffic_type rss_default_config[MLX5E_NUM_INDIR_TIRS] = { + [MLX5_TT_IPV4_TCP] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, + }, + [MLX5_TT_IPV6_TCP] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, + }, + [MLX5_TT_IPV4_UDP] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, + }, + [MLX5_TT_IPV6_UDP] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, + }, + [MLX5_TT_IPV4_IPSEC_AH] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, + }, + [MLX5_TT_IPV6_IPSEC_AH] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, + }, + [MLX5_TT_IPV4_IPSEC_ESP] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, + }, + [MLX5_TT_IPV6_IPSEC_ESP] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, + }, + [MLX5_TT_IPV4] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP, + }, + [MLX5_TT_IPV6] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP, + }, +}; + +struct mlx5e_rss_params_traffic_type +mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt) +{ + return rss_default_config[tt]; +} + +struct mlx5e_rss { + struct mlx5e_rss_params_hash hash; + struct mlx5e_rss_params_indir indir; + u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS]; + struct mlx5e_tir *tir[MLX5E_NUM_INDIR_TIRS]; + struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS]; + struct 
mlx5e_rqt rqt; + struct mlx5_core_dev *mdev; + u32 drop_rqn; + bool inner_ft_support; + bool enabled; + refcount_t refcnt; +}; + +struct mlx5e_rss *mlx5e_rss_alloc(void) +{ + return kvzalloc(sizeof(struct mlx5e_rss), GFP_KERNEL); +} + +void mlx5e_rss_free(struct mlx5e_rss *rss) +{ + kvfree(rss); +} + +static void mlx5e_rss_params_init(struct mlx5e_rss *rss) +{ + enum mlx5_traffic_types tt; + + rss->hash.hfunc = ETH_RSS_HASH_TOP; + netdev_rss_key_fill(rss->hash.toeplitz_hash_key, + sizeof(rss->hash.toeplitz_hash_key)); + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + rss->rx_hash_fields[tt] = + mlx5e_rss_get_default_tt_config(tt).rx_hash_fields; +} + +static struct mlx5e_tir **rss_get_tirp(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + bool inner) +{ + return inner ? &rss->inner_tir[tt] : &rss->tir[tt]; +} + +static struct mlx5e_tir *rss_get_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + bool inner) +{ + return *rss_get_tirp(rss, tt, inner); +} + +static struct mlx5e_rss_params_traffic_type +mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt) +{ + struct mlx5e_rss_params_traffic_type rss_tt; + + rss_tt = mlx5e_rss_get_default_tt_config(tt); + rss_tt.rx_hash_fields = rss->rx_hash_fields[tt]; + return rss_tt; +} + +static int mlx5e_rss_create_tir(struct mlx5e_rss *rss, + enum mlx5_traffic_types tt, + const struct mlx5e_lro_param *init_lro_param, + bool inner) +{ + struct mlx5e_rss_params_traffic_type rss_tt; + struct mlx5e_tir_builder *builder; + struct mlx5e_tir **tir_p; + struct mlx5e_tir *tir; + u32 rqtn; + int err; + + if (inner && !rss->inner_ft_support) { + mlx5e_rss_warn(rss->mdev, + "Cannot create inner indirect TIR[%d], RSS inner FT is not supported.\n", + tt); + return -EINVAL; + } + + tir_p = rss_get_tirp(rss, tt, inner); + if (*tir_p) + return -EINVAL; + + tir = kvzalloc(sizeof(*tir), GFP_KERNEL); + if (!tir) + return -ENOMEM; + + builder = mlx5e_tir_builder_alloc(false); + if (!builder) { + err = -ENOMEM; + goto free_tir; + } + + rqtn = mlx5e_rqt_get_rqtn(&rss->rqt); + mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn, + rqtn, rss->inner_ft_support); + mlx5e_tir_builder_build_lro(builder, init_lro_param); + rss_tt = mlx5e_rss_get_tt_config(rss, tt); + mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner); + + err = mlx5e_tir_init(tir, builder, rss->mdev, true); + mlx5e_tir_builder_free(builder); + if (err) { + mlx5e_rss_warn(rss->mdev, "Failed to create %sindirect TIR: err = %d, tt = %d\n", + inner ? 
"inner " : "", err, tt); + goto free_tir; + } + + *tir_p = tir; + return 0; + +free_tir: + kvfree(tir); + return err; +} + +static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + bool inner) +{ + struct mlx5e_tir **tir_p; + struct mlx5e_tir *tir; + + tir_p = rss_get_tirp(rss, tt, inner); + if (!*tir_p) + return; + + tir = *tir_p; + mlx5e_tir_destroy(tir); + kvfree(tir); + *tir_p = NULL; +} + +static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss, + const struct mlx5e_lro_param *init_lro_param, + bool inner) +{ + enum mlx5_traffic_types tt, max_tt; + int err; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner); + if (err) + goto err_destroy_tirs; + } + + return 0; + +err_destroy_tirs: + max_tt = tt; + for (tt = 0; tt < max_tt; tt++) + mlx5e_rss_destroy_tir(rss, tt, inner); + return err; +} + +static void mlx5e_rss_destroy_tirs(struct mlx5e_rss *rss, bool inner) +{ + enum mlx5_traffic_types tt; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + mlx5e_rss_destroy_tir(rss, tt, inner); +} + +static int mlx5e_rss_update_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + bool inner) +{ + struct mlx5e_rss_params_traffic_type rss_tt; + struct mlx5e_tir_builder *builder; + struct mlx5e_tir *tir; + int err; + + tir = rss_get_tir(rss, tt, inner); + if (!tir) + return 0; + + builder = mlx5e_tir_builder_alloc(true); + if (!builder) + return -ENOMEM; + + rss_tt = mlx5e_rss_get_tt_config(rss, tt); + + mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner); + err = mlx5e_tir_modify(tir, builder); + + mlx5e_tir_builder_free(builder); + return err; +} + +static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss) +{ + enum mlx5_traffic_types tt; + int err, retval; + + retval = 0; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + err = mlx5e_rss_update_tir(rss, tt, false); + if (err) { + retval = retval ? : err; + mlx5e_rss_warn(rss->mdev, + "Failed to update RSS hash of indirect TIR for traffic type %d: err = %d\n", + tt, err); + } + + if (!rss->inner_ft_support) + continue; + + err = mlx5e_rss_update_tir(rss, tt, true); + if (err) { + retval = retval ? 
: err; + mlx5e_rss_warn(rss->mdev, + "Failed to update RSS hash of inner indirect TIR for traffic type %d: err = %d\n", + tt, err); + } + } + return retval; +} + +int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, + bool inner_ft_support, u32 drop_rqn) +{ + rss->mdev = mdev; + rss->inner_ft_support = inner_ft_support; + rss->drop_rqn = drop_rqn; + + mlx5e_rss_params_init(rss); + refcount_set(&rss->refcnt, 1); + + return mlx5e_rqt_init_direct(&rss->rqt, mdev, true, drop_rqn); +} + +int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, + bool inner_ft_support, u32 drop_rqn, + const struct mlx5e_lro_param *init_lro_param) +{ + int err; + + err = mlx5e_rss_init_no_tirs(rss, mdev, inner_ft_support, drop_rqn); + if (err) + goto err_out; + + err = mlx5e_rss_create_tirs(rss, init_lro_param, false); + if (err) + goto err_destroy_rqt; + + if (inner_ft_support) { + err = mlx5e_rss_create_tirs(rss, init_lro_param, true); + if (err) + goto err_destroy_tirs; + } + + return 0; + +err_destroy_tirs: + mlx5e_rss_destroy_tirs(rss, false); +err_destroy_rqt: + mlx5e_rqt_destroy(&rss->rqt); +err_out: + return err; +} + +int mlx5e_rss_cleanup(struct mlx5e_rss *rss) +{ + if (!refcount_dec_if_one(&rss->refcnt)) + return -EBUSY; + + mlx5e_rss_destroy_tirs(rss, false); + + if (rss->inner_ft_support) + mlx5e_rss_destroy_tirs(rss, true); + + mlx5e_rqt_destroy(&rss->rqt); + + return 0; +} + +void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss) +{ + refcount_inc(&rss->refcnt); +} + +void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss) +{ + refcount_dec(&rss->refcnt); +} + +unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss) +{ + return refcount_read(&rss->refcnt); +} + +u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + bool inner) +{ + struct mlx5e_tir *tir; + + WARN_ON(inner && !rss->inner_ft_support); + tir = rss_get_tir(rss, tt, inner); + WARN_ON(!tir); + + return mlx5e_tir_get_tirn(tir); +} + +/* Fill the "tirn" output parameter. + * Create the requested TIR on its first use.
+ */ +int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, + enum mlx5_traffic_types tt, + const struct mlx5e_lro_param *init_lro_param, + bool inner, u32 *tirn) +{ + struct mlx5e_tir *tir; + + tir = rss_get_tir(rss, tt, inner); + if (!tir) { /* TIR doesn't exist, create one */ + int err; + + err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner); + if (err) + return err; + tir = rss_get_tir(rss, tt, inner); + } + + *tirn = mlx5e_tir_get_tirn(tir); + return 0; +} + +static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) +{ + int err; + + err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, num_rqns, rss->hash.hfunc, &rss->indir); + if (err) + mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n", + mlx5e_rqt_get_rqtn(&rss->rqt), err); +} + +void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) +{ + rss->enabled = true; + mlx5e_rss_apply(rss, rqns, num_rqns); +} + +void mlx5e_rss_disable(struct mlx5e_rss *rss) +{ + int err; + + rss->enabled = false; + err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn); + if (err) + mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n", + mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err); +} + +int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param) +{ + struct mlx5e_tir_builder *builder; + enum mlx5_traffic_types tt; + int err, final_err; + + builder = mlx5e_tir_builder_alloc(true); + if (!builder) + return -ENOMEM; + + mlx5e_tir_builder_build_lro(builder, lro_param); + + final_err = 0; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + struct mlx5e_tir *tir; + + tir = rss_get_tir(rss, tt, false); + if (!tir) + goto inner_tir; + err = mlx5e_tir_modify(tir, builder); + if (err) { + mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of indirect TIR %#x for traffic type %d: err = %d\n", + mlx5e_tir_get_tirn(tir), tt, err); + if (!final_err) + final_err = err; + } + +inner_tir: + if (!rss->inner_ft_support) + continue; + + tir = rss_get_tir(rss, tt, true); + if (!tir) + continue; + err = mlx5e_tir_modify(tir, builder); + if (err) { + mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of inner indirect TIR %#x for traffic type %d: err = %d\n", + mlx5e_tir_get_tirn(tir), tt, err); + if (!final_err) + final_err = err; + } + } + + mlx5e_tir_builder_free(builder); + return final_err; +} + +int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc) +{ + unsigned int i; + + if (indir) + for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) + indir[i] = rss->indir.table[i]; + + if (key) + memcpy(key, rss->hash.toeplitz_hash_key, + sizeof(rss->hash.toeplitz_hash_key)); + + if (hfunc) + *hfunc = rss->hash.hfunc; + + return 0; +} + +int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, + const u8 *key, const u8 *hfunc, + u32 *rqns, unsigned int num_rqns) +{ + bool changed_indir = false; + bool changed_hash = false; + + if (hfunc && *hfunc != rss->hash.hfunc) { + switch (*hfunc) { + case ETH_RSS_HASH_XOR: + case ETH_RSS_HASH_TOP: + break; + default: + return -EINVAL; + } + changed_hash = true; + changed_indir = true; + rss->hash.hfunc = *hfunc; + } + + if (key) { + if (rss->hash.hfunc == ETH_RSS_HASH_TOP) + changed_hash = true; + memcpy(rss->hash.toeplitz_hash_key, key, + sizeof(rss->hash.toeplitz_hash_key)); + } + + if (indir) { + unsigned int i; + + changed_indir = true; + + for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) + rss->indir.table[i] = indir[i]; + } + + if (changed_indir && rss->enabled) + 
mlx5e_rss_apply(rss, rqns, num_rqns); + + if (changed_hash) + mlx5e_rss_update_tirs(rss); + + return 0; +} + +struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss) +{ + return rss->hash; +} + +u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt) +{ + return rss->rx_hash_fields[tt]; +} + +int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + u8 rx_hash_fields) +{ + u8 old_rx_hash_fields; + int err; + + old_rx_hash_fields = rss->rx_hash_fields[tt]; + + if (old_rx_hash_fields == rx_hash_fields) + return 0; + + rss->rx_hash_fields[tt] = rx_hash_fields; + + err = mlx5e_rss_update_tir(rss, tt, false); + if (err) { + rss->rx_hash_fields[tt] = old_rx_hash_fields; + mlx5e_rss_warn(rss->mdev, + "Failed to update RSS hash fields of indirect TIR for traffic type %d: err = %d\n", + tt, err); + return err; + } + + if (!(rss->inner_ft_support)) + return 0; + + err = mlx5e_rss_update_tir(rss, tt, true); + if (err) { + /* Partial update happened. Try to revert - it may fail too, but + * there is nothing more we can do. + */ + rss->rx_hash_fields[tt] = old_rx_hash_fields; + mlx5e_rss_warn(rss->mdev, + "Failed to update RSS hash fields of inner indirect TIR for traffic type %d: err = %d\n", + tt, err); + if (mlx5e_rss_update_tir(rss, tt, false)) + mlx5e_rss_warn(rss->mdev, + "Partial update of RSS hash fields happened: failed to revert indirect TIR for traffic type %d to the old values\n", + tt); + } + + return err; +} + +void mlx5e_rss_set_indir_uniform(struct mlx5e_rss *rss, unsigned int nch) +{ + mlx5e_rss_params_indir_init_uniform(&rss->indir, nch); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h new file mode 100644 index 000000000000..d522a10dadf3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. 
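mlx5e_rss_set_hash_fields() above shows a careful partial-failure protocol: the outer TIR is updated first, and if the inner TIR update then fails, the stored value is reverted and a best-effort rollback of the outer TIR is attempted. A compact sketch of that protocol, under the assumption of two dependent objects and a fallible apply(); all names here are hypothetical.

#include <stdio.h>

/* Pretend hardware update; 'fail' injects an error for the demo. */
static int apply(const char *obj, int fields, int fail)
{
    if (fail)
        return -1;
    printf("%s <- %d\n", obj, fields);
    return 0;
}

static int set_hash_fields(int *cur, int next, int inner_fails)
{
    int old = *cur;

    *cur = next;
    if (apply("outer", *cur, 0)) {
        *cur = old;                     /* nothing touched yet, plain revert */
        return -1;
    }
    if (apply("inner", *cur, inner_fails)) {
        *cur = old;
        if (apply("outer", *cur, 0))    /* best-effort rollback of step one */
            fprintf(stderr, "partial update left in place\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    int fields = 3;

    set_hash_fields(&fields, 7, 1);     /* inner fails: state reverts to 3 */
    printf("fields = %d\n", fields);
    return 0;
}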
*/ + +#ifndef __MLX5_EN_RSS_H__ +#define __MLX5_EN_RSS_H__ + +#include "rqt.h" +#include "tir.h" +#include "fs.h" + +struct mlx5e_rss_params_traffic_type +mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt); + +struct mlx5e_rss; + +struct mlx5e_rss *mlx5e_rss_alloc(void); +void mlx5e_rss_free(struct mlx5e_rss *rss); +int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, + bool inner_ft_support, u32 drop_rqn, + const struct mlx5e_lro_param *init_lro_param); +int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, + bool inner_ft_support, u32 drop_rqn); +int mlx5e_rss_cleanup(struct mlx5e_rss *rss); + +void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss); +void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss); +unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss); + +u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + bool inner); +int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, + enum mlx5_traffic_types tt, + const struct mlx5e_lro_param *init_lro_param, + bool inner, u32 *tirn); + +void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns); +void mlx5e_rss_disable(struct mlx5e_rss *rss); + +int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param); +int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc); +int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, + const u8 *key, const u8 *hfunc, + u32 *rqns, unsigned int num_rqns); +struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss); +u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt); +int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + u8 rx_hash_fields); +void mlx5e_rss_set_indir_uniform(struct mlx5e_rss *rss, unsigned int nch); +#endif /* __MLX5_EN_RSS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c new file mode 100644 index 000000000000..bf0313e2682b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -0,0 +1,690 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. 
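The rss.h header just added follows the opaque-handle convention: struct mlx5e_rss is only forward-declared, so its layout stays private to rss.c, and infallible allocation (mlx5e_rss_alloc) is split from fallible initialization (mlx5e_rss_init). A single-file sketch of the same convention; the rss name and fields below are illustrative, not the driver's.

#include <stdlib.h>

/* --- what would live in the header --- */
struct rss;                             /* opaque to callers */
struct rss *rss_alloc(void);
int rss_init(struct rss *rss, int nch);
void rss_free(struct rss *rss);

/* --- what would live in the .c file --- */
struct rss {
    int nch;                            /* layout can change freely */
};

struct rss *rss_alloc(void)
{
    return calloc(1, sizeof(struct rss));
}

int rss_init(struct rss *rss, int nch)
{
    rss->nch = nch;
    return 0;
}

void rss_free(struct rss *rss)
{
    free(rss);
}

int main(void)
{
    struct rss *rss = rss_alloc();

    if (rss && !rss_init(rss, 8))
        rss_free(rss);
    return 0;
}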
*/ + +#include "rx_res.h" +#include "channels.h" +#include "params.h" + +#define MLX5E_MAX_NUM_RSS 16 + +struct mlx5e_rx_res { + struct mlx5_core_dev *mdev; + enum mlx5e_rx_res_features features; + unsigned int max_nch; + u32 drop_rqn; + + struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS]; + bool rss_active; + u32 rss_rqns[MLX5E_INDIR_RQT_SIZE]; + unsigned int rss_nch; + + struct { + struct mlx5e_rqt direct_rqt; + struct mlx5e_tir direct_tir; + struct mlx5e_rqt xsk_rqt; + struct mlx5e_tir xsk_tir; + } *channels; + + struct { + struct mlx5e_rqt rqt; + struct mlx5e_tir tir; + } ptp; +}; + +/* API for rx_res_rss_* */ + +static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res, + const struct mlx5e_lro_param *init_lro_param, + unsigned int init_nch) +{ + bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; + struct mlx5e_rss *rss; + int err; + + if (WARN_ON(res->rss[0])) + return -EINVAL; + + rss = mlx5e_rss_alloc(); + if (!rss) + return -ENOMEM; + + err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn, + init_lro_param); + if (err) + goto err_rss_free; + + mlx5e_rss_set_indir_uniform(rss, init_nch); + + res->rss[0] = rss; + + return 0; + +err_rss_free: + mlx5e_rss_free(rss); + return err; +} + +int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch) +{ + bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; + struct mlx5e_rss *rss; + int err, i; + + for (i = 1; i < MLX5E_MAX_NUM_RSS; i++) + if (!res->rss[i]) + break; + + if (i == MLX5E_MAX_NUM_RSS) + return -ENOSPC; + + rss = mlx5e_rss_alloc(); + if (!rss) + return -ENOMEM; + + err = mlx5e_rss_init_no_tirs(rss, res->mdev, inner_ft_support, res->drop_rqn); + if (err) + goto err_rss_free; + + mlx5e_rss_set_indir_uniform(rss, init_nch); + if (res->rss_active) + mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch); + + res->rss[i] = rss; + *rss_idx = i; + + return 0; + +err_rss_free: + mlx5e_rss_free(rss); + return err; +} + +static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx) +{ + struct mlx5e_rss *rss = res->rss[rss_idx]; + int err; + + err = mlx5e_rss_cleanup(rss); + if (err) + return err; + + mlx5e_rss_free(rss); + res->rss[rss_idx] = NULL; + + return 0; +} + +int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx) +{ + struct mlx5e_rss *rss; + + if (rss_idx >= MLX5E_MAX_NUM_RSS) + return -EINVAL; + + rss = res->rss[rss_idx]; + if (!rss) + return -EINVAL; + + return __mlx5e_rx_res_rss_destroy(res, rss_idx); +} + +static void mlx5e_rx_res_rss_destroy_all(struct mlx5e_rx_res *res) +{ + int i; + + for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) { + struct mlx5e_rss *rss = res->rss[i]; + int err; + + if (!rss) + continue; + + err = __mlx5e_rx_res_rss_destroy(res, i); + if (err) { + unsigned int refcount; + + refcount = mlx5e_rss_refcnt_read(rss); + mlx5_core_warn(res->mdev, + "Failed to destroy RSS context %d, refcount = %u, err = %d\n", + i, refcount, err); + } + } +} + +static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res) +{ + int i; + + res->rss_active = true; + + for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) { + struct mlx5e_rss *rss = res->rss[i]; + + if (!rss) + continue; + mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch); + } +} + +static void mlx5e_rx_res_rss_disable(struct mlx5e_rx_res *res) +{ + int i; + + res->rss_active = false; + + for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) { + struct mlx5e_rss *rss = res->rss[i]; + + if (!rss) + continue; + mlx5e_rss_disable(rss); + } +} + +/* Updates the indirection table SW shadow, does not 
update the HW resources yet */ +void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch) +{ + WARN_ON_ONCE(res->rss_active); + mlx5e_rss_set_indir_uniform(res->rss[0], nch); +} + +int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, + u32 *indir, u8 *key, u8 *hfunc) +{ + struct mlx5e_rss *rss; + + if (rss_idx >= MLX5E_MAX_NUM_RSS) + return -EINVAL; + + rss = res->rss[rss_idx]; + if (!rss) + return -ENOENT; + + return mlx5e_rss_get_rxfh(rss, indir, key, hfunc); +} + +int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, + const u32 *indir, const u8 *key, const u8 *hfunc) +{ + struct mlx5e_rss *rss; + + if (rss_idx >= MLX5E_MAX_NUM_RSS) + return -EINVAL; + + rss = res->rss[rss_idx]; + if (!rss) + return -ENOENT; + + return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, res->rss_nch); +} + +u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt) +{ + struct mlx5e_rss *rss = res->rss[0]; + + return mlx5e_rss_get_hash_fields(rss, tt); +} + +int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt, + u8 rx_hash_fields) +{ + struct mlx5e_rss *rss = res->rss[0]; + + return mlx5e_rss_set_hash_fields(rss, tt, rx_hash_fields); +} + +int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res) +{ + int i, cnt; + + cnt = 0; + for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) + if (res->rss[i]) + cnt++; + + return cnt; +} + +int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss) +{ + int i; + + if (!rss) + return -EINVAL; + + for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) + if (rss == res->rss[i]) + return i; + + return -ENOENT; +} + +struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx) +{ + if (rss_idx >= MLX5E_MAX_NUM_RSS) + return NULL; + + return res->rss[rss_idx]; +} + +/* End of API rx_res_rss_* */ + +struct mlx5e_rx_res *mlx5e_rx_res_alloc(void) +{ + return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL); +} + +static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res, + const struct mlx5e_lro_param *init_lro_param) +{ + bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; + struct mlx5e_tir_builder *builder; + int err = 0; + int ix; + + builder = mlx5e_tir_builder_alloc(false); + if (!builder) + return -ENOMEM; + + res->channels = kvcalloc(res->max_nch, sizeof(*res->channels), GFP_KERNEL); + if (!res->channels) { + err = -ENOMEM; + goto out; + } + + for (ix = 0; ix < res->max_nch; ix++) { + err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt, + res->mdev, false, res->drop_rqn); + if (err) { + mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n", + err, ix); + goto err_destroy_direct_rqts; + } + } + + for (ix = 0; ix < res->max_nch; ix++) { + mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, + mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), + inner_ft_support); + mlx5e_tir_builder_build_lro(builder, init_lro_param); + mlx5e_tir_builder_build_direct(builder); + + err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true); + if (err) { + mlx5_core_warn(res->mdev, "Failed to create a direct TIR: err = %d, ix = %u\n", + err, ix); + goto err_destroy_direct_tirs; + } + + mlx5e_tir_builder_clear(builder); + } + + if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) + goto out; + + for (ix = 0; ix < res->max_nch; ix++) { + err = mlx5e_rqt_init_direct(&res->channels[ix].xsk_rqt, + res->mdev, false, res->drop_rqn); + if (err) { + mlx5_core_warn(res->mdev, 
"Failed to create an XSK RQT: err = %d, ix = %u\n", + err, ix); + goto err_destroy_xsk_rqts; + } + } + + for (ix = 0; ix < res->max_nch; ix++) { + mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, + mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), + inner_ft_support); + mlx5e_tir_builder_build_lro(builder, init_lro_param); + mlx5e_tir_builder_build_direct(builder); + + err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true); + if (err) { + mlx5_core_warn(res->mdev, "Failed to create an XSK TIR: err = %d, ix = %u\n", + err, ix); + goto err_destroy_xsk_tirs; + } + + mlx5e_tir_builder_clear(builder); + } + + goto out; + +err_destroy_xsk_tirs: + while (--ix >= 0) + mlx5e_tir_destroy(&res->channels[ix].xsk_tir); + + ix = res->max_nch; +err_destroy_xsk_rqts: + while (--ix >= 0) + mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt); + + ix = res->max_nch; +err_destroy_direct_tirs: + while (--ix >= 0) + mlx5e_tir_destroy(&res->channels[ix].direct_tir); + + ix = res->max_nch; +err_destroy_direct_rqts: + while (--ix >= 0) + mlx5e_rqt_destroy(&res->channels[ix].direct_rqt); + + kvfree(res->channels); + +out: + mlx5e_tir_builder_free(builder); + + return err; +} + +static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res) +{ + bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; + struct mlx5e_tir_builder *builder; + int err; + + builder = mlx5e_tir_builder_alloc(false); + if (!builder) + return -ENOMEM; + + err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn); + if (err) + goto out; + + mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, + mlx5e_rqt_get_rqtn(&res->ptp.rqt), + inner_ft_support); + mlx5e_tir_builder_build_direct(builder); + + err = mlx5e_tir_init(&res->ptp.tir, builder, res->mdev, true); + if (err) + goto err_destroy_ptp_rqt; + + goto out; + +err_destroy_ptp_rqt: + mlx5e_rqt_destroy(&res->ptp.rqt); + +out: + mlx5e_tir_builder_free(builder); + return err; +} + +static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res) +{ + unsigned int ix; + + for (ix = 0; ix < res->max_nch; ix++) { + mlx5e_tir_destroy(&res->channels[ix].direct_tir); + mlx5e_rqt_destroy(&res->channels[ix].direct_rqt); + + if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) + continue; + + mlx5e_tir_destroy(&res->channels[ix].xsk_tir); + mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt); + } + + kvfree(res->channels); +} + +static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res) +{ + mlx5e_tir_destroy(&res->ptp.tir); + mlx5e_rqt_destroy(&res->ptp.rqt); +} + +int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, + enum mlx5e_rx_res_features features, unsigned int max_nch, + u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param, + unsigned int init_nch) +{ + int err; + + res->mdev = mdev; + res->features = features; + res->max_nch = max_nch; + res->drop_rqn = drop_rqn; + + err = mlx5e_rx_res_rss_init_def(res, init_lro_param, init_nch); + if (err) + goto err_out; + + err = mlx5e_rx_res_channels_init(res, init_lro_param); + if (err) + goto err_rss_destroy; + + err = mlx5e_rx_res_ptp_init(res); + if (err) + goto err_channels_destroy; + + return 0; + +err_channels_destroy: + mlx5e_rx_res_channels_destroy(res); +err_rss_destroy: + __mlx5e_rx_res_rss_destroy(res, 0); +err_out: + return err; +} + +void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res) +{ + mlx5e_rx_res_ptp_destroy(res); + mlx5e_rx_res_channels_destroy(res); + mlx5e_rx_res_rss_destroy_all(res); +} + +void mlx5e_rx_res_free(struct 
mlx5e_rx_res *res) +{ + kvfree(res); +} + +u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix) +{ + return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir); +} + +u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix) +{ + WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_XSK)); + + return mlx5e_tir_get_tirn(&res->channels[ix].xsk_tir); +} + +u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt) +{ + struct mlx5e_rss *rss = res->rss[0]; + + return mlx5e_rss_get_tirn(rss, tt, false); +} + +u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt) +{ + struct mlx5e_rss *rss = res->rss[0]; + + return mlx5e_rss_get_tirn(rss, tt, true); +} + +u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res) +{ + WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_PTP)); + return mlx5e_tir_get_tirn(&res->ptp.tir); +} + +u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix) +{ + return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt); +} + +void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs) +{ + unsigned int nch, ix; + int err; + + nch = mlx5e_channels_get_num(chs); + + for (ix = 0; ix < chs->num; ix++) + mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]); + res->rss_nch = chs->num; + + mlx5e_rx_res_rss_enable(res); + + for (ix = 0; ix < nch; ix++) { + u32 rqn; + + mlx5e_channels_get_regular_rqn(chs, ix, &rqn); + err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), + rqn, ix, err); + + if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) + continue; + + if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn)) + rqn = res->drop_rqn; + err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), + rqn, ix, err); + } + for (ix = nch; ix < res->max_nch; ix++) { + err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), + res->drop_rqn, ix, err); + + if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) + continue; + + err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), + res->drop_rqn, ix, err); + } + + if (res->features & MLX5E_RX_RES_FEATURE_PTP) { + u32 rqn; + + if (mlx5e_channels_get_ptp_rqn(chs, &rqn)) + rqn = res->drop_rqn; + + err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n", + mlx5e_rqt_get_rqtn(&res->ptp.rqt), + rqn, err); + } +} + +void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res) +{ + unsigned int ix; + int err; + + mlx5e_rx_res_rss_disable(res); + + for (ix = 0; ix < res->max_nch; ix++) { + err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n", + 
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), + res->drop_rqn, ix, err); + + if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) + continue; + + err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), + res->drop_rqn, ix, err); + } + + if (res->features & MLX5E_RX_RES_FEATURE_PTP) { + err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n", + mlx5e_rqt_get_rqtn(&res->ptp.rqt), + res->drop_rqn, err); + } +} + +int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs, + unsigned int ix) +{ + u32 rqn; + int err; + + if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn)) + return -EINVAL; + + err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to XSK RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), + rqn, ix, err); + return err; +} + +int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix) +{ + int err; + + err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), + res->drop_rqn, ix, err); + return err; +} + +int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param) +{ + struct mlx5e_tir_builder *builder; + int err, final_err; + unsigned int ix; + + builder = mlx5e_tir_builder_alloc(true); + if (!builder) + return -ENOMEM; + + mlx5e_tir_builder_build_lro(builder, lro_param); + + final_err = 0; + + for (ix = 0; ix < MLX5E_MAX_NUM_RSS; ix++) { + struct mlx5e_rss *rss = res->rss[ix]; + + if (!rss) + continue; + + err = mlx5e_rss_lro_set_param(rss, lro_param); + if (err) + final_err = final_err ? : err; + } + + for (ix = 0; ix < res->max_nch; ix++) { + err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder); + if (err) { + mlx5_core_warn(res->mdev, "Failed to update LRO state of direct TIR %#x for channel %u: err = %d\n", + mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err); + if (!final_err) + final_err = err; + } + } + + mlx5e_tir_builder_free(builder); + return final_err; +} + +struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res) +{ + return mlx5e_rss_get_hash(res->rss[0]); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h new file mode 100644 index 000000000000..4a15942d79f7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. 
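mlx5e_rx_res_lro_set_param() above demonstrates the keep-going error style used throughout these files: every TIR is updated even after a failure, and only the first error is remembered and reported (the final_err = final_err ? : err idiom). A small standalone model of that loop; modify() and its failure pattern are made up for illustration.

#include <stdio.h>

static int modify(int i)
{
    return (i == 1 || i == 3) ? -5 : 0;     /* two objects fail */
}

int main(void)
{
    int final_err = 0;

    for (int i = 0; i < 5; i++) {
        int err = modify(i);

        if (err) {
            fprintf(stderr, "object %d failed: %d\n", i, err);
            if (!final_err)
                final_err = err;            /* first error wins */
        }
    }
    return final_err ? 1 : 0;               /* others were still attempted */
}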
*/ + +#ifndef __MLX5_EN_RX_RES_H__ +#define __MLX5_EN_RX_RES_H__ + +#include <linux/kernel.h> +#include "rqt.h" +#include "tir.h" +#include "fs.h" +#include "rss.h" + +struct mlx5e_rx_res; + +struct mlx5e_channels; +struct mlx5e_rss_params_hash; + +enum mlx5e_rx_res_features { + MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0), + MLX5E_RX_RES_FEATURE_XSK = BIT(1), + MLX5E_RX_RES_FEATURE_PTP = BIT(2), +}; + +/* Setup */ +struct mlx5e_rx_res *mlx5e_rx_res_alloc(void); +int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, + enum mlx5e_rx_res_features features, unsigned int max_nch, + u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param, + unsigned int init_nch); +void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res); +void mlx5e_rx_res_free(struct mlx5e_rx_res *res); + +/* TIRN getters for flow steering */ +u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix); +u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix); +u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); +u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); +u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res); + +/* RQTN getters for modules that create their own TIRs */ +u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix); + +/* Activate/deactivate API */ +void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs); +void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res); +int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs, + unsigned int ix); +int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix); + +/* Configuration API */ +void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch); +int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, + u32 *indir, u8 *key, u8 *hfunc); +int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, + const u32 *indir, const u8 *key, const u8 *hfunc); + +u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); +int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt, + u8 rx_hash_fields); +int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param); + +int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch); +int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx); +int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res); +int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss); +struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx); + +/* Workaround for hairpin */ +struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res); + +#endif /* __MLX5_EN_RX_RES_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c new file mode 100644 index 000000000000..a3e43e898a56 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
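Reading rx_res.h as a whole, the section comments imply a strict call order: alloc, then init, then channels_activate, with deactivate, destroy and free mirroring it on the way down. The sketch below models that contract with stub functions and an explicit state enum; none of this is the real API, only its shape.

#include <assert.h>
#include <stdlib.h>

enum state { ALLOCED, INITED, ACTIVE };

struct rx_res { enum state st; };

static struct rx_res *rx_res_alloc(void)
{
    return calloc(1, sizeof(struct rx_res));    /* zeroed: ALLOCED */
}

static void rx_res_init(struct rx_res *r)       { assert(r->st == ALLOCED); r->st = INITED; }
static void rx_res_activate(struct rx_res *r)   { assert(r->st == INITED);  r->st = ACTIVE; }
static void rx_res_deactivate(struct rx_res *r) { assert(r->st == ACTIVE);  r->st = INITED; }
static void rx_res_destroy(struct rx_res *r)    { assert(r->st == INITED);  r->st = ALLOCED; }

int main(void)
{
    struct rx_res *r = rx_res_alloc();

    if (!r)
        return 1;
    rx_res_init(r);
    rx_res_activate(r);     /* datapath up: RQTs point at channel RQs */
    rx_res_deactivate(r);   /* RQTs redirected to the drop RQ */
    rx_res_destroy(r);
    free(r);
    return 0;
}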
+ +#include "en_tc.h" +#include "post_act.h" +#include "mlx5_core.h" + +struct mlx5e_post_act { + enum mlx5_flow_namespace_type ns_type; + struct mlx5_fs_chains *chains; + struct mlx5_flow_table *ft; + struct mlx5e_priv *priv; + struct xarray ids; +}; + +struct mlx5e_post_act_handle { + enum mlx5_flow_namespace_type ns_type; + struct mlx5_flow_attr *attr; + struct mlx5_flow_handle *rule; + u32 id; +}; + +#define MLX5_POST_ACTION_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen) +#define MLX5_POST_ACTION_MAX GENMASK(MLX5_POST_ACTION_BITS - 1, 0) +#define MLX5_POST_ACTION_MASK MLX5_POST_ACTION_MAX + +struct mlx5e_post_act * +mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, + enum mlx5_flow_namespace_type ns_type) +{ + struct mlx5e_post_act *post_act; + int err; + + if (ns_type == MLX5_FLOW_NAMESPACE_FDB && + !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ignore_flow_level)) { + mlx5_core_warn(priv->mdev, "firmware level support is missing\n"); + err = -EOPNOTSUPP; + goto err_check; + } else if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) { + mlx5_core_warn(priv->mdev, "firmware level support is missing\n"); + err = -EOPNOTSUPP; + goto err_check; + } + + post_act = kzalloc(sizeof(*post_act), GFP_KERNEL); + if (!post_act) { + err = -ENOMEM; + goto err_check; + } + post_act->ft = mlx5_chains_create_global_table(chains); + if (IS_ERR(post_act->ft)) { + err = PTR_ERR(post_act->ft); + mlx5_core_warn(priv->mdev, "failed to create post action table, err: %d\n", err); + goto err_ft; + } + post_act->chains = chains; + post_act->ns_type = ns_type; + post_act->priv = priv; + xa_init_flags(&post_act->ids, XA_FLAGS_ALLOC1); + return post_act; + +err_ft: + kfree(post_act); +err_check: + return ERR_PTR(err); +} + +void +mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act) +{ + if (IS_ERR_OR_NULL(post_act)) + return; + + xa_destroy(&post_act->ids); + mlx5_chains_destroy_global_table(post_act->chains, post_act->ft); + kfree(post_act); +} + +struct mlx5e_post_act_handle * +mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr) +{ + u32 attr_sz = ns_to_attr_sz(post_act->ns_type); + struct mlx5e_post_act_handle *handle = NULL; + struct mlx5_flow_attr *post_attr = NULL; + struct mlx5_flow_spec *spec = NULL; + int err; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + post_attr = mlx5_alloc_flow_attr(post_act->ns_type); + if (!handle || !spec || !post_attr) { + kfree(post_attr); + kvfree(spec); + kfree(handle); + return ERR_PTR(-ENOMEM); + } + + memcpy(post_attr, attr, attr_sz); + post_attr->chain = 0; + post_attr->prio = 0; + post_attr->ft = post_act->ft; + post_attr->inner_match_level = MLX5_MATCH_NONE; + post_attr->outer_match_level = MLX5_MATCH_NONE; + post_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP); + + handle->ns_type = post_act->ns_type; + /* Splits were handled before post action */ + if (handle->ns_type == MLX5_FLOW_NAMESPACE_FDB) + post_attr->esw_attr->split_count = 0; + + err = xa_alloc(&post_act->ids, &handle->id, post_attr, + XA_LIMIT(1, MLX5_POST_ACTION_MAX), GFP_KERNEL); + if (err) + goto err_xarray; + + /* Post action rule matches on fte_id and executes original rule's + * tc rule action + */ + mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG, + handle->id, MLX5_POST_ACTION_MASK); + + handle->rule = mlx5_tc_rule_insert(post_act->priv, spec, post_attr); + if (IS_ERR(handle->rule)) { + err = PTR_ERR(handle->rule); + netdev_warn(post_act->priv->netdev, "Failed to add 
post action rule"); + goto err_rule; + } + handle->attr = post_attr; + + kvfree(spec); + return handle; + +err_rule: + xa_erase(&post_act->ids, handle->id); +err_xarray: + kfree(post_attr); + kvfree(spec); + kfree(handle); + return ERR_PTR(err); +} + +void +mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle) +{ + mlx5_tc_rule_delete(post_act->priv, handle->rule, handle->attr); + xa_erase(&post_act->ids, handle->id); + kfree(handle->attr); + kfree(handle); +} + +struct mlx5_flow_table * +mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act) +{ + return post_act->ft; +} + +/* Allocate a header modify action to write the post action handle fte id to a register. */ +int +mlx5e_tc_post_act_set_handle(struct mlx5_core_dev *dev, + struct mlx5e_post_act_handle *handle, + struct mlx5e_tc_mod_hdr_acts *acts) +{ + return mlx5e_tc_match_to_reg_set(dev, acts, handle->ns_type, FTEID_TO_REG, handle->id); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h new file mode 100644 index 000000000000..b530ec1981a5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef __MLX5_POST_ACTION_H__ +#define __MLX5_POST_ACTION_H__ + +#include "en.h" +#include "lib/fs_chains.h" + +struct mlx5_flow_attr; +struct mlx5e_priv; +struct mlx5e_tc_mod_hdr_acts; + +struct mlx5e_post_act * +mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, + enum mlx5_flow_namespace_type ns_type); + +void +mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act); + +struct mlx5e_post_act_handle * +mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr); + +void +mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle); + +struct mlx5_flow_table * +mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act); + +int +mlx5e_tc_post_act_set_handle(struct mlx5_core_dev *dev, + struct mlx5e_post_act_handle *handle, + struct mlx5e_tc_mod_hdr_acts *acts); + +#endif /* __MLX5_POST_ACTION_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c index d3ad78aa9d45..6552ecee3f9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c @@ -4,7 +4,8 @@ #include <linux/skbuff.h> #include <net/psample.h> #include "en/mapping.h" -#include "esw/sample.h" +#include "en/tc/post_act.h" +#include "sample.h" #include "eswitch.h" #include "en_tc.h" #include "fs_core.h" @@ -17,17 +18,18 @@ static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = { .flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP, }; -struct mlx5_esw_psample { - struct mlx5e_priv *priv; +struct mlx5e_tc_psample { + struct mlx5_eswitch *esw; struct mlx5_flow_table *termtbl; struct mlx5_flow_handle *termtbl_rule; DECLARE_HASHTABLE(hashtbl, 8); struct mutex ht_lock; /* protect hashtbl */ DECLARE_HASHTABLE(restore_hashtbl, 8); struct mutex restore_lock; /* protect restore_hashtbl */ + struct mlx5e_post_act *post_act; }; -struct mlx5_sampler { +struct mlx5e_sampler { struct hlist_node hlist; u32 sampler_id; u32 sample_ratio; @@ -36,29 +38,32 @@ struct mlx5_sampler { int count; }; -struct mlx5_sample_flow { - struct 
mlx5_sampler *sampler; - struct mlx5_sample_restore *restore; +struct mlx5e_sample_flow { + struct mlx5e_sampler *sampler; + struct mlx5e_sample_restore *restore; struct mlx5_flow_attr *pre_attr; struct mlx5_flow_handle *pre_rule; - struct mlx5_flow_handle *rule; + struct mlx5_flow_attr *post_attr; + struct mlx5_flow_handle *post_rule; + struct mlx5e_post_act_handle *post_act_handle; }; -struct mlx5_sample_restore { +struct mlx5e_sample_restore { struct hlist_node hlist; struct mlx5_modify_hdr *modify_hdr; struct mlx5_flow_handle *rule; + struct mlx5e_post_act_handle *post_act_handle; u32 obj_id; int count; }; static int -sampler_termtbl_create(struct mlx5_esw_psample *esw_psample) +sampler_termtbl_create(struct mlx5e_tc_psample *tc_psample) { - struct mlx5_core_dev *dev = esw_psample->priv->mdev; - struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_eswitch *esw = tc_psample->esw; struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_destination dest = {}; + struct mlx5_core_dev *dev = esw->dev; struct mlx5_flow_namespace *root_ns; struct mlx5_flow_act act = {}; int err; @@ -79,20 +84,20 @@ sampler_termtbl_create(struct mlx5_esw_psample *esw_psample) ft_attr.prio = FDB_SLOW_PATH; ft_attr.max_fte = 1; ft_attr.level = 1; - esw_psample->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); - if (IS_ERR(esw_psample->termtbl)) { - err = PTR_ERR(esw_psample->termtbl); + tc_psample->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); + if (IS_ERR(tc_psample->termtbl)) { + err = PTR_ERR(tc_psample->termtbl); mlx5_core_warn(dev, "failed to create termtbl, err: %d\n", err); return err; } act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; dest.vport.num = esw->manager_vport; - esw_psample->termtbl_rule = mlx5_add_flow_rules(esw_psample->termtbl, NULL, &act, &dest, 1); - if (IS_ERR(esw_psample->termtbl_rule)) { - err = PTR_ERR(esw_psample->termtbl_rule); + tc_psample->termtbl_rule = mlx5_add_flow_rules(tc_psample->termtbl, NULL, &act, &dest, 1); + if (IS_ERR(tc_psample->termtbl_rule)) { + err = PTR_ERR(tc_psample->termtbl_rule); mlx5_core_warn(dev, "failed to create termtbl rule, err: %d\n", err); - mlx5_destroy_flow_table(esw_psample->termtbl); + mlx5_destroy_flow_table(tc_psample->termtbl); return err; } @@ -100,14 +105,14 @@ sampler_termtbl_create(struct mlx5_esw_psample *esw_psample) } static void -sampler_termtbl_destroy(struct mlx5_esw_psample *esw_psample) +sampler_termtbl_destroy(struct mlx5e_tc_psample *tc_psample) { - mlx5_del_flow_rules(esw_psample->termtbl_rule); - mlx5_destroy_flow_table(esw_psample->termtbl); + mlx5_del_flow_rules(tc_psample->termtbl_rule); + mlx5_destroy_flow_table(tc_psample->termtbl); } static int -sampler_obj_create(struct mlx5_core_dev *mdev, struct mlx5_sampler *sampler) +sampler_obj_create(struct mlx5_core_dev *mdev, struct mlx5e_sampler *sampler) { u32 in[MLX5_ST_SZ_DW(create_sampler_obj_in)] = {}; u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; @@ -163,16 +168,16 @@ sampler_cmp(u32 sample_ratio1, u32 default_table_id1, u32 sample_ratio2, u32 def return sample_ratio1 != sample_ratio2 || default_table_id1 != default_table_id2; } -static struct mlx5_sampler * -sampler_get(struct mlx5_esw_psample *esw_psample, u32 sample_ratio, u32 default_table_id) +static struct mlx5e_sampler * +sampler_get(struct mlx5e_tc_psample *tc_psample, u32 sample_ratio, u32 default_table_id) { - struct mlx5_sampler *sampler; + struct mlx5e_sampler *sampler; u32 hash_key; int err; - mutex_lock(&esw_psample->ht_lock); + mutex_lock(&tc_psample->ht_lock); 
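sampler_get()/sampler_put(), which this hunk renames, implement a classic get-or-create cache: flows requesting the same (sample ratio, default table id) pair share one sampler object, found by a lookup under a mutex, with a plain use count deciding when the object is destroyed. A self-contained userspace sketch of the scheme, using a linked list where the driver uses a kernel hashtable; every name is illustrative.

#include <pthread.h>
#include <stdlib.h>

struct sampler {
    struct sampler *next;
    unsigned int ratio, table_id;
    int count;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct sampler *head;

static struct sampler *sampler_get(unsigned int ratio, unsigned int table_id)
{
    struct sampler *s;

    pthread_mutex_lock(&lock);
    for (s = head; s; s = s->next)
        if (s->ratio == ratio && s->table_id == table_id)
            goto add_ref;               /* reuse the existing object */
    s = calloc(1, sizeof(*s));
    if (!s)
        goto out;
    s->ratio = ratio;
    s->table_id = table_id;
    s->next = head;
    head = s;
add_ref:
    s->count++;
out:
    pthread_mutex_unlock(&lock);
    return s;
}

static void sampler_put(struct sampler *s)
{
    struct sampler **p;

    pthread_mutex_lock(&lock);
    if (--s->count == 0) {              /* last user: unlink and free */
        for (p = &head; *p != s; p = &(*p)->next)
            ;
        *p = s->next;
        free(s);
    }
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    struct sampler *a = sampler_get(100, 7);
    struct sampler *b = sampler_get(100, 7);   /* same object as a */
    int same = (a == b);

    sampler_put(b);
    sampler_put(a);                     /* count hits zero, object freed */
    return same ? 0 : 1;
}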
hash_key = sampler_hash(sample_ratio, default_table_id); - hash_for_each_possible(esw_psample->hashtbl, sampler, hlist, hash_key) + hash_for_each_possible(tc_psample->hashtbl, sampler, hlist, hash_key) if (!sampler_cmp(sampler->sample_ratio, sampler->default_table_id, sample_ratio, default_table_id)) goto add_ref; @@ -183,42 +188,49 @@ sampler_get(struct mlx5_esw_psample *esw_psample, u32 sample_ratio, u32 default_ goto err_alloc; } - sampler->sample_table_id = esw_psample->termtbl->id; + sampler->sample_table_id = tc_psample->termtbl->id; sampler->default_table_id = default_table_id; sampler->sample_ratio = sample_ratio; - err = sampler_obj_create(esw_psample->priv->mdev, sampler); + err = sampler_obj_create(tc_psample->esw->dev, sampler); if (err) goto err_create; - hash_add(esw_psample->hashtbl, &sampler->hlist, hash_key); + hash_add(tc_psample->hashtbl, &sampler->hlist, hash_key); add_ref: sampler->count++; - mutex_unlock(&esw_psample->ht_lock); + mutex_unlock(&tc_psample->ht_lock); return sampler; err_create: kfree(sampler); err_alloc: - mutex_unlock(&esw_psample->ht_lock); + mutex_unlock(&tc_psample->ht_lock); return ERR_PTR(err); } static void -sampler_put(struct mlx5_esw_psample *esw_psample, struct mlx5_sampler *sampler) +sampler_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sampler *sampler) { - mutex_lock(&esw_psample->ht_lock); + mutex_lock(&tc_psample->ht_lock); if (--sampler->count == 0) { hash_del(&sampler->hlist); - sampler_obj_destroy(esw_psample->priv->mdev, sampler->sampler_id); + sampler_obj_destroy(tc_psample->esw->dev, sampler->sampler_id); kfree(sampler); } - mutex_unlock(&esw_psample->ht_lock); + mutex_unlock(&tc_psample->ht_lock); } +/* obj_id is used to restore the sample parameters. + * Set fte_id in original flow table, then match it in the default table. + * Only set it for NICs that can preserve reg_c, or when there is a decap action. + * For other cases, use the same match in the default table. + * Use one header rewrite for both obj_id and fte_id.
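The comment above ends by noting that one header rewrite carries both obj_id and fte_id. The idea is simply that the two identifiers occupy disjoint bit ranges of one metadata register, so a single modify-header action sets both and the receive path unpacks them independently. A sketch of that packing; the field widths below are invented for illustration and do not match the hardware layout.

#include <assert.h>
#include <stdint.h>

#define OBJ_BITS 16
#define OBJ_MASK ((1u << OBJ_BITS) - 1)
#define FTE_BITS 12
#define FTE_MASK ((1u << FTE_BITS) - 1)

/* One "header rewrite": both IDs land in a single 32-bit register. */
static uint32_t pack(uint32_t obj_id, uint32_t fte_id)
{
    return (obj_id & OBJ_MASK) | ((fte_id & FTE_MASK) << OBJ_BITS);
}

int main(void)
{
    uint32_t reg = pack(0xBEEF, 0x123);

    assert((reg & OBJ_MASK) == 0xBEEF);               /* restore obj_id */
    assert(((reg >> OBJ_BITS) & FTE_MASK) == 0x123);  /* restore fte_id */
    return 0;
}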
+ */ static struct mlx5_modify_hdr * -sample_metadata_rule_get(struct mlx5_core_dev *mdev, u32 obj_id) +sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id, + struct mlx5e_post_act_handle *handle) { struct mlx5e_tc_mod_hdr_acts mod_acts = {}; struct mlx5_modify_hdr *modify_hdr; @@ -229,6 +241,12 @@ sample_metadata_rule_get(struct mlx5_core_dev *mdev, u32 obj_id) if (err) goto err_set_regc0; + if (handle) { + err = mlx5e_tc_post_act_set_handle(mdev, handle, &mod_acts); + if (err) + goto err_post_act; + } + modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB, mod_acts.num_actions, mod_acts.actions); @@ -241,23 +259,40 @@ sample_metadata_rule_get(struct mlx5_core_dev *mdev, u32 obj_id) return modify_hdr; err_modify_hdr: +err_post_act: dealloc_mod_hdr_actions(&mod_acts); err_set_regc0: return ERR_PTR(err); } -static struct mlx5_sample_restore * -sample_restore_get(struct mlx5_esw_psample *esw_psample, u32 obj_id) +static u32 +restore_hash(u32 obj_id, struct mlx5e_post_act_handle *post_act_handle) { - struct mlx5_core_dev *mdev = esw_psample->priv->mdev; - struct mlx5_eswitch *esw = mdev->priv.eswitch; - struct mlx5_sample_restore *restore; + return jhash_2words(obj_id, hash32_ptr(post_act_handle), 0); +} + +static bool +restore_equal(struct mlx5e_sample_restore *restore, u32 obj_id, + struct mlx5e_post_act_handle *post_act_handle) +{ + return restore->obj_id == obj_id && restore->post_act_handle == post_act_handle; +} + +static struct mlx5e_sample_restore * +sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id, + struct mlx5e_post_act_handle *post_act_handle) +{ + struct mlx5_eswitch *esw = tc_psample->esw; + struct mlx5_core_dev *mdev = esw->dev; + struct mlx5e_sample_restore *restore; struct mlx5_modify_hdr *modify_hdr; + u32 hash_key; int err; - mutex_lock(&esw_psample->restore_lock); - hash_for_each_possible(esw_psample->restore_hashtbl, restore, hlist, obj_id) - if (restore->obj_id == obj_id) + mutex_lock(&tc_psample->restore_lock); + hash_key = restore_hash(obj_id, post_act_handle); + hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, hash_key) + if (restore_equal(restore, obj_id, post_act_handle)) goto add_ref; restore = kzalloc(sizeof(*restore), GFP_KERNEL); @@ -266,8 +301,9 @@ sample_restore_get(struct mlx5_esw_psample *esw_psample, u32 obj_id) goto err_alloc; } restore->obj_id = obj_id; + restore->post_act_handle = post_act_handle; - modify_hdr = sample_metadata_rule_get(mdev, obj_id); + modify_hdr = sample_modify_hdr_get(mdev, obj_id, post_act_handle); if (IS_ERR(modify_hdr)) { err = PTR_ERR(modify_hdr); goto err_modify_hdr; @@ -280,10 +316,10 @@ sample_restore_get(struct mlx5_esw_psample *esw_psample, u32 obj_id) goto err_restore; } - hash_add(esw_psample->restore_hashtbl, &restore->hlist, obj_id); + hash_add(tc_psample->restore_hashtbl, &restore->hlist, hash_key); add_ref: restore->count++; - mutex_unlock(&esw_psample->restore_lock); + mutex_unlock(&tc_psample->restore_lock); return restore; err_restore: @@ -291,26 +327,26 @@ err_restore: err_modify_hdr: kfree(restore); err_alloc: - mutex_unlock(&esw_psample->restore_lock); + mutex_unlock(&tc_psample->restore_lock); return ERR_PTR(err); } static void -sample_restore_put(struct mlx5_esw_psample *esw_psample, struct mlx5_sample_restore *restore) +sample_restore_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sample_restore *restore) { - mutex_lock(&esw_psample->restore_lock); + mutex_lock(&tc_psample->restore_lock); if (--restore->count == 0) hash_del(&restore->hlist); - 
mutex_unlock(&esw_psample->restore_lock); + mutex_unlock(&tc_psample->restore_lock); if (!restore->count) { mlx5_del_flow_rules(restore->rule); - mlx5_modify_header_dealloc(esw_psample->priv->mdev, restore->modify_hdr); + mlx5_modify_header_dealloc(tc_psample->esw->dev, restore->modify_hdr); kfree(restore); } } -void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) +void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) { u32 trunc_size = mapped_obj->sample.trunc_size; struct psample_group psample_group = {}; @@ -325,6 +361,87 @@ void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj psample_sample_packet(&psample_group, skb, mapped_obj->sample.rate, &md); } +static int +add_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow, + struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr, + u32 *default_tbl_id) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + u32 attr_sz = ns_to_attr_sz(MLX5_FLOW_NAMESPACE_FDB); + struct mlx5_vport_tbl_attr per_vport_tbl_attr; + struct mlx5_flow_table *default_tbl; + struct mlx5_flow_attr *post_attr; + int err; + + /* Allocate default table per vport, chain and prio. Otherwise, there is + * only one default table for the same sampler object. Rules with different + * prio and chain may overlap. For CT sample action, per vport default + * table is needed to restore the metadata. + */ + per_vport_tbl_attr.chain = attr->chain; + per_vport_tbl_attr.prio = attr->prio; + per_vport_tbl_attr.vport = esw_attr->in_rep->vport; + per_vport_tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; + default_tbl = mlx5_esw_vporttbl_get(esw, &per_vport_tbl_attr); + if (IS_ERR(default_tbl)) { + err = PTR_ERR(default_tbl); + goto err_default_tbl; + } + *default_tbl_id = default_tbl->id; + + post_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB); + if (!post_attr) { + err = -ENOMEM; + goto err_attr; + } + sample_flow->post_attr = post_attr; + memcpy(post_attr, attr, attr_sz); + /* Perform the original matches on the default table. + * Offload all actions except the sample action. + */ + post_attr->chain = 0; + post_attr->prio = 0; + post_attr->ft = default_tbl; + post_attr->flags = MLX5_ESW_ATTR_FLAG_NO_IN_PORT; + + /* When offloading sample and encap action, if there is no valid + * neigh data struct, a slow path rule is offloaded first. Source + * port metadata match is set at that time. A per vport table is + * already allocated. No need to match it again. So clear the source + * port metadata match.
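add_post_rule() above also shows the goto-unwind convention used across this patch: each acquired resource gets a label, and a failure at step N jumps to the label that releases steps N-1 downward, in reverse order of acquisition. A leak-free standalone model with hypothetical resources and an injected failure.

#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_rule)
{
    char *tbl, *attr;
    int err;

    tbl = malloc(16);               /* step 1: get the default table */
    if (!tbl)
        return -12;                 /* -ENOMEM, nothing to unwind yet */
    attr = malloc(16);              /* step 2: allocate the flow attr */
    if (!attr) {
        err = -12;
        goto err_attr;
    }
    if (fail_rule) {                /* step 3: pretend rule insertion failed */
        err = -22;                  /* -EINVAL */
        goto err_rule;
    }
    free(attr);                     /* real code keeps these on success; */
    free(tbl);                      /* freed here only to keep the demo clean */
    return 0;

err_rule:
    free(attr);
err_attr:
    free(tbl);
    return err;
}

int main(void)
{
    printf("%d %d\n", setup(1), setup(0));  /* -22, then 0 */
    return 0;
}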
+ */ + mlx5_eswitch_clear_rule_source_port(esw, spec); + sample_flow->post_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, post_attr); + if (IS_ERR(sample_flow->post_rule)) { + err = PTR_ERR(sample_flow->post_rule); + goto err_rule; + } + return 0; + +err_rule: + kfree(post_attr); +err_attr: + mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr); +err_default_tbl: + return err; +} + +static void +del_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5_vport_tbl_attr tbl_attr; + + mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule, sample_flow->post_attr); + kfree(sample_flow->post_attr); + tbl_attr.chain = attr->chain; + tbl_attr.prio = attr->prio; + tbl_attr.vport = esw_attr->in_rep->vport; + tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; + mlx5_esw_vporttbl_put(esw, &tbl_attr); +} + /* For the following typical flow table: * * +-------------------------------+ @@ -342,8 +459,9 @@ void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj * +---------------------+ * + original match + * +---------------------+ - * | - * v + * | set fte_id (if reg_c preserve cap) + * | do decap (if required) + * v * +------------------------------------------------+ * + Flow Sampler Object + * +------------------------------------------------+ @@ -353,80 +471,82 @@ void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj * +------------------------------------------------+ * | | * v v - * +-----------------------------+ +----------------------------------------+ - * + sample table + + default table per <vport, chain, prio> + - * +-----------------------------+ +----------------------------------------+ - * + forward to management vport + + original match + - * +-----------------------------+ +----------------------------------------+ - * + other actions + - * +----------------------------------------+ + * +-----------------------------+ +-------------------+ + * + sample table + + default table + + * +-----------------------------+ +-------------------+ + * + forward to management vport + | + * +-----------------------------+ | + * +-------+------+ + * | |reg_c preserve cap + * | |or decap action + * v v + * +-----------------+ +-------------+ + * + per vport table + + post action + + * +-----------------+ +-------------+ + * + original match + + * +-----------------+ + * + other actions + + * +-----------------+ */ struct mlx5_flow_handle * -mlx5_esw_sample_offload(struct mlx5_esw_psample *esw_psample, +mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, struct mlx5_flow_spec *spec, - struct mlx5_flow_attr *attr) + struct mlx5_flow_attr *attr, + u32 tunnel_id) { + struct mlx5e_post_act_handle *post_act_handle = NULL; struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; - struct mlx5_vport_tbl_attr per_vport_tbl_attr; struct mlx5_esw_flow_attr *pre_esw_attr; struct mlx5_mapped_obj restore_obj = {}; - struct mlx5_sample_flow *sample_flow; - struct mlx5_sample_attr *sample_attr; - struct mlx5_flow_table *default_tbl; + struct mlx5e_sample_flow *sample_flow; + struct mlx5e_sample_attr *sample_attr; struct mlx5_flow_attr *pre_attr; struct mlx5_eswitch *esw; + u32 default_tbl_id; u32 obj_id; int err; - if (IS_ERR_OR_NULL(esw_psample)) + if (IS_ERR_OR_NULL(tc_psample)) return ERR_PTR(-EOPNOTSUPP); /* If slow path flag is set, eg. when the neigh is invalid for encap, * don't offload sample action. 
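mlx5e_tc_sample_offload(), whose preamble appears above, chooses between two default-table strategies: when the NIC can preserve reg_c across the sampler, or when a decap action is present, the shared post action table serves as the default table and only the fte_id is matched; otherwise a per <vport, chain, prio> table repeating the original match is created. A trivial sketch of that decision, with boolean stand-ins for the real capability checks.

#include <stdbool.h>
#include <stdio.h>

static const char *pick_default_table(bool reg_c_preserve, bool has_decap)
{
    if (reg_c_preserve || has_decap)
        return "post_act table (match fte_id only)";
    return "per-vport table (repeat the original match)";
}

int main(void)
{
    printf("%s\n", pick_default_table(true, false));
    printf("%s\n", pick_default_table(false, false));
    return 0;
}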
*/ - esw = esw_psample->priv->mdev->priv.eswitch; + esw = tc_psample->esw; if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) return mlx5_eswitch_add_offloaded_rule(esw, spec, attr); sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL); if (!sample_flow) return ERR_PTR(-ENOMEM); - esw_attr->sample->sample_flow = sample_flow; - - /* Allocate default table per vport, chain and prio. Otherwise, there is - * only one default table for the same sampler object. Rules with different - * prio and chain may overlap. For CT sample action, per vport default - * table is needed to resotre the metadata. - */ - per_vport_tbl_attr.chain = attr->chain; - per_vport_tbl_attr.prio = attr->prio; - per_vport_tbl_attr.vport = esw_attr->in_rep->vport; - per_vport_tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; - default_tbl = mlx5_esw_vporttbl_get(esw, &per_vport_tbl_attr); - if (IS_ERR(default_tbl)) { - err = PTR_ERR(default_tbl); - goto err_default_tbl; - } + sample_attr = attr->sample_attr; + sample_attr->sample_flow = sample_flow; - /* Perform the original matches on the default table. - * Offload all actions except the sample action. - */ - esw_attr->sample->sample_default_tbl = default_tbl; - /* When offloading sample and encap action, if there is no valid - * neigh data struct, a slow path rule is offloaded first. Source - * port metadata match is set at that time. A per vport table is - * already allocated. No need to match it again. So clear the source - * port metadata match. + /* For NICs with reg_c_preserve support or decap action, use + * post action instead of the per vport, chain and prio table. + * Only match the fte id instead of the same match in the + * original flow table. */ - mlx5_eswitch_clear_rule_source_port(esw, spec); - sample_flow->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); - if (IS_ERR(sample_flow->rule)) { - err = PTR_ERR(sample_flow->rule); - goto err_offload_rule; + if (MLX5_CAP_GEN(esw->dev, reg_c_preserve) || + attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { + struct mlx5_flow_table *ft; + + ft = mlx5e_tc_post_act_get_ft(tc_psample->post_act); + default_tbl_id = ft->id; + post_act_handle = mlx5e_tc_post_act_add(tc_psample->post_act, attr); + if (IS_ERR(post_act_handle)) { + err = PTR_ERR(post_act_handle); + goto err_post_act; + } + sample_flow->post_act_handle = post_act_handle; + } else { + err = add_post_rule(esw, sample_flow, spec, attr, &default_tbl_id); + if (err) + goto err_post_rule; } /* Create sampler object. */ - sample_flow->sampler = sampler_get(esw_psample, esw_attr->sample->rate, default_tbl->id); + sample_flow->sampler = sampler_get(tc_psample, sample_attr->rate, default_tbl_id); if (IS_ERR(sample_flow->sampler)) { err = PTR_ERR(sample_flow->sampler); goto err_sampler; @@ -434,16 +554,17 @@ mlx5_esw_sample_offload(struct mlx5_esw_psample *esw_psample, /* Create an id mapping reg_c0 value to sample object. */ restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE; - restore_obj.sample.group_id = esw_attr->sample->group_num; - restore_obj.sample.rate = esw_attr->sample->rate; - restore_obj.sample.trunc_size = esw_attr->sample->trunc_size; + restore_obj.sample.group_id = sample_attr->group_num; + restore_obj.sample.rate = sample_attr->rate; + restore_obj.sample.trunc_size = sample_attr->trunc_size; + restore_obj.sample.tunnel_id = tunnel_id; err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id); if (err) goto err_obj_id; - esw_attr->sample->restore_obj_id = obj_id; + sample_attr->restore_obj_id = obj_id; /* Create sample restore context. 
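sample_restore_get() now deduplicates on the pair (obj_id, post_act_handle) rather than on obj_id alone, hashing both words via jhash_2words() and hash32_ptr(). A userspace sketch of such a composite key; the mixing function below is a toy stand-in, not the real jhash.

#include <stdint.h>
#include <stdio.h>

static uint32_t hash_2words(uint32_t a, uint32_t b)
{
    uint32_t h = a * 0x9e3779b9u;       /* toy mix, not the real jhash */

    h ^= b + 0x9e3779b9u + (h << 6) + (h >> 2);
    return h;
}

struct restore_key {
    uint32_t obj_id;
    const void *handle;
};

static uint32_t restore_hash(const struct restore_key *k)
{
    return hash_2words(k->obj_id, (uint32_t)(uintptr_t)k->handle);
}

static int restore_equal(const struct restore_key *a, const struct restore_key *b)
{
    return a->obj_id == b->obj_id && a->handle == b->handle;
}

int main(void)
{
    int dummy;
    struct restore_key a = { 5, &dummy }, b = { 5, NULL };

    /* Same obj_id, different handle: must not be deduplicated. */
    printf("equal=%d hashes %#x %#x\n", restore_equal(&a, &b),
           restore_hash(&a), restore_hash(&b));
    return 0;
}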
*/ - sample_flow->restore = sample_restore_get(esw_psample, obj_id); + sample_flow->restore = sample_restore_get(tc_psample, obj_id, post_act_handle); if (IS_ERR(sample_flow->restore)) { err = PTR_ERR(sample_flow->restore); goto err_sample_restore; @@ -455,21 +576,23 @@ mlx5_esw_sample_offload(struct mlx5_esw_psample *esw_psample, pre_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB); if (!pre_attr) { err = -ENOMEM; - goto err_alloc_flow_attr; - } - sample_attr = kzalloc(sizeof(*sample_attr), GFP_KERNEL); - if (!sample_attr) { - err = -ENOMEM; - goto err_alloc_sample_attr; + goto err_alloc_pre_flow_attr; } - pre_esw_attr = pre_attr->esw_attr; pre_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + /* For decap action, do decap in the original flow table instead of the + * default flow table. + */ + if (tunnel_id) + pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; pre_attr->modify_hdr = sample_flow->restore->modify_hdr; pre_attr->flags = MLX5_ESW_ATTR_FLAG_SAMPLE; + pre_attr->inner_match_level = attr->inner_match_level; + pre_attr->outer_match_level = attr->outer_match_level; pre_attr->chain = attr->chain; pre_attr->prio = attr->prio; - pre_esw_attr->sample = sample_attr; - pre_esw_attr->sample->sampler_id = sample_flow->sampler->sampler_id; + pre_attr->sample_attr = attr->sample_attr; + sample_attr->sampler_id = sample_flow->sampler->sampler_id; + pre_esw_attr = pre_attr->esw_attr; pre_esw_attr->in_mdev = esw_attr->in_mdev; pre_esw_attr->in_rep = esw_attr->in_rep; sample_flow->pre_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, pre_attr); @@ -479,108 +602,113 @@ mlx5_esw_sample_offload(struct mlx5_esw_psample *esw_psample, } sample_flow->pre_attr = pre_attr; - return sample_flow->rule; + return sample_flow->post_rule; err_pre_offload_rule: - kfree(sample_attr); -err_alloc_sample_attr: kfree(pre_attr); -err_alloc_flow_attr: - sample_restore_put(esw_psample, sample_flow->restore); +err_alloc_pre_flow_attr: + sample_restore_put(tc_psample, sample_flow->restore); err_sample_restore: mapping_remove(esw->offloads.reg_c0_obj_pool, obj_id); err_obj_id: - sampler_put(esw_psample, sample_flow->sampler); + sampler_put(tc_psample, sample_flow->sampler); err_sampler: - /* For sample offload, rule is added in default_tbl. No need to call - * mlx5_esw_chains_put_table() - */ - attr->prio = 0; - attr->chain = 0; - mlx5_eswitch_del_offloaded_rule(esw, sample_flow->rule, attr); -err_offload_rule: - mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr); -err_default_tbl: + if (!post_act_handle) + del_post_rule(esw, sample_flow, attr); +err_post_rule: + if (post_act_handle) + mlx5e_tc_post_act_del(tc_psample->post_act, post_act_handle); +err_post_act: kfree(sample_flow); return ERR_PTR(err); } void -mlx5_esw_sample_unoffload(struct mlx5_esw_psample *esw_psample, +mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample, struct mlx5_flow_handle *rule, struct mlx5_flow_attr *attr) { struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; - struct mlx5_sample_flow *sample_flow; + struct mlx5e_sample_flow *sample_flow; struct mlx5_vport_tbl_attr tbl_attr; - struct mlx5_flow_attr *pre_attr; struct mlx5_eswitch *esw; - if (IS_ERR_OR_NULL(esw_psample)) + if (IS_ERR_OR_NULL(tc_psample)) return; /* If slow path flag is set, sample action is not offloaded. * No need to delete sample rule. 
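The unoffload path below warns that the delete order cannot change without hitting firmware syndromes: the rules referencing the sampler and restore context must be removed before the objects they point at. The toy checker here makes that dependency explicit; the object set and reference matrix are invented for illustration.

#include <stdio.h>

enum obj { PRE_RULE, POST_RULE, RESTORE, SAMPLER, NOBJ };

/* refs[a][b] != 0 means a references b, so a must be destroyed first. */
static const int refs[NOBJ][NOBJ] = {
    [PRE_RULE]  = { [RESTORE] = 1, [SAMPLER] = 1 },
    [POST_RULE] = { [SAMPLER] = 1 },
};

static int destroy_in_order(const enum obj *order)
{
    int alive[NOBJ] = { 1, 1, 1, 1 };

    for (int i = 0; i < NOBJ; i++) {
        enum obj o = order[i];

        for (int u = 0; u < NOBJ; u++)
            if (alive[u] && u != (int)o && refs[u][o])
                return -16;     /* still referenced: the "syndrome" */
        alive[o] = 0;
    }
    return 0;
}

int main(void)
{
    enum obj good[] = { PRE_RULE, POST_RULE, RESTORE, SAMPLER };
    enum obj bad[]  = { SAMPLER, PRE_RULE, POST_RULE, RESTORE };

    printf("good=%d bad=%d\n", destroy_in_order(good),
           destroy_in_order(bad));      /* 0 and -16 */
    return 0;
}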
*/ - esw = esw_psample->priv->mdev->priv.eswitch; + esw = tc_psample->esw; if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { mlx5_eswitch_del_offloaded_rule(esw, rule, attr); return; } - sample_flow = esw_attr->sample->sample_flow; - pre_attr = sample_flow->pre_attr; - memset(pre_attr, 0, sizeof(*pre_attr)); - esw = esw_psample->priv->mdev->priv.eswitch; - mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, pre_attr); - mlx5_eswitch_del_offloaded_rule(esw, sample_flow->rule, attr); - - sample_restore_put(esw_psample, sample_flow->restore); - mapping_remove(esw->offloads.reg_c0_obj_pool, esw_attr->sample->restore_obj_id); - sampler_put(esw_psample, sample_flow->sampler); - tbl_attr.chain = attr->chain; - tbl_attr.prio = attr->prio; - tbl_attr.vport = esw_attr->in_rep->vport; - tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; - mlx5_esw_vporttbl_put(esw, &tbl_attr); + /* The following delete order can't be changed, otherwise, + * will hit fw syndromes. + */ + sample_flow = attr->sample_attr->sample_flow; + mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr); + if (!sample_flow->post_act_handle) + mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule, + sample_flow->post_attr); + + sample_restore_put(tc_psample, sample_flow->restore); + mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id); + sampler_put(tc_psample, sample_flow->sampler); + if (sample_flow->post_act_handle) { + mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle); + } else { + tbl_attr.chain = attr->chain; + tbl_attr.prio = attr->prio; + tbl_attr.vport = esw_attr->in_rep->vport; + tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; + mlx5_esw_vporttbl_put(esw, &tbl_attr); + kfree(sample_flow->post_attr); + } - kfree(pre_attr->esw_attr->sample); - kfree(pre_attr); + kfree(sample_flow->pre_attr); kfree(sample_flow); } -struct mlx5_esw_psample * -mlx5_esw_sample_init(struct mlx5e_priv *priv) +struct mlx5e_tc_psample * +mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act) { - struct mlx5_esw_psample *esw_psample; + struct mlx5e_tc_psample *tc_psample; int err; - esw_psample = kzalloc(sizeof(*esw_psample), GFP_KERNEL); - if (!esw_psample) + tc_psample = kzalloc(sizeof(*tc_psample), GFP_KERNEL); + if (!tc_psample) return ERR_PTR(-ENOMEM); - esw_psample->priv = priv; - err = sampler_termtbl_create(esw_psample); + if (IS_ERR_OR_NULL(post_act)) { + err = PTR_ERR(post_act); + goto err_post_act; + } + tc_psample->post_act = post_act; + tc_psample->esw = esw; + err = sampler_termtbl_create(tc_psample); if (err) - goto err_termtbl; + goto err_post_act; - mutex_init(&esw_psample->ht_lock); - mutex_init(&esw_psample->restore_lock); + mutex_init(&tc_psample->ht_lock); + mutex_init(&tc_psample->restore_lock); - return esw_psample; + return tc_psample; -err_termtbl: - kfree(esw_psample); +err_post_act: + kfree(tc_psample); return ERR_PTR(err); } void -mlx5_esw_sample_cleanup(struct mlx5_esw_psample *esw_psample) +mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample) { - if (IS_ERR_OR_NULL(esw_psample)) + if (IS_ERR_OR_NULL(tc_psample)) return; - mutex_destroy(&esw_psample->restore_lock); - mutex_destroy(&esw_psample->ht_lock); - sampler_termtbl_destroy(esw_psample); - kfree(esw_psample); + mutex_destroy(&tc_psample->restore_lock); + mutex_destroy(&tc_psample->ht_lock); + sampler_termtbl_destroy(tc_psample); + kfree(tc_psample); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h 
b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h new file mode 100644 index 000000000000..db0146df9b30 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021 Mellanox Technologies. */ + +#ifndef __MLX5_EN_TC_SAMPLE_H__ +#define __MLX5_EN_TC_SAMPLE_H__ + +#include "eswitch.h" + +struct mlx5_flow_attr; +struct mlx5e_tc_psample; +struct mlx5e_post_act; + +struct mlx5e_sample_attr { + u32 group_num; + u32 rate; + u32 trunc_size; + u32 restore_obj_id; + u32 sampler_id; + struct mlx5e_sample_flow *sample_flow; +}; + +void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj); + +struct mlx5_flow_handle * +mlx5e_tc_sample_offload(struct mlx5e_tc_psample *sample_priv, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr, + u32 tunnel_id); + +void +mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *sample_priv, + struct mlx5_flow_handle *rule, + struct mlx5_flow_attr *attr); + +struct mlx5e_tc_psample * +mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act); + +void +mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample); + +#endif /* __MLX5_EN_TC_SAMPLE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 91e7a01e32be..6c949abcd2e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -19,6 +19,7 @@ #include "en/tc_ct.h" #include "en/mod_hdr.h" #include "en/mapping.h" +#include "en/tc/post_act.h" #include "en.h" #include "en_tc.h" #include "en_rep.h" @@ -32,10 +33,6 @@ #define MLX5_CT_STATE_RELATED_BIT BIT(5) #define MLX5_CT_STATE_INVALID_BIT BIT(6) -#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen) -#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0) -#define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX - #define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen) #define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0) @@ -46,14 +43,13 @@ struct mlx5_tc_ct_priv { struct mlx5_core_dev *dev; const struct net_device *netdev; struct mod_hdr_tbl *mod_hdr_tbl; - struct idr fte_ids; struct xarray tuple_ids; struct rhashtable zone_ht; struct rhashtable ct_tuples_ht; struct rhashtable ct_tuples_nat_ht; struct mlx5_flow_table *ct; struct mlx5_flow_table *ct_nat; - struct mlx5_flow_table *post_ct; + struct mlx5e_post_act *post_act; struct mutex control_lock; /* guards parallel adds/dels */ struct mapping_ctx *zone_mapping; struct mapping_ctx *labels_mapping; @@ -64,11 +60,9 @@ struct mlx5_tc_ct_priv { struct mlx5_ct_flow { struct mlx5_flow_attr *pre_ct_attr; - struct mlx5_flow_attr *post_ct_attr; struct mlx5_flow_handle *pre_ct_rule; - struct mlx5_flow_handle *post_ct_rule; + struct mlx5e_post_act_handle *post_act_handle; struct mlx5_ct_ft *ft; - u32 fte_id; u32 chain_mapping; }; @@ -768,7 +762,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; attr->dest_chain = 0; - attr->dest_ft = ct_priv->post_ct; + attr->dest_ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act); attr->ft = nat ? 
ct_priv->ct_nat : ct_priv->ct; attr->outer_match_level = MLX5_MATCH_L4; attr->counter = entry->counter->counter; @@ -1432,7 +1426,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft, ctstate |= MLX5_CT_STATE_NAT_BIT; mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, ctstate, ctstate); - dest.ft = ct_priv->post_ct; + dest.ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act); rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -1716,9 +1710,9 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) * | do decap * v * +---------------------+ - * + pre_ct/pre_ct_nat + if matches +---------------------+ - * + zone+nat match +---------------->+ post_ct (see below) + - * +---------------------+ set zone +---------------------+ + * + pre_ct/pre_ct_nat + if matches +-------------------------+ + * + zone+nat match +---------------->+ post_act (see below) + + * +---------------------+ set zone +-------------------------+ * | set zone * v * +--------------------+ @@ -1732,7 +1726,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) * | do nat (if needed) * v * +--------------+ - * + post_ct + original filter actions + * + post_act + original filter actions * + fte_id match +------------------------> * +--------------+ */ @@ -1746,19 +1740,15 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev); struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {}; u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type); - struct mlx5_flow_spec *post_ct_spec = NULL; + struct mlx5e_post_act_handle *handle; struct mlx5_flow_attr *pre_ct_attr; struct mlx5_modify_hdr *mod_hdr; - struct mlx5_flow_handle *rule; struct mlx5_ct_flow *ct_flow; int chain_mapping = 0, err; struct mlx5_ct_ft *ft; - u32 fte_id = 1; - post_ct_spec = kvzalloc(sizeof(*post_ct_spec), GFP_KERNEL); ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL); - if (!post_ct_spec || !ct_flow) { - kvfree(post_ct_spec); + if (!ct_flow) { kfree(ct_flow); return ERR_PTR(-ENOMEM); } @@ -1773,14 +1763,13 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, } ct_flow->ft = ft; - err = idr_alloc_u32(&ct_priv->fte_ids, ct_flow, &fte_id, - MLX5_FTE_ID_MAX, GFP_KERNEL); - if (err) { - netdev_warn(priv->netdev, - "Failed to allocate fte id, err: %d\n", err); - goto err_idr; + handle = mlx5e_tc_post_act_add(ct_priv->post_act, attr); + if (IS_ERR(handle)) { + err = PTR_ERR(handle); + ct_dbg("Failed to allocate post action handle"); + goto err_post_act_handle; } - ct_flow->fte_id = fte_id; + ct_flow->post_act_handle = handle; /* Base flow attributes of both rules on original rule attribute */ ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type); @@ -1789,15 +1778,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, goto err_alloc_pre; } - ct_flow->post_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type); - if (!ct_flow->post_ct_attr) { - err = -ENOMEM; - goto err_alloc_post; - } - pre_ct_attr = ct_flow->pre_ct_attr; memcpy(pre_ct_attr, attr, attr_sz); - memcpy(ct_flow->post_ct_attr, attr, attr_sz); /* Modify the original rule's action to fwd and modify, leave decap */ pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP; @@ -1823,10 +1805,9 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, goto err_mapping; } - err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type, - FTEID_TO_REG, fte_id); + err = mlx5e_tc_post_act_set_handle(priv->mdev, handle, 
&pre_mod_acts); if (err) { - ct_dbg("Failed to set fte_id register mapping"); + ct_dbg("Failed to set post action handle"); goto err_mapping; } @@ -1857,33 +1838,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, } pre_ct_attr->modify_hdr = mod_hdr; - /* Post ct rule matches on fte_id and executes original rule's - * tc rule action - */ - mlx5e_tc_match_to_reg_match(post_ct_spec, FTEID_TO_REG, - fte_id, MLX5_FTE_ID_MASK); - - /* Put post_ct rule on post_ct flow table */ - ct_flow->post_ct_attr->chain = 0; - ct_flow->post_ct_attr->prio = 0; - ct_flow->post_ct_attr->ft = ct_priv->post_ct; - - /* Splits were handled before CT */ - if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) - ct_flow->post_ct_attr->esw_attr->split_count = 0; - - ct_flow->post_ct_attr->inner_match_level = MLX5_MATCH_NONE; - ct_flow->post_ct_attr->outer_match_level = MLX5_MATCH_NONE; - ct_flow->post_ct_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP); - rule = mlx5_tc_rule_insert(priv, post_ct_spec, - ct_flow->post_ct_attr); - ct_flow->post_ct_rule = rule; - if (IS_ERR(ct_flow->post_ct_rule)) { - err = PTR_ERR(ct_flow->post_ct_rule); - ct_dbg("Failed to add post ct rule"); - goto err_insert_post_ct; - } - /* Change original rule point to ct table */ pre_ct_attr->dest_chain = 0; pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft; @@ -1897,28 +1851,21 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, attr->ct_attr.ct_flow = ct_flow; dealloc_mod_hdr_actions(&pre_mod_acts); - kvfree(post_ct_spec); - return rule; + return ct_flow->pre_ct_rule; err_insert_orig: - mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule, - ct_flow->post_ct_attr); -err_insert_post_ct: mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr); err_mapping: dealloc_mod_hdr_actions(&pre_mod_acts); mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping); err_get_chain: - kfree(ct_flow->post_ct_attr); -err_alloc_post: kfree(ct_flow->pre_ct_attr); err_alloc_pre: - idr_remove(&ct_priv->fte_ids, fte_id); -err_idr: + mlx5e_tc_post_act_del(ct_priv->post_act, handle); +err_post_act_handle: mlx5_tc_ct_del_ft_cb(ct_priv, ft); err_ft: - kvfree(post_ct_spec); kfree(ct_flow); netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err); return ERR_PTR(err); @@ -2029,16 +1976,13 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv, pre_ct_attr); mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr); - if (ct_flow->post_ct_rule) { - mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule, - ct_flow->post_ct_attr); + if (ct_flow->post_act_handle) { mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping); - idr_remove(&ct_priv->fte_ids, ct_flow->fte_id); + mlx5e_tc_post_act_del(ct_priv->post_act, ct_flow->post_act_handle); mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft); } kfree(ct_flow->pre_ct_attr); - kfree(ct_flow->post_ct_attr); kfree(ct_flow); } @@ -2064,11 +2008,6 @@ static int mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw, const char **err_msg) { - if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) { - *err_msg = "firmware level support is missing"; - return -EOPNOTSUPP; - } - if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) { /* vlan workaround should be avoided for multi chain rules. 
* This is just a sanity check as pop vlan action should @@ -2098,20 +2037,9 @@ mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw, } static int -mlx5_tc_ct_init_check_nic_support(struct mlx5e_priv *priv, - const char **err_msg) -{ - if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) { - *err_msg = "firmware level support is missing"; - return -EOPNOTSUPP; - } - - return 0; -} - -static int mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv, enum mlx5_flow_namespace_type ns_type, + struct mlx5e_post_act *post_act, const char **err_msg) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -2122,10 +2050,14 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv, *err_msg = "tc skb extension missing"; return -EOPNOTSUPP; #endif + if (IS_ERR_OR_NULL(post_act)) { + *err_msg = "tc ct offload not supported, post action is missing"; + return -EOPNOTSUPP; + } + if (ns_type == MLX5_FLOW_NAMESPACE_FDB) return mlx5_tc_ct_init_check_esw_support(esw, err_msg); - else - return mlx5_tc_ct_init_check_nic_support(priv, err_msg); + return 0; } #define INIT_ERR_PREFIX "tc ct offload init failed" @@ -2133,19 +2065,19 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv, struct mlx5_tc_ct_priv * mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, struct mod_hdr_tbl *mod_hdr, - enum mlx5_flow_namespace_type ns_type) + enum mlx5_flow_namespace_type ns_type, + struct mlx5e_post_act *post_act) { struct mlx5_tc_ct_priv *ct_priv; struct mlx5_core_dev *dev; const char *msg; + u64 mapping_id; int err; dev = priv->mdev; - err = mlx5_tc_ct_init_check_support(priv, ns_type, &msg); + err = mlx5_tc_ct_init_check_support(priv, ns_type, post_act, &msg); if (err) { - mlx5_core_warn(dev, - "tc ct offload not supported, %s\n", - msg); + mlx5_core_warn(dev, "tc ct offload not supported, %s\n", msg); goto err_support; } @@ -2153,13 +2085,17 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, if (!ct_priv) goto err_alloc; - ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true); + mapping_id = mlx5_query_nic_system_image_guid(dev); + + ct_priv->zone_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_ZONE, + sizeof(u16), 0, true); if (IS_ERR(ct_priv->zone_mapping)) { err = PTR_ERR(ct_priv->zone_mapping); goto err_mapping_zone; } - ct_priv->labels_mapping = mapping_create(sizeof(u32) * 4, 0, true); + ct_priv->labels_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_LABELS, + sizeof(u32) * 4, 0, true); if (IS_ERR(ct_priv->labels_mapping)) { err = PTR_ERR(ct_priv->labels_mapping); goto err_mapping_labels; @@ -2189,16 +2125,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, goto err_ct_nat_tbl; } - ct_priv->post_ct = mlx5_chains_create_global_table(chains); - if (IS_ERR(ct_priv->post_ct)) { - err = PTR_ERR(ct_priv->post_ct); - mlx5_core_warn(dev, - "%s, failed to create post ct table err: %d\n", - INIT_ERR_PREFIX, err); - goto err_post_ct_tbl; - } - - idr_init(&ct_priv->fte_ids); + ct_priv->post_act = post_act; mutex_init(&ct_priv->control_lock); rhashtable_init(&ct_priv->zone_ht, &zone_params); rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params); @@ -2206,8 +2133,6 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, return ct_priv; -err_post_ct_tbl: - mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat); err_ct_nat_tbl: mlx5_chains_destroy_global_table(chains, ct_priv->ct); err_ct_tbl: @@ -2232,7 +2157,6 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv) chains = ct_priv->chains; - mlx5_chains_destroy_global_table(chains, ct_priv->post_ct); mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat); mlx5_chains_destroy_global_table(chains, ct_priv->ct); mapping_destroy(ct_priv->zone_mapping); @@ -2242,7 +2166,6 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv) rhashtable_destroy(&ct_priv->ct_tuples_nat_ht); rhashtable_destroy(&ct_priv->zone_ht); mutex_destroy(&ct_priv->control_lock); - idr_destroy(&ct_priv->fte_ids); kfree(ct_priv); }
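For readers following the tc_ct.c refactor above: the per-flow fte_id (an IDR-allocated value matched by a dedicated post_ct rule) is gone. Instead, the pre_ct rule stamps a post-action handle id into a metadata register via mlx5e_tc_post_act_set_handle(), and a shared post-action table owned by mlx5e_post_act dispatches on that id to run the original rule's actions. A minimal userspace sketch of that id-dispatch model, with invented names standing in for mlx5e_tc_post_act_add()/mlx5e_tc_post_act_del() (this is an illustration, not the driver's code):

```c
#include <stdio.h>

/* Hypothetical stand-ins for the post-action table: entries are keyed
 * by the id that the pre_ct rule writes into a metadata register. */
struct post_act_entry {
	int in_use;
	const char *actions;	/* the original rule's actions */
};

#define POST_ACT_MAX 8
static struct post_act_entry post_act_tbl[POST_ACT_MAX];

static int post_act_add(const char *actions)
{
	for (int id = 0; id < POST_ACT_MAX; id++) {
		if (!post_act_tbl[id].in_use) {
			post_act_tbl[id].in_use = 1;
			post_act_tbl[id].actions = actions;
			return id;
		}
	}
	return -1;		/* table full */
}

static void post_act_del(int id)
{
	post_act_tbl[id].in_use = 0;
}

/* The shared post-action table dispatches on the id carried in the
 * packet's metadata register. */
static void post_act_dispatch(int metadata_id)
{
	if (post_act_tbl[metadata_id].in_use)
		printf("executing: %s\n", post_act_tbl[metadata_id].actions);
}

int main(void)
{
	int id = post_act_add("fwd to dest + count");

	post_act_dispatch(id);	/* packet re-enters after CT lookup */
	post_act_del(id);
	return 0;
}
```

The payoff visible in the diff: one shared table plus a handle per flow replaces a per-flow post_ct rule, its flow-spec allocation, and the fte_id IDR.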
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 644cf1641cde..363329f4aac6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -92,7 +92,8 @@ struct mlx5_ct_attr { struct mlx5_tc_ct_priv * mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, struct mod_hdr_tbl *mod_hdr, - enum mlx5_flow_namespace_type ns_type); + enum mlx5_flow_namespace_type ns_type, + struct mlx5e_post_act *post_act); void mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv); @@ -132,7 +133,8 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv, static inline struct mlx5_tc_ct_priv * mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, struct mod_hdr_tbl *mod_hdr, - enum mlx5_flow_namespace_type ns_type) + enum mlx5_flow_namespace_type ns_type, + struct mlx5e_post_act *post_act) { return NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 1e2d117082d4..b4e986818794 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -525,7 +525,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, e->out_dev = attr.out_dev; e->route_dev_ifindex = attr.route_dev->ifindex; - /* It's importent to add the neigh to the hash table before checking + /* It's important to add the neigh to the hash table before checking * the neigh validity state. So if we'll get a notification, in case the * neigh changes it's validity state, we would find the relevant neigh * in the hash. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 2e846b741280..1c44c6c345f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -147,7 +147,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, mlx5e_rep_queue_neigh_stats_work(priv); list_for_each_entry(flow, flow_list, tmp_list) { - if (!mlx5e_is_offloaded_flow(flow)) + if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW)) continue; attr = flow->attr; esw_attr = attr->esw_attr; @@ -188,7 +188,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, int err; list_for_each_entry(flow, flow_list, tmp_list) { - if (!mlx5e_is_offloaded_flow(flow)) + if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW)) continue; attr = flow->attr; esw_attr = attr->esw_attr; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c new file mode 100644 index 000000000000..de936dc4bc48 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
*/ + +#include "tir.h" +#include "params.h" +#include <linux/mlx5/transobj.h> + +#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) + +/* max() doesn't work inside square brackets. */ +#define MLX5E_TIR_CMD_IN_SZ_DW ( \ + MLX5_ST_SZ_DW(create_tir_in) > MLX5_ST_SZ_DW(modify_tir_in) ? \ + MLX5_ST_SZ_DW(create_tir_in) : MLX5_ST_SZ_DW(modify_tir_in) \ +) + +struct mlx5e_tir_builder { + u32 in[MLX5E_TIR_CMD_IN_SZ_DW]; + bool modify; +}; + +struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify) +{ + struct mlx5e_tir_builder *builder; + + builder = kvzalloc(sizeof(*builder), GFP_KERNEL); + builder->modify = modify; + + return builder; +} + +void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder) +{ + kvfree(builder); +} + +void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder) +{ + memset(builder->in, 0, sizeof(builder->in)); +} + +static void *mlx5e_tir_builder_get_tirc(struct mlx5e_tir_builder *builder) +{ + if (builder->modify) + return MLX5_ADDR_OF(modify_tir_in, builder->in, ctx); + return MLX5_ADDR_OF(create_tir_in, builder->in, ctx); +} + +void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + + WARN_ON(builder->modify); + + MLX5_SET(tirc, tirc, transport_domain, tdn); + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); + MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE); + MLX5_SET(tirc, tirc, inline_rqn, rqn); +} + +void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn, + u32 rqtn, bool inner_ft_support) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + + WARN_ON(builder->modify); + + MLX5_SET(tirc, tirc, transport_domain, tdn); + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); + MLX5_SET(tirc, tirc, indirect_table, rqtn); + MLX5_SET(tirc, tirc, tunneled_offload_en, inner_ft_support); +} + +void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder, + const struct mlx5e_lro_param *lro_param) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + const unsigned int rough_max_l2_l3_hdr_sz = 256; + + if (builder->modify) + MLX5_SET(modify_tir_in, builder->in, bitmask.lro, 1); + + if (!lro_param->enabled) + return; + + MLX5_SET(tirc, tirc, lro_enable_mask, + MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | + MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); + MLX5_SET(tirc, tirc, lro_max_ip_payload_size, + (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8); + MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout); +} + +static int mlx5e_hfunc_to_hw(u8 hfunc) +{ + switch (hfunc) { + case ETH_RSS_HASH_TOP: + return MLX5_RX_HASH_FN_TOEPLITZ; + case ETH_RSS_HASH_XOR: + return MLX5_RX_HASH_FN_INVERTED_XOR8; + default: + return MLX5_RX_HASH_FN_NONE; + } +} + +void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder, + const struct mlx5e_rss_params_hash *rss_hash, + const struct mlx5e_rss_params_traffic_type *rss_tt, + bool inner) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + void *hfso; + + if (builder->modify) + MLX5_SET(modify_tir_in, builder->in, bitmask.hash, 1); + + MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_hfunc_to_hw(rss_hash->hfunc)); + if (rss_hash->hfunc == ETH_RSS_HASH_TOP) { + const size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key); + void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); + + MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); + memcpy(rss_key, rss_hash->toeplitz_hash_key, len); + } + + if (inner) + hfso = MLX5_ADDR_OF(tirc, tirc, 
rx_hash_field_selector_inner); + else + hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, rss_tt->l3_prot_type); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, rss_tt->l4_prot_type); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, rss_tt->rx_hash_fields); +} + +void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + + WARN_ON(builder->modify); + + MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); +} + +void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + + WARN_ON(builder->modify); + + MLX5_SET(tirc, tirc, tls_en, 1); + MLX5_SET(tirc, tirc, self_lb_block, + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST | + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST); +} + +int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder, + struct mlx5_core_dev *mdev, bool reg) +{ + int err; + + tir->mdev = mdev; + + err = mlx5_core_create_tir(tir->mdev, builder->in, &tir->tirn); + if (err) + return err; + + if (reg) { + struct mlx5e_hw_objs *res = &tir->mdev->mlx5e_res.hw_objs; + + mutex_lock(&res->td.list_lock); + list_add(&tir->list, &res->td.tirs_list); + mutex_unlock(&res->td.list_lock); + } else { + INIT_LIST_HEAD(&tir->list); + } + + return 0; +} + +void mlx5e_tir_destroy(struct mlx5e_tir *tir) +{ + struct mlx5e_hw_objs *res = &tir->mdev->mlx5e_res.hw_objs; + + /* Skip mutex if list_del is no-op (the TIR wasn't registered in the + * list). list_empty will never return true for an item of tirs_list, + * and READ_ONCE/WRITE_ONCE in list_empty/list_del guarantee consistency + * of the list->next value. + */ + if (!list_empty(&tir->list)) { + mutex_lock(&res->td.list_lock); + list_del(&tir->list); + mutex_unlock(&res->td.list_lock); + } + + mlx5_core_destroy_tir(tir->mdev, tir->tirn); +} + +int mlx5e_tir_modify(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder) +{ + return mlx5_core_modify_tir(tir->mdev, tir->tirn, builder->in); +}
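Two details in the new tir.c above are easy to gloss over. First, lro_max_ip_payload_size is programmed in 256-byte units (hence the >> 8): with the 64 KiB default LRO WQE size and the rough 256-byte L2/L3 header reservation, the value works out to 255 units, i.e. 65280 bytes. A standalone check of that arithmetic:

```c
#include <stdio.h>

int main(void)
{
	const unsigned int lro_wqe_sz = 64 * 1024;	/* MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ */
	const unsigned int rough_max_l2_l3_hdr_sz = 256;
	unsigned int units = (lro_wqe_sz - rough_max_l2_l3_hdr_sz) >> 8;

	/* prints: 255 units = 65280 bytes */
	printf("%u units = %u bytes\n", units, units << 8);
	return 0;
}
```

Second, the 40-byte rx_hash_toeplitz_key copied in mlx5e_tir_builder_build_rss() parameterizes the standard Toeplitz RSS hash computed by the NIC when the hash function is ETH_RSS_HASH_TOP. For reference, a self-contained software rendition of that hash in its usual sliding-window form (the key and tuple in main() are example values, not anything taken from the driver):

```c
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Toeplitz hash: for every set bit of the input, XOR in the 32-bit
 * window of the key that starts at that bit position. */
static uint32_t toeplitz_hash(const uint8_t *key, size_t key_len,
			      const uint8_t *data, size_t data_len)
{
	uint32_t window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16) |
			  ((uint32_t)key[2] << 8) | key[3];
	size_t next_bit = 32;	/* next key bit to slide into the window */
	uint32_t hash = 0;

	for (size_t i = 0; i < data_len; i++) {
		for (uint8_t mask = 0x80; mask; mask >>= 1) {
			if (data[i] & mask)
				hash ^= window;
			window <<= 1;
			if (next_bit < key_len * 8 &&
			    (key[next_bit / 8] & (0x80u >> (next_bit % 8))))
				window |= 1;
			next_bit++;
		}
	}
	return hash;
}

int main(void)
{
	/* 40-byte example key (the widely used RSS verification key). */
	static const uint8_t key[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
	};
	/* Example IPv4 TCP 4-tuple: src ip, dst ip, src port, dst port. */
	static const uint8_t tuple[12] = {
		66, 9, 149, 187,	/* 66.9.149.187 */
		161, 142, 100, 80,	/* 161.142.100.80 */
		0x0a, 0xea,		/* port 2794 */
		0x06, 0xe6,		/* port 1766 */
	};

	printf("toeplitz = 0x%08x\n",
	       toeplitz_hash(key, sizeof(key), tuple, sizeof(tuple)));
	return 0;
}
```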
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h new file mode 100644 index 000000000000..e45149a78ed9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */ + +#ifndef __MLX5_EN_TIR_H__ +#define __MLX5_EN_TIR_H__ + +#include <linux/kernel.h> + +struct mlx5e_rss_params_hash { + u8 hfunc; + u8 toeplitz_hash_key[40]; +}; + +struct mlx5e_rss_params_traffic_type { + u8 l3_prot_type; + u8 l4_prot_type; + u32 rx_hash_fields; +}; + +struct mlx5e_tir_builder; +struct mlx5e_lro_param; + +struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify); +void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder); +void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder); + +void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn); +void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn, + u32 rqtn, bool inner_ft_support); +void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder, + const struct mlx5e_lro_param *lro_param); +void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder, + const struct mlx5e_rss_params_hash *rss_hash, + const struct mlx5e_rss_params_traffic_type *rss_tt, + bool inner); +void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder); +void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder); + +struct mlx5_core_dev; + +struct mlx5e_tir { + struct mlx5_core_dev *mdev; + u32 tirn; + struct list_head list; +}; + +int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder, + struct mlx5_core_dev *mdev, bool reg); +void mlx5e_tir_destroy(struct mlx5e_tir *tir); + +static inline u32 mlx5e_tir_get_tirn(struct mlx5e_tir *tir) +{ + return tir->tirn; +} + +int mlx5e_tir_modify(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder); + +#endif /* __MLX5_EN_TIR_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index 7f94508594fb..d54607a42740 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -92,30 +92,19 @@ static void mlx5e_close_trap_rq(struct mlx5e_rq *rq) static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqn) { - void *tirc; - int inlen; - u32 *in; + struct mlx5e_tir_builder *builder; int err; - inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) + builder = mlx5e_tir_builder_alloc(false); + if (!builder) return -ENOMEM; - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); - MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE); - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); - MLX5_SET(tirc, tirc, inline_rqn, rqn); - err = mlx5e_create_tir(mdev, tir, in); - kvfree(in); + mlx5e_tir_builder_build_inline(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqn); + err = mlx5e_tir_init(tir, builder, mdev, true); - return err; -} + mlx5e_tir_builder_free(builder); -static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir) -{ - mlx5e_destroy_tir(mdev, tir); + return err; } static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev, @@ -173,7 +162,7 @@ err_napi_del: void mlx5e_close_trap(struct mlx5e_trap *trap) { - mlx5e_destroy_trap_direct_rq_tir(trap->mdev, &trap->tir); + mlx5e_tir_destroy(&trap->tir); mlx5e_close_trap_rq(&trap->rq); netif_napi_del(&trap->napi); kvfree(trap);
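The trap.c hunk above is the canonical consumer of the new builder: alloc, one or more build_* calls, mlx5e_tir_init(), free. One side note on the plumbing: tir.c sizes a single input buffer for both TIR commands with MLX5E_TIR_CMD_IN_SZ_DW, and its "max() doesn't work inside square brackets" comment refers to the kernel's max() macro, which expands to a statement expression and therefore is not the integer constant expression an array bound requires; a plain ternary over two constants is. A trivial userspace illustration of the same trick (SZ_CREATE/SZ_MODIFY are made-up stand-ins for the two MLX5_ST_SZ_DW() values):

```c
#include <stdio.h>

#define SZ_CREATE 32
#define SZ_MODIFY 48

/* A ternary over two constants is still an integer constant
 * expression, so it is legal as an array bound. */
#define CMD_IN_SZ_DW (SZ_CREATE > SZ_MODIFY ? SZ_CREATE : SZ_MODIFY)

struct cmd_builder {
	unsigned int in[CMD_IN_SZ_DW];	/* big enough for either command */
	int modify;
};

int main(void)
{
	struct cmd_builder b;

	printf("in[] holds %zu dwords\n", sizeof(b.in) / sizeof(b.in[0]));
	return 0;
}
```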
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c index 71e8d66fa150..7b562d2c8a19 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c @@ -122,7 +122,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv, * any Fill Ring entries at the setup stage. */ - err = mlx5e_xsk_redirect_rqt_to_channel(priv, priv->channels.c[ix]); + err = mlx5e_rx_res_xsk_activate(priv->rx_res, &priv->channels, ix); if (unlikely(err)) goto err_deactivate; @@ -169,7 +169,7 @@ static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix) goto remove_pool; c = priv->channels.c[ix]; - mlx5e_xsk_redirect_rqt_to_drop(priv, ix); + mlx5e_rx_res_xsk_deactivate(priv->rx_res, ix); mlx5e_deactivate_xsk(c); mlx5e_close_xsk(c); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index a8315f166696..538bc2419bd8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -126,7 +126,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, /* Create a separate SQ, so that when the buff pool is disabled, we could * close this SQ safely and stop receiving CQEs. In other case, e.g., if * the XDPSQ was used instead, we might run into trouble when the buff pool - * is disabled and then reenabled, but the SQ continues receiving CQEs + * is disabled and then re-enabled, but the SQ continues receiving CQEs * from the old buff pool. */ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true); @@ -183,73 +183,3 @@ void mlx5e_deactivate_xsk(struct mlx5e_channel *c) mlx5e_deactivate_rq(&c->xskrq); /* TX queue is disabled on close. */ } - -static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn) -{ - struct mlx5e_redirect_rqt_param direct_rrp = { - .is_rss = false, - { - .rqn = rqn, - }, - }; - - u32 rqtn = priv->xsk_tir[ix].rqt.rqtn; - - return mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp); -} - -int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c) -{ - return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn); -} - -int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix) -{ - return mlx5e_redirect_xsk_rqt(priv, ix, priv->drop_rq.rqn); -} - -int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs) -{ - int err, i; - - if (!priv->xsk.refcnt) - return 0; - - for (i = 0; i < chs->num; i++) { - struct mlx5e_channel *c = chs->c[i]; - - if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) - continue; - - err = mlx5e_xsk_redirect_rqt_to_channel(priv, c); - if (unlikely(err)) - goto err_stop; - } - - return 0; - -err_stop: - for (i--; i >= 0; i--) { - if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state)) - continue; - - mlx5e_xsk_redirect_rqt_to_drop(priv, i); - } - - return err; -} - -void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs) -{ - int i; - - if (!priv->xsk.refcnt) - return; - - for (i = 0; i < chs->num; i++) { - if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state)) - continue; - - mlx5e_xsk_redirect_rqt_to_drop(priv, i); - } -}
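The deleted mlx5e_xsk_redirect_rqts_to_channels() above is a good reminder of the kernel's partial-failure idiom: if iteration i fails, fall into an error label that re-walks the same loop backwards (for (i--; i >= 0; i--)) so only the iterations that actually succeeded get undone. A self-contained model of that unwind pattern (setup_one()/teardown_one() are invented stand-ins for the redirect calls):

```c
#include <stdio.h>

#define N 4

static int setup_one(int i)
{
	if (i == 2)
		return -1;	/* simulate a failure on the third item */
	printf("setup %d\n", i);
	return 0;
}

static void teardown_one(int i)
{
	printf("teardown %d\n", i);
}

static int setup_all(void)
{
	int err, i;

	for (i = 0; i < N; i++) {
		err = setup_one(i);
		if (err)
			goto err_stop;
	}
	return 0;

err_stop:
	/* Unwind only what was set up, in reverse order; items 0 and 1
	 * get torn down, the failed item 2 and untouched item 3 do not. */
	for (i--; i >= 0; i--)
		teardown_one(i);
	return err;
}

int main(void)
{
	return setup_all() ? 1 : 0;
}
```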
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h index ca20f1ff5e39..50e111b85efd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h @@ -17,9 +17,5 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, void mlx5e_close_xsk(struct mlx5e_channel *c); void mlx5e_activate_xsk(struct mlx5e_channel *c); void mlx5e_deactivate_xsk(struct mlx5e_channel *c); -int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c); -int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix); -int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs); -void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs); #endif /* __MLX5_EN_XSK_SETUP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c index e51f60b55daa..4c4ee524176c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c @@ -16,13 +16,13 @@ struct mlx5e_accel_fs_tcp { struct mlx5_flow_handle *default_rules[ACCEL_FS_TCP_NUM_TYPES]; }; -static enum mlx5e_traffic_types fs_accel2tt(enum accel_fs_tcp_type i) +static enum mlx5_traffic_types fs_accel2tt(enum accel_fs_tcp_type i) { switch (i) { case ACCEL_FS_IPV4_TCP: - return MLX5E_TT_IPV4_TCP; + return MLX5_TT_IPV4_TCP; default: /* ACCEL_FS_IPV6_TCP */ - return MLX5E_TT_IPV6_TCP; + return MLX5_TT_IPV6_TCP; } } @@ -161,7 +161,7 @@ static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv, fs_tcp = priv->fs.accel_tcp; accel_fs_t = &fs_tcp->tables[type]; - dest = mlx5e_ttc_get_default_dest(priv, fs_accel2tt(type)); + dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_accel2tt(type)); rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -307,7 +307,7 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv) for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) { /* Modify ttc rules destination to point back to the indir TIRs */ - err = mlx5e_ttc_fwd_default_dest(priv, fs_accel2tt(i)); + err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_accel2tt(i)); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] default destination failed, err(%d)\n", @@ -329,7 +329,7 @@ static int accel_fs_tcp_enable(struct mlx5e_priv *priv) dest.ft = priv->fs.accel_tcp->tables[i].t; /* Modify ttc rules destination to point on the accel_fs FTs */ - err = mlx5e_ttc_fwd_dest(priv, fs_accel2tt(i), &dest); + err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_accel2tt(i), &dest); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
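fs_tcp.c above, and ipsec_fs.c and en_arfs.c below, all follow the same contract against the now-shared TTC table: on feature enable, repoint the relevant traffic-type rules at the feature's own flow table with mlx5_ttc_fwd_dest(); on disable, call mlx5_ttc_fwd_default_dest(), which restores the destination the rule was created with. A small model of that redirect/restore contract (plain C; the table contents and names are illustrative, not the driver's):

```c
#include <stdio.h>

/* Model of a TTC rule: current destination plus the saved default. */
struct ttc_rule {
	const char *dest;
	const char *default_dest;
};

static struct ttc_rule ttc[2] = {
	{ "tir:ipv4_tcp", "tir:ipv4_tcp" },
	{ "tir:ipv6_tcp", "tir:ipv6_tcp" },
};

/* mlx5_ttc_fwd_dest() analogue: steer a traffic type somewhere else. */
static void ttc_fwd_dest(int tt, const char *dest)
{
	ttc[tt].dest = dest;
}

/* mlx5_ttc_fwd_default_dest() analogue: restore the saved default. */
static void ttc_fwd_default_dest(int tt)
{
	ttc[tt].dest = ttc[tt].default_dest;
}

int main(void)
{
	ttc_fwd_dest(0, "ft:accel_tcp_v4");	/* feature enable */
	printf("ipv4_tcp -> %s\n", ttc[0].dest);
	ttc_fwd_default_dest(0);		/* feature disable */
	printf("ipv4_tcp -> %s\n", ttc[0].dest);
	return 0;
}
```

Keeping the default destination inside the rule itself is what lets several features (accel TCP, IPsec, aRFS) share one TTC table without knowing about each other.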
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 34119ce92031..17da23dff0ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -41,11 +41,11 @@ struct mlx5e_ipsec_tx { }; /* IPsec RX flow steering */ -static enum mlx5e_traffic_types fs_esp2tt(enum accel_fs_esp_type i) +static enum mlx5_traffic_types fs_esp2tt(enum accel_fs_esp_type i) { if (i == ACCEL_FS_ESP4) - return MLX5E_TT_IPV4_IPSEC_ESP; - return MLX5E_TT_IPV6_IPSEC_ESP; + return MLX5_TT_IPV4_IPSEC_ESP; + return MLX5_TT_IPV6_IPSEC_ESP; } static int rx_err_add_rule(struct mlx5e_priv *priv, @@ -265,7 +265,8 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type) accel_esp = priv->ipsec->rx_fs; fs_prot = &accel_esp->fs_prot[type]; - fs_prot->default_dest = mlx5e_ttc_get_default_dest(priv, fs_esp2tt(type)); + fs_prot->default_dest = + mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type)); err = rx_err_create_ft(priv, fs_prot, &fs_prot->rx_err); if (err) @@ -301,7 +302,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type) /* connect */ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = fs_prot->ft; - mlx5e_ttc_fwd_dest(priv, fs_esp2tt(type), &dest); + mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest); out: mutex_unlock(&fs_prot->prot_mutex); @@ -320,7 +321,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type) goto out; /* disconnect */ - mlx5e_ttc_fwd_default_dest(priv, fs_esp2tt(type)); + mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_esp2tt(type)); /* remove FT */ rx_destroy(priv, type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 4e58fade7a60..62abce008c7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -49,7 +49,7 @@ struct mlx5e_ktls_offload_context_rx { struct mlx5e_rq_stats *rq_stats; struct mlx5e_tls_sw_stats *sw_stats; struct completion add_ctx; - u32 tirn; + struct mlx5e_tir tir; u32 key_id; u32 rxq; DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS); @@ -99,31 +99,22 @@ mlx5e_ktls_rx_resync_create_resp_list(void) return resp_list; } -static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn) +static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn) { - int err, inlen; - void *tirc; - u32 *in; + struct mlx5e_tir_builder *builder; + int err; - inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) + builder = mlx5e_tir_builder_alloc(false); + if (!builder) return -ENOMEM; - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - - MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); - MLX5_SET(tirc, tirc, indirect_table, rqtn); - MLX5_SET(tirc, tirc, tls_en, 1); - MLX5_SET(tirc, tirc, self_lb_block, - MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST | - MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST); + mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false); + mlx5e_tir_builder_build_direct(builder); + mlx5e_tir_builder_build_tls(builder); + err = mlx5e_tir_init(tir, builder, mdev, false); - err = mlx5_core_create_tir(mdev, in, tirn); + mlx5e_tir_builder_free(builder); - kvfree(in); return err; } @@ -139,7 +130,8 @@ static void accel_rule_handle_work(struct work_struct *work) goto out; rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk, - priv_rx->tirn, MLX5_FS_DEFAULT_FLOW_TAG); + mlx5e_tir_get_tirn(&priv_rx->tir), + MLX5_FS_DEFAULT_FLOW_TAG); if (!IS_ERR_OR_NULL(rule)) accel_rule->rule = rule; out: @@ -173,8 +165,8 @@ post_static_params(struct mlx5e_icosq *sq, pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi); mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info, - priv_rx->tirn, priv_rx->key_id, - priv_rx->resync.seq, false, + mlx5e_tir_get_tirn(&priv_rx->tir), + priv_rx->key_id, priv_rx->resync.seq, false, TLS_OFFLOAD_CTX_DIR_RX); wi = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS, @@ -202,8 +194,9 @@ post_progress_params(struct mlx5e_icosq *sq, pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi); - mlx5e_ktls_build_progress_params(wqe,
sq->pc, sq->sqn, priv_rx->tirn, false, - next_record_tcp_sn, + mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, + mlx5e_tir_get_tirn(&priv_rx->tir), + false, next_record_tcp_sn, TLS_OFFLOAD_CTX_DIR_RX); wi = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS, @@ -325,7 +318,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq, psv = &wqe->psv; psv->num_psv = 1 << 4; psv->l_key = sq->channel->mkey_be; - psv->psv_index[0] = cpu_to_be32(priv_rx->tirn); + psv->psv_index[0] = cpu_to_be32(mlx5e_tir_get_tirn(&priv_rx->tir)); psv->va = cpu_to_be64(buf->dma_addr); wi = (struct mlx5e_icosq_wqe_info) { @@ -635,9 +628,9 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, priv_rx->sw_stats = &priv->tls->sw_stats; mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx); - rqtn = priv->direct_tir[rxq].rqt.rqtn; + rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq); - err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn); + err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn); if (err) goto err_create_tir; @@ -658,7 +651,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, return 0; err_post_wqes: - mlx5_core_destroy_tir(mdev, priv_rx->tirn); + mlx5e_tir_destroy(&priv_rx->tir); err_create_tir: mlx5_ktls_destroy_key(mdev, priv_rx->key_id); err_create_key: @@ -693,7 +686,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) if (priv_rx->rule.rule) mlx5e_accel_fs_del_sk(priv_rx->rule.rule); - mlx5_core_destroy_tir(mdev, priv_rx->tirn); + mlx5e_tir_destroy(&priv_rx->tir); mlx5_ktls_destroy_key(mdev, priv_rx->key_id); /* priv_rx should normally be freed here, but if there is an outstanding * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 25403af32859..fe5d82fa6e92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -98,17 +98,17 @@ struct arfs_rule { for (j = 0; j < ARFS_HASH_SIZE; j++) \ hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist) -static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type) +static enum mlx5_traffic_types arfs_get_tt(enum arfs_type type) { switch (type) { case ARFS_IPV4_TCP: - return MLX5E_TT_IPV4_TCP; + return MLX5_TT_IPV4_TCP; case ARFS_IPV4_UDP: - return MLX5E_TT_IPV4_UDP; + return MLX5_TT_IPV4_UDP; case ARFS_IPV6_TCP: - return MLX5E_TT_IPV6_TCP; + return MLX5_TT_IPV6_TCP; case ARFS_IPV6_UDP: - return MLX5E_TT_IPV6_UDP; + return MLX5_TT_IPV6_UDP; default: return -EINVAL; } @@ -120,7 +120,7 @@ static int arfs_disable(struct mlx5e_priv *priv) for (i = 0; i < ARFS_NUM_TYPES; i++) { /* Modify ttc rules destination back to their default */ - err = mlx5e_ttc_fwd_default_dest(priv, arfs_get_tt(i)); + err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, arfs_get_tt(i)); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] default destination failed, err(%d)\n", @@ -149,7 +149,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv) for (i = 0; i < ARFS_NUM_TYPES; i++) { dest.ft = priv->fs.arfs->arfs_tables[i].ft.t; /* Modify ttc rules destination to point on the aRFS FTs */ - err = mlx5e_ttc_fwd_dest(priv, arfs_get_tt(i), &dest); + err = mlx5_ttc_fwd_dest(priv->fs.ttc, arfs_get_tt(i), &dest); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] dest to arfs, failed err(%d)\n", @@ -192,10 +192,9 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, enum arfs_type type) { struct 
arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type]; - struct mlx5e_tir *tir = priv->indir_tir; struct mlx5_flow_destination dest = {}; MLX5_DECLARE_FLOW_ACT(flow_act); - enum mlx5e_traffic_types tt; + enum mlx5_traffic_types tt; int err = 0; dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; @@ -206,10 +205,10 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, return -EINVAL; } - /* FIXME: Must use mlx5e_ttc_get_default_dest(), + /* FIXME: Must use mlx5_ttc_get_default_dest(), * but can't since TTC default is not setup yet ! */ - dest.tir_num = tir[tt].tirn; + dest.tir_num = mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt); arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL, &flow_act, &dest, 1); @@ -553,7 +552,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv, 16); } dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; - dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn; + dest.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, arfs_rule->rxq); rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -576,7 +575,7 @@ static void arfs_modify_rule_rq(struct mlx5e_priv *priv, int err = 0; dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR; - dst.tir_num = priv->direct_tir[rxq].tirn; + dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq); err = mlx5_modify_rule_destination(rule, &dst, NULL); if (err) netdev_warn(priv->netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 8c166ee56d8b..84eb7201c142 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -33,36 +33,9 @@ #include "en.h" /* mlx5e global resources should be placed in this file. - * Global resources are common to all the netdevices crated on the same nic. + * Global resources are common to all the netdevices created on the same nic. */ -int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in) -{ - struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; - int err; - - err = mlx5_core_create_tir(mdev, in, &tir->tirn); - if (err) - return err; - - mutex_lock(&res->td.list_lock); - list_add(&tir->list, &res->td.tirs_list); - mutex_unlock(&res->td.list_lock); - - return 0; -} - -void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, - struct mlx5e_tir *tir) -{ - struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; - - mutex_lock(&res->td.list_lock); - mlx5_core_destroy_tir(mdev, tir->tirn); - list_del(&tir->list); - mutex_unlock(&res->td.list_lock); -} - void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc) { bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index bd72572e03d1..2cfd12953909 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -420,6 +420,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, unsigned int count = ch->combined_count; struct mlx5e_params new_params; bool arfs_enabled; + int rss_cnt; bool opened; int err = 0; @@ -455,6 +456,27 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, goto out; } + /* Don't allow changing the number of channels if non-default RSS contexts exist, + * the kernel doesn't protect against set_channels operations that break them. 
+ */ + rss_cnt = mlx5e_rx_res_rss_cnt(priv->rx_res) - 1; + if (rss_cnt) { + err = -EINVAL; + netdev_err(priv->netdev, "%s: Non-default RSS contexts exist (%d), cannot change the number of channels\n", + __func__, rss_cnt); + goto out; + } + + /* Don't allow changing the number of channels if MQPRIO mode channel offload is active, + * because it defines a partition over the channels queues. + */ + if (cur_params->mqprio.mode == TC_MQPRIO_MODE_CHANNEL) { + err = -EINVAL; + netdev_err(priv->netdev, "%s: MQPRIO mode channel offload is active, cannot change the number of channels\n", + __func__); + goto out; + } + new_params = *cur_params; new_params.num_channels = count; @@ -512,7 +534,9 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, } static int mlx5e_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -630,7 +654,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, } static int mlx5e_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -1172,7 +1198,7 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev, u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv) { - return sizeof(priv->rss_params.toeplitz_hash_key); + return sizeof_field(struct mlx5e_rss_params_hash, toeplitz_hash_key); } static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev) @@ -1194,88 +1220,64 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev) return mlx5e_ethtool_get_rxfh_indir_size(priv); } -int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int mlx5e_get_rxfh_context(struct net_device *dev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context) { - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_rss_params *rss = &priv->rss_params; - - if (indir) - memcpy(indir, rss->indirection_rqt, - sizeof(rss->indirection_rqt)); - - if (key) - memcpy(key, rss->toeplitz_hash_key, - sizeof(rss->toeplitz_hash_key)); - - if (hfunc) - *hfunc = rss->hfunc; + struct mlx5e_priv *priv = netdev_priv(dev); + int err; - return 0; + mutex_lock(&priv->state_lock); + err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context, indir, key, hfunc); + mutex_unlock(&priv->state_lock); + return err; } -int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc, + u32 *rss_context, bool delete) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_rss_params *rss = &priv->rss_params; - int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - bool refresh_tirs = false; - bool refresh_rqt = false; - void *in; - - if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && - (hfunc != ETH_RSS_HASH_XOR) && - (hfunc != ETH_RSS_HASH_TOP)) - return -EINVAL; - - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; + int err; mutex_lock(&priv->state_lock); - - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hfunc) { - rss->hfunc = hfunc; - refresh_rqt = true; - refresh_tirs = true; - } - - if (indir) { - memcpy(rss->indirection_rqt, indir, - sizeof(rss->indirection_rqt)); - refresh_rqt = true; + if (delete) { + err = mlx5e_rx_res_rss_destroy(priv->rx_res, 
*rss_context); + goto unlock; } - if (key) { - memcpy(rss->toeplitz_hash_key, key, - sizeof(rss->toeplitz_hash_key)); - refresh_tirs = refresh_tirs || rss->hfunc == ETH_RSS_HASH_TOP; - } + if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { + unsigned int count = priv->channels.params.num_channels; - if (refresh_rqt && test_bit(MLX5E_STATE_OPENED, &priv->state)) { - struct mlx5e_redirect_rqt_param rrp = { - .is_rss = true, - { - .rss = { - .hfunc = rss->hfunc, - .channels = &priv->channels, - }, - }, - }; - u32 rqtn = priv->indir_rqt.rqtn; - - mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); + err = mlx5e_rx_res_rss_init(priv->rx_res, rss_context, count); + if (err) + goto unlock; } - if (refresh_tirs) - mlx5e_modify_tirs_hash(priv, in); + err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context, indir, key, + hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc); +unlock: mutex_unlock(&priv->state_lock); + return err; +} - kvfree(in); +int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + return mlx5e_get_rxfh_context(netdev, indir, key, hfunc, 0); +} - return 0; +int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int err; + + mutex_lock(&priv->state_lock); + err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, 0, indir, key, + hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc); + mutex_unlock(&priv->state_lock); + return err; } #define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100 @@ -2358,6 +2360,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, .get_rxfh = mlx5e_get_rxfh, .set_rxfh = mlx5e_set_rxfh, + .get_rxfh_context = mlx5e_get_rxfh_context, + .set_rxfh_context = mlx5e_set_rxfh_context, .get_rxnfc = mlx5e_get_rxnfc, .set_rxnfc = mlx5e_set_rxnfc, .get_tunable = mlx5e_get_tunable, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 0b75fab41ae8..c06b4b938ae7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -718,7 +718,7 @@ static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv) if (!spec) return -ENOMEM; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = priv->fs.ttc.ft.t; + dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc); rule_p = &priv->fs.promisc.rule; *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); @@ -854,593 +854,59 @@ void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) ft->t = NULL; } -static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc) -{ - int i; - - for (i = 0; i < MLX5E_NUM_TT; i++) { - if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) { - mlx5_del_flow_rules(ttc->rules[i].rule); - ttc->rules[i].rule = NULL; - } - } - - for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) { - if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) { - mlx5_del_flow_rules(ttc->tunnel_rules[i]); - ttc->tunnel_rules[i] = NULL; - } - } -} - -struct mlx5e_etype_proto { - u16 etype; - u8 proto; -}; - -static struct mlx5e_etype_proto ttc_rules[] = { - [MLX5E_TT_IPV4_TCP] = { - .etype = ETH_P_IP, - .proto = IPPROTO_TCP, - }, - [MLX5E_TT_IPV6_TCP] = { - .etype = ETH_P_IPV6, - .proto = IPPROTO_TCP, - }, - [MLX5E_TT_IPV4_UDP] = { - .etype = ETH_P_IP, - .proto = IPPROTO_UDP, - }, - [MLX5E_TT_IPV6_UDP] = { - .etype = ETH_P_IPV6, - .proto = IPPROTO_UDP, - }, - [MLX5E_TT_IPV4_IPSEC_AH] = { - .etype = ETH_P_IP, - .proto = IPPROTO_AH, - }, - [MLX5E_TT_IPV6_IPSEC_AH] = { - .etype = ETH_P_IPV6, - .proto = 
IPPROTO_AH, - }, - [MLX5E_TT_IPV4_IPSEC_ESP] = { - .etype = ETH_P_IP, - .proto = IPPROTO_ESP, - }, - [MLX5E_TT_IPV6_IPSEC_ESP] = { - .etype = ETH_P_IPV6, - .proto = IPPROTO_ESP, - }, - [MLX5E_TT_IPV4] = { - .etype = ETH_P_IP, - .proto = 0, - }, - [MLX5E_TT_IPV6] = { - .etype = ETH_P_IPV6, - .proto = 0, - }, - [MLX5E_TT_ANY] = { - .etype = 0, - .proto = 0, - }, -}; - -static struct mlx5e_etype_proto ttc_tunnel_rules[] = { - [MLX5E_TT_IPV4_GRE] = { - .etype = ETH_P_IP, - .proto = IPPROTO_GRE, - }, - [MLX5E_TT_IPV6_GRE] = { - .etype = ETH_P_IPV6, - .proto = IPPROTO_GRE, - }, - [MLX5E_TT_IPV4_IPIP] = { - .etype = ETH_P_IP, - .proto = IPPROTO_IPIP, - }, - [MLX5E_TT_IPV6_IPIP] = { - .etype = ETH_P_IPV6, - .proto = IPPROTO_IPIP, - }, - [MLX5E_TT_IPV4_IPV6] = { - .etype = ETH_P_IP, - .proto = IPPROTO_IPV6, - }, - [MLX5E_TT_IPV6_IPV6] = { - .etype = ETH_P_IPV6, - .proto = IPPROTO_IPV6, - }, - -}; - -u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt) -{ - return ttc_tunnel_rules[tt].proto; -} - -static bool mlx5e_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev, u8 proto_type) -{ - switch (proto_type) { - case IPPROTO_GRE: - return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); - case IPPROTO_IPIP: - case IPPROTO_IPV6: - return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) || - MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx)); - default: - return false; - } -} - -static bool mlx5e_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev) -{ - int tt; - - for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { - if (mlx5e_tunnel_proto_supported_rx(mdev, ttc_tunnel_rules[tt].proto)) - return true; - } - return false; -} - -bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) -{ - return (mlx5e_tunnel_any_rx_proto_supported(mdev) && - MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); -} - -static u8 mlx5e_etype_to_ipv(u16 ethertype) -{ - if (ethertype == ETH_P_IP) - return 4; - - if (ethertype == ETH_P_IPV6) - return 6; - - return 0; -} - -static struct mlx5_flow_handle * -mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, - struct mlx5_flow_table *ft, - struct mlx5_flow_destination *dest, - u16 etype, - u8 proto) -{ - int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version); - MLX5_DECLARE_FLOW_ACT(flow_act); - struct mlx5_flow_handle *rule; - struct mlx5_flow_spec *spec; - int err = 0; - u8 ipv; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return ERR_PTR(-ENOMEM); - - if (proto) { - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto); - } - - ipv = mlx5e_etype_to_ipv(etype); - if (match_ipv_outer && ipv) { - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv); - } else if (etype) { - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype); - } - - rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(priv->netdev, "%s: add rule failed\n", __func__); - } - - kvfree(spec); - return err ? 
ERR_PTR(err) : rule; -} - -static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, - struct ttc_params *params, - struct mlx5e_ttc_table *ttc) -{ - struct mlx5_flow_destination dest = {}; - struct mlx5_flow_handle **trules; - struct mlx5e_ttc_rule *rules; - struct mlx5_flow_table *ft; - int tt; - int err; - - ft = ttc->ft.t; - rules = ttc->rules; - - dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; - for (tt = 0; tt < MLX5E_NUM_TT; tt++) { - struct mlx5e_ttc_rule *rule = &rules[tt]; - - if (tt == MLX5E_TT_ANY) - dest.tir_num = params->any_tt_tirn; - else - dest.tir_num = params->indir_tirn[tt]; - - rule->rule = mlx5e_generate_ttc_rule(priv, ft, &dest, - ttc_rules[tt].etype, - ttc_rules[tt].proto); - if (IS_ERR(rule->rule)) { - err = PTR_ERR(rule->rule); - rule->rule = NULL; - goto del_rules; - } - rule->default_dest = dest; - } - - if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) - return 0; - - trules = ttc->tunnel_rules; - dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = params->inner_ttc->ft.t; - for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { - if (!mlx5e_tunnel_proto_supported_rx(priv->mdev, - ttc_tunnel_rules[tt].proto)) - continue; - trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, - ttc_tunnel_rules[tt].etype, - ttc_tunnel_rules[tt].proto); - if (IS_ERR(trules[tt])) { - err = PTR_ERR(trules[tt]); - trules[tt] = NULL; - goto del_rules; - } - } - - return 0; - -del_rules: - mlx5e_cleanup_ttc_rules(ttc); - return err; -} - -static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, - bool use_ipv) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5e_flow_table *ft = &ttc->ft; - int ix = 0; - u32 *in; - int err; - u8 *mc; - - ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS, - sizeof(*ft->g), GFP_KERNEL); - if (!ft->g) - return -ENOMEM; - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) { - kfree(ft->g); - ft->g = NULL; - return -ENOMEM; - } - - /* L4 Group */ - mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); - if (use_ipv) - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version); - else - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_TTC_GROUP1_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* L3 Group */ - MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_TTC_GROUP2_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* Any Group */ - memset(in, 0, inlen); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_TTC_GROUP3_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - kvfree(in); - return 0; - -err: - err = PTR_ERR(ft->g[ft->num_groups]); - ft->g[ft->num_groups] = NULL; - kvfree(in); - - return err; -} - -static struct mlx5_flow_handle * -mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv, - struct mlx5_flow_table *ft, - struct mlx5_flow_destination *dest, - u16 etype, u8 proto) -{ - MLX5_DECLARE_FLOW_ACT(flow_act); - struct 
mlx5_flow_handle *rule; - struct mlx5_flow_spec *spec; - int err = 0; - u8 ipv; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return ERR_PTR(-ENOMEM); - - ipv = mlx5e_etype_to_ipv(etype); - if (etype && ipv) { - spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version); - MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv); - } - - if (proto) { - spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol); - MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto); - } - - rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(priv->netdev, "%s: add rule failed\n", __func__); - } - - kvfree(spec); - return err ? ERR_PTR(err) : rule; -} - -static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv, - struct ttc_params *params, - struct mlx5e_ttc_table *ttc) -{ - struct mlx5_flow_destination dest = {}; - struct mlx5e_ttc_rule *rules; - struct mlx5_flow_table *ft; - int err; - int tt; - - ft = ttc->ft.t; - rules = ttc->rules; - dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; - - for (tt = 0; tt < MLX5E_NUM_TT; tt++) { - struct mlx5e_ttc_rule *rule = &rules[tt]; - - if (tt == MLX5E_TT_ANY) - dest.tir_num = params->any_tt_tirn; - else - dest.tir_num = params->indir_tirn[tt]; - - rule->rule = mlx5e_generate_inner_ttc_rule(priv, ft, &dest, - ttc_rules[tt].etype, - ttc_rules[tt].proto); - if (IS_ERR(rule->rule)) { - err = PTR_ERR(rule->rule); - rule->rule = NULL; - goto del_rules; - } - rule->default_dest = dest; - } - - return 0; - -del_rules: - - mlx5e_cleanup_ttc_rules(ttc); - return err; -} - -static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5e_flow_table *ft = &ttc->ft; - int ix = 0; - u32 *in; - int err; - u8 *mc; - - ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); - if (!ft->g) - return -ENOMEM; - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) { - kfree(ft->g); - ft->g = NULL; - return -ENOMEM; - } - - /* L4 Group */ - mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); - MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_INNER_TTC_GROUP1_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* L3 Group */ - MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_INNER_TTC_GROUP2_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* Any Group */ - memset(in, 0, inlen); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_INNER_TTC_GROUP3_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - kvfree(in); - return 0; - -err: - err = PTR_ERR(ft->g[ft->num_groups]); - ft->g[ft->num_groups] = NULL; - kvfree(in); - - return err; 
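/*
 * Editor's sketch: both rule generators above use the kernel's usual
 * partial-failure pattern: build entries in order and, on the first error,
 * jump to a label that tears down only what already exists. A standalone
 * version of that unwind shape; the failure point is chosen arbitrarily.
 */
#include <stdio.h>

static int create_rule(int i)   { return (i == 3) ? -1 : 0; } /* fail on #3 */
static void destroy_rule(int i) { printf("destroyed rule %d\n", i); }

/* On the first failure, tear down only rules 0..i-1 and return the error,
 * the same shape as the del_rules: labels above. */
static int create_all_rules(int n)
{
	int i, err = 0;

	for (i = 0; i < n; i++) {
		err = create_rule(i);
		if (err)
			goto del_rules;
	}
	return 0;

del_rules:
	while (--i >= 0)
		destroy_rule(i);
	return err;
}

int main(void)
{
	printf("err = %d\n", create_all_rules(5));
	return 0;
}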
-} - -void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, - struct ttc_params *ttc_params) -{ - ttc_params->any_tt_tirn = priv->direct_tir[0].tirn; - ttc_params->inner_ttc = &priv->fs.inner_ttc; -} - -void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params) +static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv, + struct ttc_params *ttc_params) { struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; + int tt; - ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE; + memset(ttc_params, 0, sizeof(*ttc_params)); + ttc_params->ns = mlx5_get_flow_namespace(priv->mdev, + MLX5_FLOW_NAMESPACE_KERNEL); ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL; ft_attr->prio = MLX5E_NIC_PRIO; + + for (tt = 0; tt < MLX5_NUM_TT; tt++) { + ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR; + ttc_params->dests[tt].tir_num = + tt == MLX5_TT_ANY ? + mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) : + mlx5e_rx_res_get_tirn_rss_inner(priv->rx_res, + tt); + } } -void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params) +void mlx5e_set_ttc_params(struct mlx5e_priv *priv, + struct ttc_params *ttc_params, bool tunnel) { struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; + int tt; - ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE; + memset(ttc_params, 0, sizeof(*ttc_params)); + ttc_params->ns = mlx5_get_flow_namespace(priv->mdev, + MLX5_FLOW_NAMESPACE_KERNEL); ft_attr->level = MLX5E_TTC_FT_LEVEL; ft_attr->prio = MLX5E_NIC_PRIO; -} - -int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, - struct mlx5e_ttc_table *ttc) -{ - struct mlx5e_flow_table *ft = &ttc->ft; - int err; - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) - return 0; - - ft->t = mlx5_create_flow_table(priv->fs.ns, ¶ms->ft_attr); - if (IS_ERR(ft->t)) { - err = PTR_ERR(ft->t); - ft->t = NULL; - return err; + for (tt = 0; tt < MLX5_NUM_TT; tt++) { + ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR; + ttc_params->dests[tt].tir_num = + tt == MLX5_TT_ANY ? 
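/*
 * Editor's sketch: the reworked mlx5e_set_ttc_params() no longer hands out
 * TIR numbers per traffic type through indir_tirn[]; it fills a destination
 * array in which every traffic type gets an RSS TIR except MLX5_TT_ANY,
 * which bypasses RSS and takes the first direct TIR. A sketch of that fill
 * loop, with hypothetical lookups standing in for the
 * mlx5e_rx_res_get_tirn_*() helpers.
 */
#include <stdio.h>

enum tt { TT_IPV4_TCP, TT_IPV6_TCP, TT_ANY, NUM_TT };

struct dest { const char *type; int tir; };

/* Stand-ins for mlx5e_rx_res_get_tirn_direct()/_rss(). */
static int tirn_direct(int ix)  { return 100 + ix; }
static int tirn_rss(enum tt tt) { return 200 + tt; }

/* Every traffic type steers to a TIR; only TT_ANY lands on the first
 * direct TIR. */
static void fill_dests(struct dest *d)
{
	int tt;

	for (tt = 0; tt < NUM_TT; tt++) {
		d[tt].type = "TIR";
		d[tt].tir = (tt == TT_ANY) ? tirn_direct(0) : tirn_rss(tt);
	}
}

int main(void)
{
	struct dest d[NUM_TT];
	int tt;

	fill_dests(d);
	for (tt = 0; tt < NUM_TT; tt++)
		printf("tt %d -> %s %d\n", tt, d[tt].type, d[tt].tir);
	return 0;
}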
+ mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) : + mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt); } - err = mlx5e_create_inner_ttc_table_groups(ttc); - if (err) - goto err; - - err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc); - if (err) - goto err; - - return 0; - -err: - mlx5e_destroy_flow_table(ft); - return err; -} - -void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv, - struct mlx5e_ttc_table *ttc) -{ - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + ttc_params->inner_ttc = tunnel; + if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->mdev)) return; - mlx5e_cleanup_ttc_rules(ttc); - mlx5e_destroy_flow_table(&ttc->ft); -} - -void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv, - struct mlx5e_ttc_table *ttc) -{ - mlx5e_cleanup_ttc_rules(ttc); - mlx5e_destroy_flow_table(&ttc->ft); -} - -int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, - struct mlx5e_ttc_table *ttc) -{ - bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version); - struct mlx5e_flow_table *ft = &ttc->ft; - int err; - - ft->t = mlx5_create_flow_table(priv->fs.ns, ¶ms->ft_attr); - if (IS_ERR(ft->t)) { - err = PTR_ERR(ft->t); - ft->t = NULL; - return err; + for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) { + ttc_params->tunnel_dests[tt].type = + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + ttc_params->tunnel_dests[tt].ft = + mlx5_get_ttc_flow_table(priv->fs.inner_ttc); } - - err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer); - if (err) - goto err; - - err = mlx5e_generate_ttc_table_rules(priv, params, ttc); - if (err) - goto err; - - return 0; -err: - mlx5e_destroy_flow_table(ft); - return err; -} - -int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type, - struct mlx5_flow_destination *new_dest) -{ - return mlx5_modify_rule_destination(priv->fs.ttc.rules[type].rule, new_dest, NULL); -} - -struct mlx5_flow_destination -mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type) -{ - struct mlx5_flow_destination *dest = &priv->fs.ttc.rules[type].default_dest; - - WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR, - "TTC[%d] default dest is not setup yet", type); - - return *dest; -} - -int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type) -{ - struct mlx5_flow_destination dest = mlx5e_ttc_get_default_dest(priv, type); - - return mlx5e_ttc_fwd_dest(priv, type, &dest); } static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, @@ -1473,7 +939,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, outer_headers.dmac_47_16); dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = priv->fs.ttc.ft.t; + dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc); switch (type) { case MLX5E_FULLMATCH: @@ -1769,10 +1235,47 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv) kvfree(priv->fs.vlan); } -int mlx5e_create_flow_steering(struct mlx5e_priv *priv) +static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv) +{ + if (!mlx5_tunnel_inner_ft_supported(priv->mdev)) + return; + mlx5_destroy_ttc_table(priv->fs.inner_ttc); +} + +void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv) +{ + mlx5_destroy_ttc_table(priv->fs.ttc); +} + +static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv) { struct ttc_params ttc_params = {}; - int tt, err; + + if (!mlx5_tunnel_inner_ft_supported(priv->mdev)) + return 0; + + mlx5e_set_inner_ttc_params(priv, &ttc_params); + priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev, + &ttc_params); 
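/*
 * Editor's sketch: the table-create paths now return the table through the
 * kernel's pointer-or-errno convention (IS_ERR()/PTR_ERR() on the result of
 * mlx5_create_ttc_table()), so a single return value carries either the
 * object or the failure. A userspace re-implementation of that encoding,
 * assuming the standard MAX_ERRNO folding.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095
static void *ERR_PTR(long err)     { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	/* Small negative values fold into the top page of address space. */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct ttc_table { int dummy; };

static struct ttc_table *create_ttc_table(int fail)
{
	struct ttc_table *t;

	if (fail)
		return ERR_PTR(-ENOMEM);
	t = malloc(sizeof(*t));
	return t ? t : ERR_PTR(-ENOMEM);
}

int main(void)
{
	struct ttc_table *t = create_ttc_table(1);

	if (IS_ERR(t))
		printf("create failed: %ld\n", PTR_ERR(t));
	else
		free(t);
	return 0;
}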
+ if (IS_ERR(priv->fs.inner_ttc)) + return PTR_ERR(priv->fs.inner_ttc); + return 0; +} + +int mlx5e_create_ttc_table(struct mlx5e_priv *priv) +{ + struct ttc_params ttc_params = {}; + + mlx5e_set_ttc_params(priv, &ttc_params, true); + priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params); + if (IS_ERR(priv->fs.ttc)) + return PTR_ERR(priv->fs.ttc); + return 0; +} + +int mlx5e_create_flow_steering(struct mlx5e_priv *priv) +{ + int err; priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); @@ -1787,23 +1290,15 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) priv->netdev->hw_features &= ~NETIF_F_NTUPLE; } - mlx5e_set_ttc_basic_params(priv, &ttc_params); - mlx5e_set_inner_ttc_ft_params(&ttc_params); - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn; - - err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc); + err = mlx5e_create_inner_ttc_table(priv); if (err) { - netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n", + netdev_err(priv->netdev, + "Failed to create inner ttc table, err=%d\n", err); goto err_destroy_arfs_tables; } - mlx5e_set_ttc_ft_params(&ttc_params); - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn; - - err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc); + err = mlx5e_create_ttc_table(priv); if (err) { netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", err); @@ -1837,9 +1332,9 @@ err_destory_vlan_table: err_destroy_l2_table: mlx5e_destroy_l2_table(priv); err_destroy_ttc_table: - mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); + mlx5e_destroy_ttc_table(priv); err_destroy_inner_ttc_table: - mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); + mlx5e_destroy_inner_ttc_table(priv); err_destroy_arfs_tables: mlx5e_arfs_destroy_tables(priv); @@ -1851,8 +1346,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv) mlx5e_ptp_free_rx_fs(priv); mlx5e_destroy_vlan_table(priv); mlx5e_destroy_l2_table(priv); - mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); - mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); + mlx5e_destroy_ttc_table(priv); + mlx5e_destroy_inner_ttc_table(priv); mlx5e_arfs_destroy_tables(priv); mlx5e_ethtool_cleanup_steering(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index b416a8ee2eed..03693fa74a70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -35,11 +35,19 @@ #include "en/params.h" #include "en/xsk/pool.h" +static int flow_type_to_traffic_type(u32 flow_type); + +static u32 flow_type_mask(u32 flow_type) +{ + return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); +} + struct mlx5e_ethtool_rule { struct list_head list; struct ethtool_rx_flow_spec flow_spec; struct mlx5_flow_handle *rule; struct mlx5e_ethtool_table *eth_ft; + struct mlx5e_rss *rss; }; static void put_flow_table(struct mlx5e_ethtool_table *eth_ft) @@ -66,7 +74,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv, int table_size; int prio; - switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + switch (flow_type_mask(fs->flow_type)) { case TCP_V4_FLOW: case UDP_V4_FLOW: case TCP_V6_FLOW: @@ -329,7 +337,7 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v, outer_headers); void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers); - u32 flow_type = 
fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + u32 flow_type = flow_type_mask(fs->flow_type); switch (flow_type) { case TCP_V4_FLOW: @@ -397,10 +405,53 @@ static bool outer_header_zero(u32 *match_criteria) size - 1); } +static int flow_get_tirn(struct mlx5e_priv *priv, + struct mlx5e_ethtool_rule *eth_rule, + struct ethtool_rx_flow_spec *fs, + u32 rss_context, u32 *tirn) +{ + if (fs->flow_type & FLOW_RSS) { + struct mlx5e_lro_param lro_param; + struct mlx5e_rss *rss; + u32 flow_type; + int err; + int tt; + + rss = mlx5e_rx_res_rss_get(priv->rx_res, rss_context); + if (!rss) + return -ENOENT; + + flow_type = flow_type_mask(fs->flow_type); + tt = flow_type_to_traffic_type(flow_type); + if (tt < 0) + return -EINVAL; + + lro_param = mlx5e_get_lro_param(&priv->channels.params); + err = mlx5e_rss_obtain_tirn(rss, tt, &lro_param, false, tirn); + if (err) + return err; + eth_rule->rss = rss; + mlx5e_rss_refcnt_inc(eth_rule->rss); + } else { + struct mlx5e_params *params = &priv->channels.params; + enum mlx5e_rq_group group; + u16 ix; + + mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group); + + *tirn = group == MLX5E_RQ_GROUP_XSK ? + mlx5e_rx_res_get_tirn_xsk(priv->rx_res, ix) : + mlx5e_rx_res_get_tirn_direct(priv->rx_res, ix); + } + + return 0; +} + static struct mlx5_flow_handle * add_ethtool_flow_rule(struct mlx5e_priv *priv, + struct mlx5e_ethtool_rule *eth_rule, struct mlx5_flow_table *ft, - struct ethtool_rx_flow_spec *fs) + struct ethtool_rx_flow_spec *fs, u32 rss_context) { struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND }; struct mlx5_flow_destination *dst = NULL; @@ -419,22 +470,17 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv, if (fs->ring_cookie == RX_CLS_FLOW_DISC) { flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; } else { - struct mlx5e_params *params = &priv->channels.params; - enum mlx5e_rq_group group; - struct mlx5e_tir *tir; - u16 ix; - - mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group); - tir = group == MLX5E_RQ_GROUP_XSK ? 
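/*
 * Editor's sketch: flow_get_tirn() above resolves an ethtool rule's
 * destination two ways: rules flagged FLOW_RSS forward into a whole RSS
 * context, everything else forwards to the single queue named by the ring
 * cookie. A simplified sketch; the FLOW_RSS value and the lookup helpers
 * are stand-ins, and the real code also distinguishes XSK queues and
 * obtains the TIR through the RSS/LRO machinery.
 */
#include <stdio.h>

#define FLOW_RSS 0x20000000u /* illustrative; see ethtool.h for the real bit */

/* Hypothetical lookups standing in for mlx5e_rx_res_*() helpers. */
static int rss_tirn(unsigned int ctx)  { return 300 + (int)ctx; }
static int direct_tirn(unsigned int q) { return 100 + (int)q; }

static int flow_get_tirn(unsigned int flow_type, unsigned int rss_context,
			 unsigned long long ring_cookie)
{
	if (flow_type & FLOW_RSS)
		return rss_tirn(rss_context);
	return direct_tirn((unsigned int)ring_cookie);
}

int main(void)
{
	printf("rss rule   -> tirn %d\n", flow_get_tirn(FLOW_RSS, 2, 0));
	printf("queue rule -> tirn %d\n", flow_get_tirn(0, 0, 5));
	return 0;
}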
priv->xsk_tir : priv->direct_tir; - dst = kzalloc(sizeof(*dst), GFP_KERNEL); if (!dst) { err = -ENOMEM; goto free; } + err = flow_get_tirn(priv, eth_rule, fs, rss_context, &dst->tir_num); + if (err) + goto free; + dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; - dst->tir_num = tir[ix].tirn; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; } @@ -458,6 +504,8 @@ static void del_ethtool_rule(struct mlx5e_priv *priv, { if (eth_rule->rule) mlx5_del_flow_rules(eth_rule->rule); + if (eth_rule->rss) + mlx5e_rss_refcnt_dec(eth_rule->rss); list_del(ð_rule->list); priv->fs.ethtool.tot_num_rules--; put_flow_table(eth_rule->eth_ft); @@ -618,7 +666,7 @@ static int validate_flow(struct mlx5e_priv *priv, fs->ring_cookie)) return -EINVAL; - switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + switch (flow_type_mask(fs->flow_type)) { case ETHER_FLOW: num_tuples += validate_ethter(fs); break; @@ -667,7 +715,7 @@ static int validate_flow(struct mlx5e_priv *priv, static int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, - struct ethtool_rx_flow_spec *fs) + struct ethtool_rx_flow_spec *fs, u32 rss_context) { struct mlx5e_ethtool_table *eth_ft; struct mlx5e_ethtool_rule *eth_rule; @@ -698,7 +746,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, err = -EINVAL; goto del_ethtool_rule; } - rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs); + rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context); if (IS_ERR(rule)) { err = PTR_ERR(rule); goto del_ethtool_rule; @@ -744,10 +792,20 @@ mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, return -EINVAL; list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) { - if (eth_rule->flow_spec.location == location) { - info->fs = eth_rule->flow_spec; + int index; + + if (eth_rule->flow_spec.location != location) + continue; + if (!info) return 0; - } + info->fs = eth_rule->flow_spec; + if (!eth_rule->rss) + return 0; + index = mlx5e_rx_res_rss_index(priv->rx_res, eth_rule->rss); + if (index < 0) + return index; + info->rss_context = index; + return 0; } return -ENOENT; @@ -763,7 +821,7 @@ mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, info->data = MAX_NUM_OF_ETHTOOL_RULES; while ((!err || err == -ENOENT) && idx < info->rule_cnt) { - err = mlx5e_ethtool_get_flow(priv, info, location); + err = mlx5e_ethtool_get_flow(priv, NULL, location); if (!err) rule_locs[idx++] = location; location++; @@ -785,45 +843,44 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) INIT_LIST_HEAD(&priv->fs.ethtool.rules); } -static enum mlx5e_traffic_types flow_type_to_traffic_type(u32 flow_type) +static int flow_type_to_traffic_type(u32 flow_type) { switch (flow_type) { case TCP_V4_FLOW: - return MLX5E_TT_IPV4_TCP; + return MLX5_TT_IPV4_TCP; case TCP_V6_FLOW: - return MLX5E_TT_IPV6_TCP; + return MLX5_TT_IPV6_TCP; case UDP_V4_FLOW: - return MLX5E_TT_IPV4_UDP; + return MLX5_TT_IPV4_UDP; case UDP_V6_FLOW: - return MLX5E_TT_IPV6_UDP; + return MLX5_TT_IPV6_UDP; case AH_V4_FLOW: - return MLX5E_TT_IPV4_IPSEC_AH; + return MLX5_TT_IPV4_IPSEC_AH; case AH_V6_FLOW: - return MLX5E_TT_IPV6_IPSEC_AH; + return MLX5_TT_IPV6_IPSEC_AH; case ESP_V4_FLOW: - return MLX5E_TT_IPV4_IPSEC_ESP; + return MLX5_TT_IPV4_IPSEC_ESP; case ESP_V6_FLOW: - return MLX5E_TT_IPV6_IPSEC_ESP; + return MLX5_TT_IPV6_IPSEC_ESP; case IPV4_FLOW: - return MLX5E_TT_IPV4; + return MLX5_TT_IPV4; case IPV6_FLOW: - return MLX5E_TT_IPV6; + return MLX5_TT_IPV6; default: - return MLX5E_NUM_INDIR_TIRS; + return -EINVAL; } } static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv, struct ethtool_rxnfc *nfc) 
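/*
 * Editor's sketch: flow_type_to_traffic_type() now returns a plain int, so
 * unsupported flow types come back as -EINVAL rather than the old
 * "number of TIRs" sentinel, and callers can propagate the errno directly.
 * The shape of that mapping with abbreviated enums.
 */
#include <stdio.h>
#include <errno.h>

enum { TT_IPV4_TCP, TT_IPV6_TCP, TT_IPV4_UDP, TT_IPV6_UDP };
enum { TCP_V4 = 1, TCP_V6, UDP_V4, UDP_V6 };

/* Negative errno on failure, traffic type index on success. */
static int flow_type_to_traffic_type(unsigned int flow_type)
{
	switch (flow_type) {
	case TCP_V4: return TT_IPV4_TCP;
	case TCP_V6: return TT_IPV6_TCP;
	case UDP_V4: return TT_IPV4_UDP;
	case UDP_V6: return TT_IPV6_UDP;
	default:     return -EINVAL;
	}
}

int main(void)
{
	int tt = flow_type_to_traffic_type(99);

	if (tt < 0)
		printf("unsupported flow type: %d\n", tt);
	return 0;
}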
{ - int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - enum mlx5e_traffic_types tt; u8 rx_hash_field = 0; - void *in; + int err; + int tt; tt = flow_type_to_traffic_type(nfc->flow_type); - if (tt == MLX5E_NUM_INDIR_TIRS) - return -EINVAL; + if (tt < 0) + return tt; /* RSS does not support anything other than hashing to queues * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest @@ -848,35 +905,24 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv, if (nfc->data & RXH_L4_B_2_3) rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT; - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - mutex_lock(&priv->state_lock); - - if (rx_hash_field == priv->rss_params.rx_hash_fields[tt]) - goto out; - - priv->rss_params.rx_hash_fields[tt] = rx_hash_field; - mlx5e_modify_tirs_hash(priv, in); - -out: + err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, tt, rx_hash_field); mutex_unlock(&priv->state_lock); - kvfree(in); - return 0; + + return err; } static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv, struct ethtool_rxnfc *nfc) { - enum mlx5e_traffic_types tt; u32 hash_field = 0; + int tt; tt = flow_type_to_traffic_type(nfc->flow_type); - if (tt == MLX5E_NUM_INDIR_TIRS) - return -EINVAL; + if (tt < 0) + return tt; - hash_field = priv->rss_params.rx_hash_fields[tt]; + hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, tt); nfc->data = 0; if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP) @@ -898,7 +944,7 @@ int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) switch (cmd->cmd) { case ETHTOOL_SRXCLSRLINS: - err = mlx5e_ethtool_flow_replace(priv, &cmd->fs); + err = mlx5e_ethtool_flow_replace(priv, &cmd->fs, cmd->rss_context); break; case ETHTOOL_SRXCLSRLDEL: err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 24f919ef9b8e..47efd858964d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1619,7 +1619,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); - MLX5_SET(cqc, cqc, c_eqn, eqn); + MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); @@ -1711,7 +1711,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, { int err, tc; - for (tc = 0; tc < params->num_tc; tc++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) { int txq_ix = c->ix + tc * params->num_channels; err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, @@ -1992,7 +1992,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); - c->num_tc = params->num_tc; + c->num_tc = mlx5e_get_dcb_num_tc(params); c->xdp = !!params->xdp_prog; c->stats = &priv->channel_stats[ix].ch; c->aff_mask = irq_get_effective_affinity_mask(irq); @@ -2185,400 +2185,14 @@ void mlx5e_close_channels(struct mlx5e_channels *chs) chs->num = 0; } -static int -mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt) -{ - struct mlx5_core_dev *mdev = priv->mdev; - void *rqtc; - int inlen; - int err; - u32 *in; - int i; - - inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; - in = 
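/*
 * Editor's sketch: mlx5e_set_rss_hash_opt() above translates the ethtool
 * RXH_* request bits into the device's hash-field selector before handing
 * them to mlx5e_rx_res_rss_set_hash_fields(); the get path runs the same
 * table backwards. A sketch of the forward translation; all bit values
 * here are illustrative, the real ones live in ethtool.h and the mlx5
 * MLX5_HASH_FIELD_SEL_* definitions.
 */
#include <stdio.h>

#define RXH_IP_SRC   (1 << 0)
#define RXH_IP_DST   (1 << 1)
#define RXH_L4_B_0_1 (1 << 2) /* L4 src port */
#define RXH_L4_B_2_3 (1 << 3) /* L4 dst port */

#define SEL_SRC_IP   (1 << 0)
#define SEL_DST_IP   (1 << 1)
#define SEL_L4_SPORT (1 << 2)
#define SEL_L4_DPORT (1 << 3)

static unsigned int rxh_to_hash_fields(unsigned int data)
{
	unsigned int f = 0;

	if (data & RXH_IP_SRC)   f |= SEL_SRC_IP;
	if (data & RXH_IP_DST)   f |= SEL_DST_IP;
	if (data & RXH_L4_B_0_1) f |= SEL_L4_SPORT;
	if (data & RXH_L4_B_2_3) f |= SEL_L4_DPORT;
	return f;
}

int main(void)
{
	printf("fields = 0x%x\n",
	       rxh_to_hash_fields(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_2_3));
	return 0;
}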
kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - - rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); - - MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); - MLX5_SET(rqtc, rqtc, rqt_max_size, sz); - - for (i = 0; i < sz; i++) - MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn); - - err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn); - if (!err) - rqt->enabled = true; - - kvfree(in); - return err; -} - -void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt) -{ - rqt->enabled = false; - mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn); -} - -int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv) -{ - struct mlx5e_rqt *rqt = &priv->indir_rqt; - int err; - - err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt); - if (err) - mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err); - return err; -} - -int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) -{ - int err; - int ix; - - for (ix = 0; ix < n; ix++) { - err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt); - if (unlikely(err)) - goto err_destroy_rqts; - } - - return 0; - -err_destroy_rqts: - mlx5_core_warn(priv->mdev, "create rqts failed, %d\n", err); - for (ix--; ix >= 0; ix--) - mlx5e_destroy_rqt(priv, &tirs[ix].rqt); - - return err; -} - -void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) -{ - int i; - - for (i = 0; i < n; i++) - mlx5e_destroy_rqt(priv, &tirs[i].rqt); -} - -static int mlx5e_rx_hash_fn(int hfunc) -{ - return (hfunc == ETH_RSS_HASH_TOP) ? - MLX5_RX_HASH_FN_TOEPLITZ : - MLX5_RX_HASH_FN_INVERTED_XOR8; -} - -int mlx5e_bits_invert(unsigned long a, int size) -{ - int inv = 0; - int i; - - for (i = 0; i < size; i++) - inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i; - - return inv; -} - -static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz, - struct mlx5e_redirect_rqt_param rrp, void *rqtc) -{ - int i; - - for (i = 0; i < sz; i++) { - u32 rqn; - - if (rrp.is_rss) { - int ix = i; - - if (rrp.rss.hfunc == ETH_RSS_HASH_XOR) - ix = mlx5e_bits_invert(i, ilog2(sz)); - - ix = priv->rss_params.indirection_rqt[ix]; - rqn = rrp.rss.channels->c[ix]->rq.rqn; - } else { - rqn = rrp.rqn; - } - MLX5_SET(rqtc, rqtc, rq_num[i], rqn); - } -} - -int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, - struct mlx5e_redirect_rqt_param rrp) -{ - struct mlx5_core_dev *mdev = priv->mdev; - void *rqtc; - int inlen; - u32 *in; - int err; - - inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz; - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - - rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); - - MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); - MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); - mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc); - err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen); - - kvfree(in); - return err; -} - -static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix, - struct mlx5e_redirect_rqt_param rrp) -{ - if (!rrp.is_rss) - return rrp.rqn; - - if (ix >= rrp.rss.channels->num) - return priv->drop_rq.rqn; - - return rrp.rss.channels->c[ix]->rq.rqn; -} - -static void mlx5e_redirect_rqts(struct mlx5e_priv *priv, - struct mlx5e_redirect_rqt_param rrp, - struct mlx5e_redirect_rqt_param *ptp_rrp) -{ - u32 rqtn; - int ix; - - if (priv->indir_rqt.enabled) { - /* RSS RQ table */ - rqtn = priv->indir_rqt.rqtn; - mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); - } - - for (ix = 0; ix < priv->max_nch; ix++) { - struct mlx5e_redirect_rqt_param direct_rrp = { - .is_rss = false, - 
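/*
 * Editor's sketch: among the helpers removed here, mlx5e_bits_invert() is
 * worth spelling out. It mirrors the low 'size' bits of an index, which the
 * driver applied to indirection-table lookups when the XOR8 hash function
 * was selected so the spread over RQs stays uniform. Standalone version
 * with a demonstration over a 3-bit (8-entry) table.
 */
#include <stdio.h>

/* inv bit i takes the value of a's bit (size - i - 1). */
static int bits_invert(unsigned long a, int size)
{
	int inv = 0, i;

	for (i = 0; i < size; i++)
		inv |= ((a >> (size - i - 1)) & 1) << i;
	return inv;
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		printf("%d -> %d\n", i, bits_invert(i, 3));
	return 0;
}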
{ - .rqn = mlx5e_get_direct_rqn(priv, ix, rrp) - }, - }; - - /* Direct RQ Tables */ - if (!priv->direct_tir[ix].rqt.enabled) - continue; - - rqtn = priv->direct_tir[ix].rqt.rqtn; - mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp); - } - if (ptp_rrp) { - rqtn = priv->ptp_tir.rqt.rqtn; - mlx5e_redirect_rqt(priv, rqtn, 1, *ptp_rrp); - } -} - -static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv, - struct mlx5e_channels *chs) -{ - bool rx_ptp_support = priv->profile->rx_ptp_support; - struct mlx5e_redirect_rqt_param *ptp_rrp_p = NULL; - struct mlx5e_redirect_rqt_param rrp = { - .is_rss = true, - { - .rss = { - .channels = chs, - .hfunc = priv->rss_params.hfunc, - } - }, - }; - struct mlx5e_redirect_rqt_param ptp_rrp; - - if (rx_ptp_support) { - u32 ptp_rqn; - - ptp_rrp.is_rss = false; - ptp_rrp.rqn = mlx5e_ptp_get_rqn(priv->channels.ptp, &ptp_rqn) ? - priv->drop_rq.rqn : ptp_rqn; - ptp_rrp_p = &ptp_rrp; - } - mlx5e_redirect_rqts(priv, rrp, ptp_rrp_p); -} - -static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv) -{ - bool rx_ptp_support = priv->profile->rx_ptp_support; - struct mlx5e_redirect_rqt_param drop_rrp = { - .is_rss = false, - { - .rqn = priv->drop_rq.rqn, - }, - }; - - mlx5e_redirect_rqts(priv, drop_rrp, rx_ptp_support ? &drop_rrp : NULL); -} - -static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = { - [MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, - .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, - .rx_hash_fields = MLX5_HASH_IP_L4PORTS, - }, - [MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, - .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, - .rx_hash_fields = MLX5_HASH_IP_L4PORTS, - }, - [MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, - .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, - .rx_hash_fields = MLX5_HASH_IP_L4PORTS, - }, - [MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, - .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, - .rx_hash_fields = MLX5_HASH_IP_L4PORTS, - }, - [MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, - .l4_prot_type = 0, - .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, - }, - [MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, - .l4_prot_type = 0, - .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, - }, - [MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, - .l4_prot_type = 0, - .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, - }, - [MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, - .l4_prot_type = 0, - .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, - }, - [MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, - .l4_prot_type = 0, - .rx_hash_fields = MLX5_HASH_IP, - }, - [MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, - .l4_prot_type = 0, - .rx_hash_fields = MLX5_HASH_IP, - }, -}; - -struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt) -{ - return tirc_default_config[tt]; -} - -static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc) -{ - if (!params->lro_en) - return; - -#define ROUGH_MAX_L2_L3_HDR_SZ 256 - - MLX5_SET(tirc, tirc, lro_enable_mask, - MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | - MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); - MLX5_SET(tirc, tirc, lro_max_ip_payload_size, - (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); - MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout); -} - -void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params, - const struct mlx5e_tirc_config *ttconfig, - void *tirc, bool inner) -{ - void 
*hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) : - MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); - - MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc)); - if (rss_params->hfunc == ETH_RSS_HASH_TOP) { - void *rss_key = MLX5_ADDR_OF(tirc, tirc, - rx_hash_toeplitz_key); - size_t len = MLX5_FLD_SZ_BYTES(tirc, - rx_hash_toeplitz_key); - - MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); - memcpy(rss_key, rss_params->toeplitz_hash_key, len); - } - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - ttconfig->l3_prot_type); - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, - ttconfig->l4_prot_type); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - ttconfig->rx_hash_fields); -} - -static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig, - enum mlx5e_traffic_types tt, - u32 rx_hash_fields) -{ - *ttconfig = tirc_default_config[tt]; - ttconfig->rx_hash_fields = rx_hash_fields; -} - -void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in) -{ - void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); - struct mlx5e_rss_params *rss = &priv->rss_params; - struct mlx5_core_dev *mdev = priv->mdev; - int ctxlen = MLX5_ST_SZ_BYTES(tirc); - struct mlx5e_tirc_config ttconfig; - int tt; - - MLX5_SET(modify_tir_in, in, bitmask.hash, 1); - - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - memset(tirc, 0, ctxlen); - mlx5e_update_rx_hash_fields(&ttconfig, tt, - rss->rx_hash_fields[tt]); - mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false); - mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in); - } - - /* Verify inner tirs resources allocated */ - if (!priv->inner_indir_tir[0].tirn) - return; - - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - memset(tirc, 0, ctxlen); - mlx5e_update_rx_hash_fields(&ttconfig, tt, - rss->rx_hash_fields[tt]); - mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true); - mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in); - } -} - static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) { - struct mlx5_core_dev *mdev = priv->mdev; - - void *in; - void *tirc; - int inlen; - int err; - int tt; - int ix; - - inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - - MLX5_SET(modify_tir_in, in, bitmask.lro, 1); - tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); + struct mlx5e_rx_res *res = priv->rx_res; + struct mlx5e_lro_param lro_param; - mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); + lro_param = mlx5e_get_lro_param(&priv->channels.params); - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in); - if (err) - goto free_in; - } - - for (ix = 0; ix < priv->max_nch; ix++) { - err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in); - if (err) - goto free_in; - } - -free_in: - kvfree(in); - - return err; + return mlx5e_rx_res_lro_set_param(res, &lro_param); } static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro); @@ -2649,22 +2263,34 @@ void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv) ETH_MAX_MTU); } -static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc) +static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc, + struct tc_mqprio_qopt_offload *mqprio) { - int tc; + int tc, err; netdev_reset_tc(netdev); if (ntc == 1) - return; + return 0; - netdev_set_num_tc(netdev, ntc); + err = netdev_set_num_tc(netdev, ntc); + if (err) { + netdev_WARN(netdev, "netdev_set_num_tc failed (%d), ntc = %d\n", 
err, ntc); + return err; + } - /* Map netdev TCs to offset 0 - * We have our own UP to TXQ mapping for QoS - */ - for (tc = 0; tc < ntc; tc++) - netdev_set_tc_queue(netdev, tc, nch, 0); + for (tc = 0; tc < ntc; tc++) { + u16 count, offset; + + /* For DCB mode, map netdev TCs to offset 0 + * We have our own UP to TXQ mapping for QoS + */ + count = mqprio ? mqprio->qopt.count[tc] : nch; + offset = mqprio ? mqprio->qopt.offset[tc] : 0; + netdev_set_tc_queue(netdev, tc, count, offset); + } + + return 0; } int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv) @@ -2674,7 +2300,7 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv) qos_queues = mlx5e_qos_cur_leaf_nodes(priv); nch = priv->channels.params.num_channels; - ntc = priv->channels.params.num_tc; + ntc = mlx5e_get_dcb_num_tc(&priv->channels.params); num_txqs = nch * ntc + qos_queues; if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS)) num_txqs += ntc; @@ -2698,11 +2324,12 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) old_ntc = netdev->num_tc ? : 1; nch = priv->channels.params.num_channels; - ntc = priv->channels.params.num_tc; + ntc = mlx5e_get_dcb_num_tc(&priv->channels.params); num_rxqs = nch * priv->profile->rq_groups; - mlx5e_netdev_set_tcs(netdev, nch, ntc); - + err = mlx5e_netdev_set_tcs(netdev, nch, ntc, NULL); + if (err) + goto err_out; err = mlx5e_update_tx_netdev_queues(priv); if (err) goto err_tcs; @@ -2723,7 +2350,8 @@ err_txqs: WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs)); err_tcs: - mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc); + mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc, NULL); +err_out: return err; } @@ -2759,9 +2387,9 @@ int mlx5e_num_channels_changed(struct mlx5e_priv *priv) mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params); - if (!netif_is_rxfh_configured(priv->netdev)) - mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, count); + /* This function may be called on attach, before priv->rx_res is created. */ + if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res) + mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count); return 0; } @@ -2773,7 +2401,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) int i, ch, tc, num_tc; ch = priv->channels.num; - num_tc = priv->channels.params.num_tc; + num_tc = mlx5e_get_dcb_num_tc(&priv->channels.params); for (i = 0; i < ch; i++) { for (tc = 0; tc < num_tc; tc++) { @@ -2804,7 +2432,7 @@ static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv) { /* Sync with mlx5e_select_queue. 
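/*
 * Editor's sketch: the extended mlx5e_netdev_set_tcs() now programs per-TC
 * queue ranges. In DCB mode every TC reports all channels at offset 0 (the
 * driver keeps its own UP-to-TXQ mapping); in channel mode each TC gets
 * exactly the count/offset slice the user configured. A sketch of the two
 * mappings.
 */
#include <stdio.h>
#include <stddef.h>

struct tc_map { unsigned short count, offset; };

/* mqprio == NULL selects DCB-style mapping, mirroring the new loop. */
static void map_tcs(struct tc_map *map, int ntc, unsigned short nch,
		    const struct tc_map *mqprio)
{
	int tc;

	for (tc = 0; tc < ntc; tc++) {
		map[tc].count  = mqprio ? mqprio[tc].count  : nch;
		map[tc].offset = mqprio ? mqprio[tc].offset : 0;
	}
}

int main(void)
{
	struct tc_map chan_cfg[2] = { { 4, 0 }, { 2, 4 } };
	struct tc_map out[2];
	int tc;

	map_tcs(out, 2, 8, NULL); /* DCB mode */
	for (tc = 0; tc < 2; tc++)
		printf("dcb tc%d: count=%u offset=%u\n", tc, out[tc].count, out[tc].offset);

	map_tcs(out, 2, 8, chan_cfg); /* channel mode */
	for (tc = 0; tc < 2; tc++)
		printf("ch  tc%d: count=%u offset=%u\n", tc, out[tc].count, out[tc].offset);
	return 0;
}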
*/ WRITE_ONCE(priv->num_tc_x_num_ch, - priv->channels.params.num_tc * priv->channels.num); + mlx5e_get_dcb_num_tc(&priv->channels.params) * priv->channels.num); } void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) @@ -2820,16 +2448,15 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_add_sqs_fwd_rules(priv); mlx5e_wait_channels_min_rx_wqes(&priv->channels); - mlx5e_redirect_rqts_to_channels(priv, &priv->channels); - mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels); + if (priv->rx_res) + mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels); } void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) { - mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels); - - mlx5e_redirect_rqts_to_drop(priv); + if (priv->rx_res) + mlx5e_rx_res_channels_deactivate(priv->rx_res); if (mlx5e_is_vport_rep(priv)) mlx5e_remove_sqs_fwd_rules(priv); @@ -3204,224 +2831,152 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) mlx5e_destroy_tises(priv); } -static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv, - u32 rqtn, u32 *tirc) -{ - MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.hw_objs.td.tdn); - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, indirect_table, rqtn); - MLX5_SET(tirc, tirc, tunneled_offload_en, - priv->channels.params.tunneled_offload_en); - - mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); -} - -static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, - enum mlx5e_traffic_types tt, - u32 *tirc) +static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable) { - mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc); - mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, - &tirc_default_config[tt], tirc, false); -} + int err = 0; + int i; -static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc) -{ - mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc); - MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); -} + for (i = 0; i < chs->num; i++) { + err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable); + if (err) + return err; + } -static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv, - enum mlx5e_traffic_types tt, - u32 *tirc) -{ - mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc); - mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, - &tirc_default_config[tt], tirc, true); + return 0; } -int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) +static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) { - struct mlx5e_tir *tir; - void *tirc; - int inlen; - int i = 0; int err; - u32 *in; - int tt; - - inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - memset(in, 0, inlen); - tir = &priv->indir_tir[tt]; - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - mlx5e_build_indir_tir_ctx(priv, tt, tirc); - err = mlx5e_create_tir(priv->mdev, tir, in); - if (err) { - mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err); - goto err_destroy_inner_tirs; - } - } - - if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) - goto out; + int i; - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) { - memset(in, 0, inlen); - tir = &priv->inner_indir_tir[i]; - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - mlx5e_build_inner_indir_tir_ctx(priv, i, tirc); - err = mlx5e_create_tir(priv->mdev, tir, in); - if (err) { - 
mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err); - goto err_destroy_inner_tirs; - } + for (i = 0; i < chs->num; i++) { + err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd); + if (err) + return err; } - -out: - kvfree(in); + if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state)) + return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd); return 0; - -err_destroy_inner_tirs: - for (i--; i >= 0; i--) - mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]); - - for (tt--; tt >= 0; tt--) - mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]); - - kvfree(in); - - return err; } -int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) +static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv, + struct tc_mqprio_qopt *mqprio) { - struct mlx5e_tir *tir; - void *tirc; - int inlen; - int err = 0; - u32 *in; - int ix; - - inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; + struct mlx5e_params new_params; + u8 tc = mqprio->num_tc; + int err; - for (ix = 0; ix < n; ix++) { - memset(in, 0, inlen); - tir = &tirs[ix]; - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc); - err = mlx5e_create_tir(priv->mdev, tir, in); - if (unlikely(err)) - goto err_destroy_ch_tirs; - } + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - goto out; + if (tc && tc != MLX5E_MAX_NUM_TC) + return -EINVAL; -err_destroy_ch_tirs: - mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err); - for (ix--; ix >= 0; ix--) - mlx5e_destroy_tir(priv->mdev, &tirs[ix]); + new_params = priv->channels.params; + new_params.mqprio.mode = TC_MQPRIO_MODE_DCB; + new_params.mqprio.num_tc = tc ? tc : 1; -out: - kvfree(in); + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_num_channels_changed_ctx, NULL, true); + priv->max_opened_tc = max_t(u8, priv->max_opened_tc, + mlx5e_get_dcb_num_tc(&priv->channels.params)); return err; } -void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) +static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv, + struct tc_mqprio_qopt_offload *mqprio) { + struct net_device *netdev = priv->netdev; + int agg_count = 0; int i; - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) - mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); - - /* Verify inner tirs resources allocated */ - if (!priv->inner_indir_tir[0].tirn) - return; - - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) - mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]); -} - -void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) -{ - int i; + if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 || + mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC) + return -EINVAL; - for (i = 0; i < n; i++) - mlx5e_destroy_tir(priv->mdev, &tirs[i]); -} + for (i = 0; i < mqprio->qopt.num_tc; i++) { + if (!mqprio->qopt.count[i]) { + netdev_err(netdev, "Zero size for queue-group (%d) is not supported\n", i); + return -EINVAL; + } + if (mqprio->min_rate[i]) { + netdev_err(netdev, "Min tx rate is not supported\n"); + return -EINVAL; + } + if (mqprio->max_rate[i]) { + netdev_err(netdev, "Max tx rate is not supported\n"); + return -EINVAL; + } -static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable) -{ - int err = 0; - int i; + if (mqprio->qopt.offset[i] != agg_count) { + netdev_err(netdev, "Discontinuous queues config is not supported\n"); + return -EINVAL; + } + agg_count += mqprio->qopt.count[i]; + } - for (i = 0; i < chs->num; i++) { - err = 
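/*
 * Editor's sketch: mlx5e_mqprio_channel_validate() above accepts only queue
 * layouts the hardware model can express: non-empty groups, laid out
 * contiguously from offset 0, no per-TC rate limits, and fitting within the
 * configured channels. The contiguity check reduced to its essentials.
 */
#include <stdio.h>
#include <errno.h>

static int validate_mqprio(const int *count, const int *offset, int ntc,
			   int num_channels)
{
	int agg = 0, i;

	if (ntc < 1 || offset[0] != 0)
		return -EINVAL;

	for (i = 0; i < ntc; i++) {
		if (!count[i])
			return -EINVAL; /* empty queue group */
		if (offset[i] != agg)
			return -EINVAL; /* hole or overlap */
		agg += count[i];
	}
	return agg > num_channels ? -EINVAL : 0;
}

int main(void)
{
	int count[]  = { 4, 2 };
	int offset[] = { 0, 4 };

	printf("ok?   %d\n", validate_mqprio(count, offset, 2, 8)); /* 0 */
	offset[1] = 5;
	printf("hole? %d\n", validate_mqprio(count, offset, 2, 8)); /* -EINVAL */
	return 0;
}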
mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable); - if (err) - return err; + if (priv->channels.params.num_channels < agg_count) { + netdev_err(netdev, "Num of queues (%d) exceeds available (%d)\n", + agg_count, priv->channels.params.num_channels); + return -EINVAL; } return 0; } -static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) +static int mlx5e_mqprio_channel_set_tcs_ctx(struct mlx5e_priv *priv, void *ctx) { - int err; - int i; + struct tc_mqprio_qopt_offload *mqprio = (struct tc_mqprio_qopt_offload *)ctx; + struct net_device *netdev = priv->netdev; + u8 num_tc; - for (i = 0; i < chs->num; i++) { - err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd); - if (err) - return err; - } - if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state)) - return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd); + if (priv->channels.params.mqprio.mode != TC_MQPRIO_MODE_CHANNEL) + return -EINVAL; + + num_tc = priv->channels.params.mqprio.num_tc; + mlx5e_netdev_set_tcs(netdev, 0, num_tc, mqprio); return 0; } -static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, - struct tc_mqprio_qopt *mqprio) +static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv, + struct tc_mqprio_qopt_offload *mqprio) { struct mlx5e_params new_params; - u8 tc = mqprio->num_tc; - int err = 0; + int err; - mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + err = mlx5e_mqprio_channel_validate(priv, mqprio); + if (err) + return err; - if (tc && tc != MLX5E_MAX_NUM_TC) - return -EINVAL; + new_params = priv->channels.params; + new_params.mqprio.mode = TC_MQPRIO_MODE_CHANNEL; + new_params.mqprio.num_tc = mqprio->qopt.num_tc; + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_mqprio_channel_set_tcs_ctx, mqprio, true); - mutex_lock(&priv->state_lock); + return err; +} +static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, + struct tc_mqprio_qopt_offload *mqprio) +{ /* MQPRIO is another toplevel qdisc that can't be attached * simultaneously with the offloaded HTB. */ - if (WARN_ON(priv->htb.maj_id)) { - err = -EINVAL; - goto out; - } - - new_params = priv->channels.params; - new_params.num_tc = tc ? 
tc : 1; - - err = mlx5e_safe_switch_params(priv, &new_params, - mlx5e_num_channels_changed_ctx, NULL, true); + if (WARN_ON(priv->htb.maj_id)) + return -EINVAL; -out: - priv->max_opened_tc = max_t(u8, priv->max_opened_tc, - priv->channels.params.num_tc); - mutex_unlock(&priv->state_lock); - return err; + switch (mqprio->mode) { + case TC_MQPRIO_MODE_DCB: + return mlx5e_setup_tc_mqprio_dcb(priv, &mqprio->qopt); + case TC_MQPRIO_MODE_CHANNEL: + return mlx5e_setup_tc_mqprio_channel(priv, mqprio); + default: + return -EOPNOTSUPP; + } } static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb) @@ -3445,8 +3000,7 @@ static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offloa return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid, htb->rate, htb->ceil, htb->extack); case TC_HTB_LEAF_DEL: - return mlx5e_htb_leaf_del(priv, htb->classid, &htb->moved_qid, &htb->qid, - htb->extack); + return mlx5e_htb_leaf_del(priv, &htb->classid, htb->extack); case TC_HTB_LEAF_DEL_LAST: case TC_HTB_LEAF_DEL_LAST_FORCE: return mlx5e_htb_leaf_del_last(priv, htb->classid, @@ -3493,7 +3047,10 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, priv, priv, true); } case TC_SETUP_QDISC_MQPRIO: - return mlx5e_setup_tc_mqprio(priv, type_data); + mutex_lock(&priv->state_lock); + err = mlx5e_setup_tc_mqprio(priv, type_data); + mutex_unlock(&priv->state_lock); + return err; case TC_SETUP_QDISC_HTB: mutex_lock(&priv->state_lock); err = mlx5e_setup_tc_htb(priv, type_data); @@ -4582,7 +4139,7 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_set_features = mlx5e_set_features, .ndo_fix_features = mlx5e_fix_features, .ndo_change_mtu = mlx5e_change_nic_mtu, - .ndo_do_ioctl = mlx5e_ioctl, + .ndo_eth_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_features_check = mlx5e_features_check, .ndo_tx_timeout = mlx5e_tx_timeout, @@ -4611,15 +4168,6 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_get_devlink_port = mlx5e_get_devlink_port, }; -void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, - int num_channels) -{ - int i; - - for (i = 0; i < len; i++) - indirection_rqt[i] = i % num_channels; -} - static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) { int i; @@ -4632,24 +4180,8 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]); } -void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, - u16 num_channels) -{ - enum mlx5e_traffic_types tt; - - rss_params->hfunc = ETH_RSS_HASH_TOP; - netdev_rss_key_fill(rss_params->toeplitz_hash_key, - sizeof(rss_params->toeplitz_hash_key)); - mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, - MLX5E_INDIR_RQT_SIZE, num_channels); - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - rss_params->rx_hash_fields[tt] = - tirc_default_config[tt].rx_hash_fields; -} - void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu) { - struct mlx5e_rss_params *rss_params = &priv->rss_params; struct mlx5e_params *params = &priv->channels.params; struct mlx5_core_dev *mdev = priv->mdev; u8 rx_cq_period_mode; @@ -4660,12 +4192,12 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 params->hard_mtu = MLX5E_ETH_HARD_MTU; params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2, priv->max_nch); - params->num_tc = 1; + params->mqprio.num_tc = 1; /* Set an initial non-zero value, so 
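/*
 * Editor's sketch: mlx5e_setup_tc_mqprio() is now a thin dispatcher on
 * mqprio->mode, and the state lock moves up into mlx5e_setup_tc() so the
 * MQPRIO and HTB branches take it the same way. The dispatch shape, with
 * trivial stand-in handlers.
 */
#include <stdio.h>
#include <errno.h>

enum mqprio_mode { MODE_DCB, MODE_CHANNEL };

static int setup_dcb(void)     { return 0; }
static int setup_channel(void) { return 0; }

/* Unknown modes are rejected up front rather than half-applied. */
static int setup_tc_mqprio(int mode)
{
	switch (mode) {
	case MODE_DCB:
		return setup_dcb();
	case MODE_CHANNEL:
		return setup_channel();
	default:
		return -EOPNOTSUPP;
	}
}

int main(void)
{
	printf("dcb: %d, bogus: %d\n",
	       setup_tc_mqprio(MODE_DCB), setup_tc_mqprio(42));
	return 0;
}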
that mlx5e_select_queue won't * divide by zero if called before first activating channels. */ - priv->num_tc_x_num_ch = params->num_channels * params->num_tc; + priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc; /* SQ */ params->log_sq_size = is_kdump_kernel() ? @@ -4709,10 +4241,7 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 /* TX inline */ mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); - /* RSS */ - mlx5e_build_rss_params(rss_params, params->num_channels); - params->tunneled_offload_en = - mlx5e_tunnel_inner_ft_supported(mdev); + params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev); /* AF_XDP */ params->xsk = xsk; @@ -4772,8 +4301,8 @@ static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev) { int tt; - for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { - if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5e_get_proto_by_tunnel_type(tt))) + for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) { + if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5_get_proto_by_tunnel_type(tt))) return true; } return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)); @@ -4812,7 +4341,14 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX; + /* Tunneled LRO is not supported in the driver, and the same RQs are + * shared between inner and outer TIRs, so the driver can't disable LRO + * for inner TIRs while having it enabled for outer TIRs. Due to this, + * block LRO altogether if the firmware declares tunneled LRO support. + */ if (!!MLX5_CAP_ETH(mdev, lro_cap) && + !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) && + !MLX5_CAP_ETH(mdev, tunnel_lro_gre) && mlx5e_check_fragmented_striding_rq_cap(mdev)) netdev->vlan_features |= NETIF_F_LRO; @@ -4939,7 +4475,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct devlink_port *dl_port; int err; mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu); @@ -4955,19 +4490,13 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, if (err) mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); - dl_port = mlx5e_devlink_get_dl_port(priv); - if (dl_port->registered) - mlx5e_health_create_reporters(priv); - + mlx5e_health_create_reporters(priv); return 0; } static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { - struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); - - if (dl_port->registered) - mlx5e_health_destroy_reporters(priv); + mlx5e_health_destroy_reporters(priv); mlx5e_tls_cleanup(priv); mlx5e_ipsec_cleanup(priv); } @@ -4975,9 +4504,14 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; - u16 max_nch = priv->max_nch; + enum mlx5e_rx_res_features features; + struct mlx5e_lro_param lro_param; int err; + priv->rx_res = mlx5e_rx_res_alloc(); + if (!priv->rx_res) + return -ENOMEM; + mlx5e_create_q_counters(priv); err = mlx5e_open_drop_rq(priv, &priv->drop_rq); @@ -4986,42 +4520,20 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) goto err_destroy_q_counters; } - err = mlx5e_create_indirect_rqt(priv); + features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP; + if (priv->channels.params.tunneled_offload_en) + features |= MLX5E_RX_RES_FEATURE_INNER_FT; + lro_param = mlx5e_get_lro_param(&priv->channels.params); + err = mlx5e_rx_res_init(priv->rx_res, 
priv->mdev, features, + priv->max_nch, priv->drop_rq.rqn, &lro_param, + priv->channels.params.num_channels); if (err) goto err_close_drop_rq; - err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch); - if (err) - goto err_destroy_indirect_rqts; - - err = mlx5e_create_indirect_tirs(priv, true); - if (err) - goto err_destroy_direct_rqts; - - err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch); - if (err) - goto err_destroy_indirect_tirs; - - err = mlx5e_create_direct_rqts(priv, priv->xsk_tir, max_nch); - if (unlikely(err)) - goto err_destroy_direct_tirs; - - err = mlx5e_create_direct_tirs(priv, priv->xsk_tir, max_nch); - if (unlikely(err)) - goto err_destroy_xsk_rqts; - - err = mlx5e_create_direct_rqts(priv, &priv->ptp_tir, 1); - if (err) - goto err_destroy_xsk_tirs; - - err = mlx5e_create_direct_tirs(priv, &priv->ptp_tir, 1); - if (err) - goto err_destroy_ptp_rqt; - err = mlx5e_create_flow_steering(priv); if (err) { mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); - goto err_destroy_ptp_direct_tir; + goto err_destroy_rx_res; } err = mlx5e_tc_nic_init(priv); @@ -5042,46 +4554,27 @@ err_tc_nic_cleanup: mlx5e_tc_nic_cleanup(priv); err_destroy_flow_steering: mlx5e_destroy_flow_steering(priv); -err_destroy_ptp_direct_tir: - mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1); -err_destroy_ptp_rqt: - mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1); -err_destroy_xsk_tirs: - mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch); -err_destroy_xsk_rqts: - mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch); -err_destroy_direct_tirs: - mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); -err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv); -err_destroy_direct_rqts: - mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); -err_destroy_indirect_rqts: - mlx5e_destroy_rqt(priv, &priv->indir_rqt); +err_destroy_rx_res: + mlx5e_rx_res_destroy(priv->rx_res); err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); err_destroy_q_counters: mlx5e_destroy_q_counters(priv); + mlx5e_rx_res_free(priv->rx_res); + priv->rx_res = NULL; return err; } static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) { - u16 max_nch = priv->max_nch; - mlx5e_accel_cleanup_rx(priv); mlx5e_tc_nic_cleanup(priv); mlx5e_destroy_flow_steering(priv); - mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1); - mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1); - mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch); - mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch); - mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); - mlx5e_destroy_indirect_tirs(priv); - mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); - mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_rx_res_destroy(priv->rx_res); mlx5e_close_drop_rq(&priv->drop_rq); mlx5e_destroy_q_counters(priv); + mlx5e_rx_res_free(priv->rx_res); + priv->rx_res = NULL; } static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index bf94bcb6fa5d..ae71a17fdb27 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -49,6 +49,7 @@ #include "en/devlink.h" #include "fs_core.h" #include "lib/mlx5.h" +#include "lib/devcom.h" #define CREATE_TRACE_POINTS #include "diag/en_rep_tracepoint.h" #include "en_accel/ipsec.h" @@ -250,7 +251,9 @@ static int mlx5e_rep_set_channels(struct net_device *dev, } static int mlx5e_rep_get_coalesce(struct 
net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -258,7 +261,9 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev, } static int mlx5e_rep_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -310,6 +315,8 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw, rpriv = mlx5e_rep_to_rep_priv(rep); list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) { mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule); + if (rep_sq->send_to_vport_rule_peer) + mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer); list_del(&rep_sq->list); kfree(rep_sq); } @@ -319,6 +326,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep, u32 *sqns_array, int sqns_num) { + struct mlx5_eswitch *peer_esw = NULL; struct mlx5_flow_handle *flow_rule; struct mlx5e_rep_priv *rpriv; struct mlx5e_rep_sq *rep_sq; @@ -329,6 +337,10 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, return 0; rpriv = mlx5e_rep_to_rep_priv(rep); + if (mlx5_devcom_is_paired(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS)) + peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom, + MLX5_DEVCOM_ESW_OFFLOADS); + for (i = 0; i < sqns_num; i++) { rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL); if (!rep_sq) { @@ -337,7 +349,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, } /* Add re-inject rule to the PF/representor sqs */ - flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, rep, + flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep, sqns_array[i]); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); @@ -345,12 +357,34 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, goto out_err; } rep_sq->send_to_vport_rule = flow_rule; + rep_sq->sqn = sqns_array[i]; + + if (peer_esw) { + flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, + rep, sqns_array[i]); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule); + kfree(rep_sq); + goto out_err; + } + rep_sq->send_to_vport_rule_peer = flow_rule; + } + list_add(&rep_sq->list, &rpriv->vport_sqs_list); } + + if (peer_esw) + mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS); + return 0; out_err: mlx5e_sqs2vport_stop(esw, rep); + + if (peer_esw) + mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS); + return err; } @@ -364,7 +398,8 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) int err = -ENOMEM; u32 *sqs; - sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL); + sqs = kcalloc(priv->channels.num * mlx5e_get_dcb_num_tc(&priv->channels.params), + sizeof(*sqs), GFP_KERNEL); if (!sqs) goto out; @@ -581,13 +616,10 @@ static void mlx5e_build_rep_params(struct net_device *netdev) params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); - params->num_tc = 1; + params->mqprio.num_tc = 1; params->tunneled_offload_en = false; mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); - - /* RSS */ - mlx5e_build_rss_params(&priv->rss_params, params->num_channels); } static void mlx5e_build_rep_netdev(struct 
net_device *netdev, @@ -651,25 +683,23 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; struct ttc_params ttc_params = {}; - int tt, err; + int err; priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); /* The inner_ttc in the ttc params is intentionally not set */ - ttc_params.any_tt_tirn = priv->direct_tir[0].tirn; - mlx5e_set_ttc_ft_params(&ttc_params); + mlx5e_set_ttc_params(priv, &ttc_params, false); if (rep->vport != MLX5_VPORT_UPLINK) /* To give uplik rep TTC a lower level for chaining from root ft */ ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1; - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn; - - err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc); - if (err) { - netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err); + priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params); + if (IS_ERR(priv->fs.ttc)) { + err = PTR_ERR(priv->fs.ttc); + netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", + err); return err; } return 0; @@ -687,7 +717,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv) /* non uplik reps will skip any bypass tables and go directly to * their own ttc */ - rpriv->root_ft = priv->fs.ttc.ft.t; + rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc); return 0; } @@ -760,9 +790,13 @@ int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup) static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; - u16 max_nch = priv->max_nch; + struct mlx5e_lro_param lro_param; int err; + priv->rx_res = mlx5e_rx_res_alloc(); + if (!priv->rx_res) + return -ENOMEM; + mlx5e_init_l2_addr(priv); err = mlx5e_open_drop_rq(priv, &priv->drop_rq); @@ -771,25 +805,16 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) return err; } - err = mlx5e_create_indirect_rqt(priv); + lro_param = mlx5e_get_lro_param(&priv->channels.params); + err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0, + priv->max_nch, priv->drop_rq.rqn, &lro_param, + priv->channels.params.num_channels); if (err) goto err_close_drop_rq; - err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch); - if (err) - goto err_destroy_indirect_rqts; - - err = mlx5e_create_indirect_tirs(priv, false); - if (err) - goto err_destroy_direct_rqts; - - err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch); - if (err) - goto err_destroy_indirect_tirs; - err = mlx5e_create_rep_ttc_table(priv); if (err) - goto err_destroy_direct_tirs; + goto err_destroy_rx_res; err = mlx5e_create_rep_root_ft(priv); if (err) @@ -806,33 +831,26 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) err_destroy_root_ft: mlx5e_destroy_rep_root_ft(priv); err_destroy_ttc_table: - mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); -err_destroy_direct_tirs: - mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); -err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv); -err_destroy_direct_rqts: - mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); -err_destroy_indirect_rqts: - mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5_destroy_ttc_table(priv->fs.ttc); +err_destroy_rx_res: + mlx5e_rx_res_destroy(priv->rx_res); err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_rx_res_free(priv->rx_res); + priv->rx_res = NULL; return err; } static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) { - u16 max_nch = priv->max_nch; - 
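
The rep RX hunks above funnel all RQT/TIR bookkeeping through the new mlx5e_rx_res object, and both the NIC and rep flavors follow the same two-phase lifecycle: mlx5e_rx_res_alloc()/mlx5e_rx_res_init() on the way up, mlx5e_rx_res_destroy()/mlx5e_rx_res_free() on the way down, where a failed init only frees and never destroys. A minimal userspace sketch of that pairing follows; rx_res_alloc() and friends are hypothetical stand-ins that model the API shape only, not the driver's actual implementation:

#include <stdlib.h>

/* Two-phase resource: alloc/free manage memory only, init/destroy
 * manage the (pretend) device objects. All names are illustrative. */
struct rx_res {
	int hw_objs_created;
};

static struct rx_res *rx_res_alloc(void)
{
	return calloc(1, sizeof(struct rx_res));
}

static int rx_res_init(struct rx_res *res)
{
	res->hw_objs_created = 1;	/* would create RQTs/TIRs here */
	return 0;
}

static void rx_res_destroy(struct rx_res *res)
{
	res->hw_objs_created = 0;	/* release the device objects */
}

static void rx_res_free(struct rx_res *res)
{
	free(res);
}

int main(void)
{
	struct rx_res *res = rx_res_alloc();

	if (!res)
		return 1;

	if (rx_res_init(res)) {
		/* init failed: nothing was created, so only free — the
		 * same order the error paths in the hunks above follow */
		rx_res_free(res);
		return 1;
	}

	/* regular teardown mirrors setup in reverse: destroy, then free */
	rx_res_destroy(res);
	rx_res_free(res);
	return 0;
}
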
mlx5e_ethtool_cleanup_steering(priv); rep_vport_rx_rule_destroy(priv); mlx5e_destroy_rep_root_ft(priv); - mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); - mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); - mlx5e_destroy_indirect_tirs(priv); - mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); - mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5_destroy_ttc_table(priv->fs.ttc); + mlx5e_rx_res_destroy(priv->rx_res); mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_rx_res_free(priv->rx_res); + priv->rx_res = NULL; } static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv) @@ -1264,10 +1282,64 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep) return rpriv->netdev; } +static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_rep_priv *rpriv; + struct mlx5e_rep_sq *rep_sq; + + rpriv = mlx5e_rep_to_rep_priv(rep); + list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) { + if (!rep_sq->send_to_vport_rule_peer) + continue; + mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer); + rep_sq->send_to_vport_rule_peer = NULL; + } +} + +static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, + struct mlx5_eswitch *peer_esw) +{ + struct mlx5_flow_handle *flow_rule; + struct mlx5e_rep_priv *rpriv; + struct mlx5e_rep_sq *rep_sq; + + rpriv = mlx5e_rep_to_rep_priv(rep); + list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) { + if (rep_sq->send_to_vport_rule_peer) + continue; + flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn); + if (IS_ERR(flow_rule)) + goto err_out; + rep_sq->send_to_vport_rule_peer = flow_rule; + } + + return 0; +err_out: + mlx5e_vport_rep_event_unpair(rep); + return PTR_ERR(flow_rule); +} + +static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, + enum mlx5_switchdev_event event, + void *data) +{ + int err = 0; + + if (event == MLX5_SWITCHDEV_EVENT_PAIR) + err = mlx5e_vport_rep_event_pair(esw, rep, data); + else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR) + mlx5e_vport_rep_event_unpair(rep); + + return err; +} + static const struct mlx5_eswitch_rep_ops rep_ops = { .load = mlx5e_vport_rep_load, .unload = mlx5e_vport_rep_unload, - .get_proto_dev = mlx5e_vport_rep_get_proto_dev + .get_proto_dev = mlx5e_vport_rep_get_proto_dev, + .event = mlx5e_vport_rep_event, }; static int mlx5e_rep_probe(struct auxiliary_device *adev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index 47a2dfb7792a..48a203a9e7d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -60,6 +60,7 @@ struct mlx5e_neigh_update_table { struct mlx5_tc_ct_priv; struct mlx5e_rep_bond; struct mlx5e_tc_tun_encap; +struct mlx5e_post_act; struct mlx5_rep_uplink_priv { /* Filters DB - instantiated by the uplink representor and shared by @@ -88,8 +89,9 @@ struct mlx5_rep_uplink_priv { /* maps tun_enc_opts to a unique id*/ struct mapping_ctx *tunnel_enc_opts_mapping; + struct mlx5e_post_act *post_act; struct mlx5_tc_ct_priv *ct_priv; - struct mlx5_esw_psample *esw_psample; + struct mlx5e_tc_psample *tc_psample; /* support eswitch vports bonding */ struct mlx5e_rep_bond *bond; @@ -146,7 +148,7 @@ struct mlx5e_neigh_hash_entry { */ refcount_t refcnt; - /* Save the last reported time offloaded trafic pass over one of the + /* Save the last reported time offloaded traffic pass over one of the * neigh hash entry 
flows. Use it to periodically update the neigh * 'used' value and avoid neigh deleting by the kernel. */ @@ -207,6 +209,8 @@ struct mlx5e_encap_entry { struct mlx5e_rep_sq { struct mlx5_flow_handle *send_to_vport_rule; + struct mlx5_flow_handle *send_to_vport_rule_peer; + u32 sqn; struct list_head list; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index d273758255c3..ba8164792016 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -34,25 +34,20 @@ #include <net/flow_offload.h> #include <net/sch_generic.h> #include <net/pkt_cls.h> -#include <net/tc_act/tc_gact.h> -#include <net/tc_act/tc_skbedit.h> #include <linux/mlx5/fs.h> #include <linux/mlx5/device.h> #include <linux/rhashtable.h> #include <linux/refcount.h> #include <linux/completion.h> -#include <net/tc_act/tc_mirred.h> -#include <net/tc_act/tc_vlan.h> -#include <net/tc_act/tc_tunnel_key.h> #include <net/tc_act/tc_pedit.h> #include <net/tc_act/tc_csum.h> -#include <net/tc_act/tc_mpls.h> #include <net/psample.h> #include <net/arp.h> #include <net/ipv6_stubs.h> #include <net/bareudp.h> #include <net/bonding.h> #include "en.h" +#include "en/tc/post_act.h" #include "en_rep.h" #include "en/rep/tc.h" #include "en/rep/neigh.h" @@ -66,7 +61,7 @@ #include "en/mod_hdr.h" #include "en/tc_priv.h" #include "en/tc_tun_encap.h" -#include "esw/sample.h" +#include "en/tc/sample.h" #include "lib/devcom.h" #include "lib/geneve.h" #include "lib/fs_chains.h" @@ -103,7 +98,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { [MARK_TO_REG] = mark_to_reg_ct, [LABELS_TO_REG] = labels_to_reg_ct, [FTEID_TO_REG] = fteid_to_reg_ct, - /* For NIC rules we store the retore metadata directly + /* For NIC rules we store the restore metadata directly * into reg_b that is passed to SW since we don't * jump between steering domains. 
*/ @@ -252,7 +247,7 @@ get_ct_priv(struct mlx5e_priv *priv) } #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) -static struct mlx5_esw_psample * +static struct mlx5e_tc_psample * get_sample_priv(struct mlx5e_priv *priv) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -263,7 +258,7 @@ get_sample_priv(struct mlx5e_priv *priv) uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); uplink_priv = &uplink_rpriv->uplink_priv; - return uplink_priv->esw_psample; + return uplink_priv->tc_psample; } return NULL; @@ -340,12 +335,12 @@ struct mlx5e_hairpin { struct mlx5_core_dev *func_mdev; struct mlx5e_priv *func_priv; u32 tdn; - u32 tirn; + struct mlx5e_tir direct_tir; int num_channels; struct mlx5e_rqt indir_rqt; - u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; - struct mlx5e_ttc_table ttc; + struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS]; + struct mlx5_ttc_table *ttc; }; struct mlx5e_hairpin_entry { @@ -482,126 +477,101 @@ struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex) static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp) { - u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {}; - void *tirc; + struct mlx5e_tir_builder *builder; int err; + builder = mlx5e_tir_builder_alloc(false); + if (!builder) + return -ENOMEM; + err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn); if (err) - goto alloc_tdn_err; - - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); - MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]); - MLX5_SET(tirc, tirc, transport_domain, hp->tdn); + goto out; - err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn); + mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]); + err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false); if (err) goto create_tir_err; - return 0; +out: + mlx5e_tir_builder_free(builder); + return err; create_tir_err: mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); -alloc_tdn_err: - return err; + + goto out; } static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp) { - mlx5_core_destroy_tir(hp->func_mdev, hp->tirn); + mlx5e_tir_destroy(&hp->direct_tir); mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); } -static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc) -{ - struct mlx5e_priv *priv = hp->func_priv; - int i, ix, sz = MLX5E_INDIR_RQT_SIZE; - u32 *indirection_rqt, rqn; - - indirection_rqt = kcalloc(sz, sizeof(*indirection_rqt), GFP_KERNEL); - if (!indirection_rqt) - return -ENOMEM; - - mlx5e_build_default_indir_rqt(indirection_rqt, sz, - hp->num_channels); - - for (i = 0; i < sz; i++) { - ix = i; - if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR) - ix = mlx5e_bits_invert(i, ilog2(sz)); - ix = indirection_rqt[ix]; - rqn = hp->pair->rqn[ix]; - MLX5_SET(rqtc, rqtc, rq_num[i], rqn); - } - - kfree(indirection_rqt); - return 0; -} - static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp) { - int inlen, err, sz = MLX5E_INDIR_RQT_SIZE; struct mlx5e_priv *priv = hp->func_priv; struct mlx5_core_dev *mdev = priv->mdev; - void *rqtc; - u32 *in; + struct mlx5e_rss_params_indir *indir; + int err; - inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) + indir = kvmalloc(sizeof(*indir), GFP_KERNEL); + if (!indir) return -ENOMEM; - rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); - - MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); - MLX5_SET(rqtc, rqtc, rqt_max_size, sz); - - err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc); - if (err) - 
goto out; - - err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn); - if (!err) - hp->indir_rqt.enabled = true; + mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels); + err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels, + mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc, + indir); -out: - kvfree(in); + kvfree(indir); return err; } static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp) { struct mlx5e_priv *priv = hp->func_priv; - u32 in[MLX5_ST_SZ_DW(create_tir_in)]; - int tt, i, err; - void *tirc; + struct mlx5e_rss_params_hash rss_hash; + enum mlx5_traffic_types tt, max_tt; + struct mlx5e_tir_builder *builder; + int err = 0; + + builder = mlx5e_tir_builder_alloc(false); + if (!builder) + return -ENOMEM; + + rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res); for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt); + struct mlx5e_rss_params_traffic_type rss_tt; - memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in)); - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + rss_tt = mlx5e_rss_get_default_tt_config(tt); - MLX5_SET(tirc, tirc, transport_domain, hp->tdn); - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn); - mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false); + mlx5e_tir_builder_build_rqt(builder, hp->tdn, + mlx5e_rqt_get_rqtn(&hp->indir_rqt), + false); + mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false); - err = mlx5_core_create_tir(hp->func_mdev, in, - &hp->indir_tirn[tt]); + err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false); if (err) { mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err); goto err_destroy_tirs; } + + mlx5e_tir_builder_clear(builder); } - return 0; -err_destroy_tirs: - for (i = 0; i < tt; i++) - mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]); +out: + mlx5e_tir_builder_free(builder); return err; + +err_destroy_tirs: + max_tt = tt; + for (tt = 0; tt < max_tt; tt++) + mlx5e_tir_destroy(&hp->indir_tir[tt]); + + goto out; } static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp) @@ -609,7 +579,7 @@ static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp) int tt; for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]); + mlx5e_tir_destroy(&hp->indir_tir[tt]); } static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, @@ -620,12 +590,16 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, memset(ttc_params, 0, sizeof(*ttc_params)); - ttc_params->any_tt_tirn = hp->tirn; - - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - ttc_params->indir_tirn[tt] = hp->indir_tirn[tt]; + ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev, + MLX5_FLOW_NAMESPACE_KERNEL); + for (tt = 0; tt < MLX5_NUM_TT; tt++) { + ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR; + ttc_params->dests[tt].tir_num = + tt == MLX5_TT_ANY ? 
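
The ternary being assembled across the surrounding lines is the crux of the new hairpin TTC wiring: every traffic type is steered to its own indirect (RSS-hashed) TIR, except MLX5_TT_ANY, which takes the single direct TIR. A compilable toy version of that fill loop, with made-up traffic types and plain integers standing in for the mlx5 TIR objects:

#include <stdio.h>

/* Hypothetical miniature of the TTC-destination fill. */
enum tt { TT_IPV4_TCP, TT_IPV6_TCP, TT_ANY, TT_MAX };

int main(void)
{
	unsigned int direct_tirn = 100;			/* the one direct TIR */
	unsigned int indir_tirn[TT_MAX] = { 200, 201, 0 };	/* per-tt RSS TIRs */
	unsigned int dest_tirn[TT_MAX];
	int tt;

	for (tt = 0; tt < TT_MAX; tt++)
		dest_tirn[tt] = tt == TT_ANY ? direct_tirn : indir_tirn[tt];

	for (tt = 0; tt < TT_MAX; tt++)
		printf("tt %d -> tirn %u\n", tt, dest_tirn[tt]);
	return 0;
}
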
+ mlx5e_tir_get_tirn(&hp->direct_tir) : + mlx5e_tir_get_tirn(&hp->indir_tir[tt]); + } - ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE; ft_attr->level = MLX5E_TC_TTC_FT_LEVEL; ft_attr->prio = MLX5E_TC_PRIO; } @@ -645,30 +619,31 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp) goto err_create_indirect_tirs; mlx5e_hairpin_set_ttc_params(hp, &ttc_params); - err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc); - if (err) + hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params); + if (IS_ERR(hp->ttc)) { + err = PTR_ERR(hp->ttc); goto err_create_ttc_table; + } netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n", - hp->num_channels, hp->ttc.ft.t->id); + hp->num_channels, + mlx5_get_ttc_flow_table(priv->fs.ttc)->id); return 0; err_create_ttc_table: mlx5e_hairpin_destroy_indirect_tirs(hp); err_create_indirect_tirs: - mlx5e_destroy_rqt(priv, &hp->indir_rqt); + mlx5e_rqt_destroy(&hp->indir_rqt); return err; } static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp) { - struct mlx5e_priv *priv = hp->func_priv; - - mlx5e_destroy_ttc_table(priv, &hp->ttc); + mlx5_destroy_ttc_table(hp->ttc); mlx5e_hairpin_destroy_indirect_tirs(hp); - mlx5e_destroy_rqt(priv, &hp->indir_rqt); + mlx5e_rqt_destroy(&hp->indir_rqt); } static struct mlx5e_hairpin * @@ -903,16 +878,17 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, } netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n", - hp->tirn, hp->pair->rqn[0], + mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0], dev_name(hp->pair->peer_mdev->device), hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets); attach_flow: if (hpe->hp->num_channels > 1) { flow_flag_set(flow, HAIRPIN_RSS); - flow->attr->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t; + flow->attr->nic_attr->hairpin_ft = + mlx5_get_ttc_flow_table(hpe->hp->ttc); } else { - flow->attr->nic_attr->hairpin_tirn = hpe->hp->tirn; + flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir); } flow->hpe = hpe; @@ -1056,15 +1032,17 @@ err_ft_get: static int mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, - struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) { + struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5_flow_attr *attr = flow->attr; struct mlx5_core_dev *dev = priv->mdev; - struct mlx5_fc *counter = NULL; + struct mlx5_fc *counter; int err; + parse_attr = attr->parse_attr; + if (flow_flag_test(flow, HAIRPIN)) { err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack); if (err) @@ -1170,7 +1148,8 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, mod_hdr_acts); #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) } else if (flow_flag_test(flow, SAMPLE)) { - rule = mlx5_esw_sample_offload(get_sample_priv(flow->priv), spec, attr); + rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr, + mlx5e_tc_get_flow_tun_id(flow)); #endif } else { rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); @@ -1209,7 +1188,7 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) if (flow_flag_test(flow, SAMPLE)) { - mlx5_esw_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr); + mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr); return; } #endif @@ -1338,6 +1317,7 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device 
*route_dev, u16 *vport) { struct mlx5e_priv *out_priv, *route_priv; + struct mlx5_devcom *devcom = NULL; struct mlx5_core_dev *route_mdev; struct mlx5_eswitch *esw; u16 vhca_id; @@ -1349,7 +1329,24 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro route_mdev = route_priv->mdev; vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id); + if (mlx5_lag_is_active(out_priv->mdev)) { + /* In lag case we may get devices from different eswitch instances. + * If we failed to get vport num, it means, mostly, that we on the wrong + * eswitch. + */ + err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); + if (err != -ENOENT) + return err; + + devcom = out_priv->mdev->priv.devcom; + esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + if (!esw) + return -ENODEV; + } + err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); + if (devcom) + mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); return err; } @@ -1384,9 +1381,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, bool vf_tun = false, encap_valid = true; struct net_device *encap_dev = NULL; struct mlx5_esw_flow_attr *esw_attr; - struct mlx5_fc *counter = NULL; struct mlx5e_rep_priv *rpriv; struct mlx5e_priv *out_priv; + struct mlx5_fc *counter; u32 max_prio, max_chain; int err = 0; int out_index; @@ -1573,6 +1570,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, else mlx5e_detach_mod_hdr(priv, flow); } + kfree(attr->sample_attr); kvfree(attr->parse_attr); kvfree(attr->esw_attr->rx_tun_attr); @@ -1582,7 +1580,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, if (flow_flag_test(flow, L3_TO_L2_DECAP)) mlx5e_detach_decap(priv, flow); - kfree(flow->attr->esw_attr->sample); kfree(flow->attr); } @@ -1647,17 +1644,22 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, } } -static int flow_has_tc_fwd_action(struct flow_cls_offload *f) +static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_action *flow_action = &rule->action; const struct flow_action_entry *act; int i; + if (chain) + return false; + flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_GOTO: return true; + case FLOW_ACTION_SAMPLE: + return true; default: continue; } @@ -1898,7 +1900,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, return -EOPNOTSUPP; needs_mapping = !!flow->attr->chain; - sets_mapping = !flow->attr->chain && flow_has_tc_fwd_action(f); + sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f); *match_inner = !needs_mapping; if ((needs_mapping || sets_mapping) && @@ -2471,7 +2473,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3; } } - /* Currenlty supported only for MPLS over UDP */ + /* Currently supported only for MPLS over UDP */ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) && !netif_is_bareudp(filter_dev)) { NL_SET_ERR_MSG_MOD(extack, @@ -2725,7 +2727,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, if (s_mask && a_mask) { NL_SET_ERR_MSG_MOD(extack, "can't set and add to the same HW field"); - printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field); + netdev_warn(priv->netdev, + "mlx5: can't set and add to the same HW field (%x)\n", + f->field); return -EOPNOTSUPP; } @@ -2764,8 +2768,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, if (first < next_z && next_z < last) { NL_SET_ERR_MSG_MOD(extack, "rewrite of 
few sub-fields isn't supported"); - printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n", - mask); + netdev_warn(priv->netdev, + "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n", + mask); return -EOPNOTSUPP; } @@ -3352,10 +3357,10 @@ static int validate_goto_chain(struct mlx5e_priv *priv, static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct flow_action *flow_action, - struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) { + struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5_flow_attr *attr = flow->attr; struct pedit_headers_action hdrs[2] = {}; const struct flow_action_entry *act; @@ -3371,8 +3376,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, return -EOPNOTSUPP; nic_attr = attr->nic_attr; - nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; + parse_attr = attr->parse_attr; flow_action_for_each(i, act, flow_action) { switch (act->id) { @@ -3381,10 +3386,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, MLX5_FLOW_CONTEXT_ACTION_COUNT; break; case FLOW_ACTION_DROP: - action |= MLX5_FLOW_CONTEXT_ACTION_DROP; - if (MLX5_CAP_FLOWTABLE(priv->mdev, - flow_table_properties_nic_receive.flow_counter)) - action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; break; case FLOW_ACTION_MANGLE: case FLOW_ACTION_ADD: @@ -3425,7 +3428,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, "device is not on same HW, can't offload"); netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n", peer_dev->name); - return -EINVAL; + return -EOPNOTSUPP; } } break; @@ -3435,7 +3438,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, if (mark & ~MLX5E_TC_FLOW_ID_MASK) { NL_SET_ERR_MSG_MOD(extack, "Bad flow mark - only 16 bit is supported"); - return -EINVAL; + return -EOPNOTSUPP; } nic_attr->flow_tag = mark; @@ -3732,20 +3735,19 @@ static int verify_uplink_forwarding(struct mlx5e_priv *priv, static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct flow_action *flow_action, struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack, - struct net_device *filter_dev) + struct netlink_ext_ack *extack) { struct pedit_headers_action hdrs[2] = {}; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_sample_attr sample_attr = {}; const struct ip_tunnel_info *info = NULL; struct mlx5_flow_attr *attr = flow->attr; int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; bool ft_flow = mlx5e_is_ft_flow(flow); const struct flow_action_entry *act; struct mlx5_esw_flow_attr *esw_attr; - struct mlx5_sample_attr sample = {}; bool encap = false, decap = false; u32 action = attr->action; int err, i, if_count = 0; @@ -3798,7 +3800,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, "mpls pop supported only as first action"); return -EOPNOTSUPP; } - if (!netif_is_bareudp(filter_dev)) { + if (!netif_is_bareudp(parse_attr->filter_dev)) { NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only on bareudp devices"); return -EOPNOTSUPP; @@ -3947,7 +3949,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, "devices %s %s not on same switch HW, can't offload forwarding\n", priv->netdev->name, out_dev->name); - return -EINVAL; + return -EOPNOTSUPP; } } break; @@ -4016,10 +4018,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is 
not supported"); return -EOPNOTSUPP; } - sample.rate = act->sample.rate; - sample.group_num = act->sample.psample_group->group_num; + sample_attr.rate = act->sample.rate; + sample_attr.group_num = act->sample.psample_group->group_num; if (act->sample.truncate) - sample.trunc_size = act->sample.trunc_size; + sample_attr.trunc_size = act->sample.trunc_size; flow_flag_set(flow, SAMPLE); break; default: @@ -4104,10 +4106,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, * no errors after parsing. */ if (flow_flag_test(flow, SAMPLE)) { - esw_attr->sample = kzalloc(sizeof(*esw_attr->sample), GFP_KERNEL); - if (!esw_attr->sample) + attr->sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL); + if (!attr->sample_attr) return -ENOMEM; - *esw_attr->sample = sample; + *attr->sample_attr = sample_attr; } return 0; @@ -4300,7 +4302,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, if (err) goto err_free; - err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev); + err = parse_tc_fdb_actions(priv, &rule->action, flow, extack); if (err) goto err_free; @@ -4446,11 +4448,11 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, if (err) goto err_free; - err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack); + err = parse_tc_nic_actions(priv, &rule->action, flow, extack); if (err) goto err_free; - err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack); + err = mlx5e_tc_add_nic_flow(priv, flow, extack); if (err) goto err_free; @@ -4705,7 +4707,7 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate, rate_mbps = max_t(u32, rate, 1); } - err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); + err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps); if (err) NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); @@ -4877,6 +4879,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) struct mlx5_core_dev *dev = priv->mdev; struct mapping_ctx *chains_mapping; struct mlx5_chains_attr attr = {}; + u64 mapping_id; int err; mlx5e_mod_hdr_tbl_init(&tc->mod_hdr); @@ -4890,8 +4893,12 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key); - chains_mapping = mapping_create(sizeof(struct mlx5_mapped_obj), - MLX5E_TC_TABLE_CHAIN_TAG_MASK, true); + mapping_id = mlx5_query_nic_system_image_guid(dev); + + chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN, + sizeof(struct mlx5_mapped_obj), + MLX5E_TC_TABLE_CHAIN_TAG_MASK, true); + if (IS_ERR(chains_mapping)) { err = PTR_ERR(chains_mapping); goto err_mapping; @@ -4913,8 +4920,9 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) goto err_chains; } + tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL); tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr, - MLX5_FLOW_NAMESPACE_KERNEL); + MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act); tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event; err = register_netdevice_notifier_dev_net(priv->netdev, @@ -4930,6 +4938,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) err_reg: mlx5_tc_ct_clean(tc->ct); + mlx5e_tc_post_act_destroy(tc->post_act); mlx5_chains_destroy(tc->chains); err_chains: mapping_destroy(chains_mapping); @@ -4968,6 +4977,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) mutex_destroy(&tc->t_lock); mlx5_tc_ct_clean(tc->ct); + mlx5e_tc_post_act_destroy(tc->post_act); mapping_destroy(tc->mapping); mlx5_chains_destroy(tc->chains); } @@ -4980,6 +4990,7 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) struct mapping_ctx 
*mapping; struct mlx5_eswitch *esw; struct mlx5e_priv *priv; + u64 mapping_id; int err = 0; uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht); @@ -4987,17 +4998,24 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) priv = netdev_priv(rpriv->netdev); esw = priv->mdev->priv.eswitch; + uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw), + MLX5_FLOW_NAMESPACE_FDB); uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev), esw_chains(esw), &esw->offloads.mod_hdr, - MLX5_FLOW_NAMESPACE_FDB); + MLX5_FLOW_NAMESPACE_FDB, + uplink_priv->post_act); #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) - uplink_priv->esw_psample = mlx5_esw_sample_init(netdev_priv(priv->netdev)); + uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act); #endif - mapping = mapping_create(sizeof(struct tunnel_match_key), - TUNNEL_INFO_BITS_MASK, true); + mapping_id = mlx5_query_nic_system_image_guid(esw->dev); + + mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL, + sizeof(struct tunnel_match_key), + TUNNEL_INFO_BITS_MASK, true); + if (IS_ERR(mapping)) { err = PTR_ERR(mapping); goto err_tun_mapping; @@ -5005,7 +5023,8 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) uplink_priv->tunnel_mapping = mapping; /* 0xFFF is reserved for stack devices slow path table mark */ - mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true); + mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS, + sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true); if (IS_ERR(mapping)) { err = PTR_ERR(mapping); goto err_enc_opts_mapping; @@ -5034,11 +5053,12 @@ err_enc_opts_mapping: mapping_destroy(uplink_priv->tunnel_mapping); err_tun_mapping: #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) - mlx5_esw_sample_cleanup(uplink_priv->esw_psample); + mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); #endif mlx5_tc_ct_clean(uplink_priv->ct_priv); netdev_warn(priv->netdev, "Failed to initialize tc (eswitch), err: %d", err); + mlx5e_tc_post_act_destroy(uplink_priv->post_act); return err; } @@ -5055,9 +5075,10 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) mapping_destroy(uplink_priv->tunnel_mapping); #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) - mlx5_esw_sample_cleanup(uplink_priv->esw_psample); + mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); #endif mlx5_tc_ct_clean(uplink_priv->ct_priv); + mlx5e_tc_post_act_destroy(uplink_priv->post_act); } int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index f7cbeb0b66d2..1a4cd882f0fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -70,6 +70,7 @@ struct mlx5_flow_attr { struct mlx5_fc *counter; struct mlx5_modify_hdr *modify_hdr; struct mlx5_ct_attr ct_attr; + struct mlx5e_sample_attr *sample_attr; struct mlx5e_tc_flow_parse_attr *parse_attr; u32 chain; u16 prio; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c index 505bf811984a..2e504c7461c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c @@ -15,6 +15,15 @@ static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport) vport->egress.offloads.fwd_rule = NULL; } +static void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport) +{ + if (!vport->egress.offloads.bounce_rule) + return; + 
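
The new bounce-rule helper in the egress ACL hunk below follows the guard/delete/NULL teardown idiom, so the shared esw_acl_egress_ofld_rules_destroy() path can call it unconditionally whether or not a bounce rule was ever installed. A small standalone sketch of the idiom, where struct rule and destroy_rule() are placeholders rather than mlx5 API:

#include <stdlib.h>

struct rule { int id; };

static void destroy_rule(struct rule *r)
{
	free(r);	/* stands in for mlx5_del_flow_rules() */
}

static void bounce_rule_destroy(struct rule **rulep)
{
	if (!*rulep)
		return;

	destroy_rule(*rulep);
	*rulep = NULL;	/* make a repeated call a no-op */
}

int main(void)
{
	struct rule *bounce = calloc(1, sizeof(*bounce));

	bounce_rule_destroy(&bounce);	/* frees and NULLs the handle */
	bounce_rule_destroy(&bounce);	/* second call: harmless no-op */
	return 0;
}

Nulling the handle after deletion is what lets the destroy path stay unconditional: a second invocation simply falls through the guard.
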
+ mlx5_del_flow_rules(vport->egress.offloads.bounce_rule); + vport->egress.offloads.bounce_rule = NULL; +} + static int esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, struct mlx5_flow_destination *fwd_dest) @@ -87,6 +96,7 @@ static void esw_acl_egress_ofld_rules_destroy(struct mlx5_vport *vport) { esw_acl_egress_vlan_destroy(vport); esw_acl_egress_ofld_fwd2vport_destroy(vport); + esw_acl_egress_ofld_bounce_rule_destroy(vport); } static int esw_acl_egress_ofld_groups_create(struct mlx5_eswitch *esw, @@ -145,6 +155,12 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport) mlx5_destroy_flow_group(vport->egress.offloads.fwd_grp); vport->egress.offloads.fwd_grp = NULL; } + + if (!IS_ERR_OR_NULL(vport->egress.offloads.bounce_grp)) { + mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp); + vport->egress.offloads.bounce_grp = NULL; + } + esw_acl_egress_vlan_grp_destroy(vport); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index 69a3630818d7..7e221038df8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -5,6 +5,7 @@ #include <linux/notifier.h> #include <net/netevent.h> #include <net/switchdev.h> +#include "lib/devcom.h" #include "bridge.h" #include "eswitch.h" #include "bridge_priv.h" @@ -56,7 +57,6 @@ struct mlx5_esw_bridge { struct list_head fdb_list; struct rhashtable fdb_ht; - struct xarray vports; struct mlx5_flow_table *egress_ft; struct mlx5_flow_group *egress_vlan_fg; @@ -77,6 +77,15 @@ mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char * call_switchdev_notifiers(val, dev, &send_info.info, NULL); } +static void +mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry) +{ + if (!(entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER | MLX5_ESW_BRIDGE_FLAG_PEER))) + mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr, + entry->key.vid, + SWITCHDEV_FDB_DEL_TO_BRIDGE); +} + static struct mlx5_flow_table * mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw) { @@ -400,9 +409,10 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge) } static struct mlx5_flow_handle * -mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr, - struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, - struct mlx5_esw_bridge *bridge) +mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr, + struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, + struct mlx5_esw_bridge *bridge, + struct mlx5_eswitch *esw) { struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads; struct mlx5_flow_act flow_act = { @@ -430,7 +440,7 @@ mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr, MLX5_SET(fte_match_param, rule_spec->match_criteria, misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask()); MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0, - mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num)); + mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num)); if (vlan && vlan->pkt_reformat_push) { flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; @@ -459,6 +469,35 @@ mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr, } static struct mlx5_flow_handle * +mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr, + 
struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, + struct mlx5_esw_bridge *bridge) +{ + return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id, + bridge, bridge->br_offloads->esw); +} + +static struct mlx5_flow_handle * +mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, const unsigned char *addr, + struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, + struct mlx5_esw_bridge *bridge) +{ + struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom; + static struct mlx5_flow_handle *handle; + struct mlx5_eswitch *peer_esw; + + peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + if (!peer_esw) + return ERR_PTR(-ENODEV); + + handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id, + bridge, peer_esw); + + mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + return handle; +} + +static struct mlx5_flow_handle * mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr, struct mlx5_esw_bridge *bridge) { @@ -505,7 +544,7 @@ mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *a } static struct mlx5_flow_handle * -mlx5_esw_bridge_egress_flow_create(u16 vport_num, const unsigned char *addr, +mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr, struct mlx5_esw_bridge_vlan *vlan, struct mlx5_esw_bridge *bridge) { @@ -550,6 +589,10 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, const unsigned char *addr, vlan->vid); } + if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) { + dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID; + dest.vport.vhca_id = esw_owner_vhca_id; + } handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1); kvfree(rule_spec); @@ -576,7 +619,6 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex, goto err_fdb_ht; INIT_LIST_HEAD(&bridge->fdb_list); - xa_init(&bridge->vports); bridge->ifindex = ifindex; bridge->refcnt = 1; bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME); @@ -603,7 +645,6 @@ static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads, return; mlx5_esw_bridge_egress_table_cleanup(bridge); - WARN_ON(!xa_empty(&bridge->vports)); list_del(&bridge->list); rhashtable_destroy(&bridge->fdb_ht); kvfree(bridge); @@ -639,30 +680,40 @@ mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads return bridge; } +static unsigned long mlx5_esw_bridge_port_key_from_data(u16 vport_num, u16 esw_owner_vhca_id) +{ + return vport_num | (unsigned long)esw_owner_vhca_id << sizeof(vport_num) * BITS_PER_BYTE; +} + +static unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port) +{ + return mlx5_esw_bridge_port_key_from_data(port->vport_num, port->esw_owner_vhca_id); +} + static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port, - struct mlx5_esw_bridge *bridge) + struct mlx5_esw_bridge_offloads *br_offloads) { - return xa_insert(&bridge->vports, port->vport_num, port, GFP_KERNEL); + return xa_insert(&br_offloads->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL); } static struct mlx5_esw_bridge_port * -mlx5_esw_bridge_port_lookup(u16 vport_num, struct mlx5_esw_bridge *bridge) +mlx5_esw_bridge_port_lookup(u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads) { - return xa_load(&bridge->vports, vport_num); + return xa_load(&br_offloads->ports, mlx5_esw_bridge_port_key_from_data(vport_num, + 
esw_owner_vhca_id)); } static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port, - struct mlx5_esw_bridge *bridge) + struct mlx5_esw_bridge_offloads *br_offloads) { - xa_erase(&bridge->vports, port->vport_num); + xa_erase(&br_offloads->ports, mlx5_esw_bridge_port_key(port)); } -static void mlx5_esw_bridge_fdb_entry_refresh(unsigned long lastuse, - struct mlx5_esw_bridge_fdb_entry *entry) +static void mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry *entry) { trace_mlx5_esw_bridge_fdb_entry_refresh(entry); - entry->lastuse = lastuse; mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr, entry->key.vid, SWITCHDEV_FDB_ADD_TO_BRIDGE); @@ -690,10 +741,7 @@ static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge) struct mlx5_esw_bridge_fdb_entry *entry, *tmp; list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) { - if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)) - mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr, - entry->key.vid, - SWITCHDEV_FDB_DEL_TO_BRIDGE); + mlx5_esw_bridge_fdb_del_notify(entry); mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); } } @@ -841,10 +889,7 @@ static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_esw_bridge_fdb_entry *entry, *tmp; list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) { - if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)) - mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr, - entry->key.vid, - SWITCHDEV_FDB_DEL_TO_BRIDGE); + mlx5_esw_bridge_fdb_del_notify(entry); mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); } @@ -875,13 +920,13 @@ static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port, } static struct mlx5_esw_bridge_vlan * -mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, struct mlx5_esw_bridge *bridge, - struct mlx5_eswitch *esw) +mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw) { struct mlx5_esw_bridge_port *port; struct mlx5_esw_bridge_vlan *vlan; - port = mlx5_esw_bridge_port_lookup(vport_num, bridge); + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, bridge->br_offloads); if (!port) { /* FDB is added asynchronously on wq while port might have been deleted * concurrently. Report on 'info' logging level and skip the FDB offload. 
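
With bridge ports moving out of the per-bridge xarray into a single xarray on br_offloads, the key must disambiguate the same vport number owned by two paired eswitches — hence the (vport_num, esw_owner_vhca_id) packing in mlx5_esw_bridge_port_key_from_data() above. A userspace model of the packing and its inverse, with CHAR_BIT playing the role of the kernel's BITS_PER_BYTE:

#include <assert.h>
#include <limits.h>
#include <stdint.h>

/* Low 16 bits: vport number; next 16 bits: owning eswitch vhca id,
 * matching vport_num | esw_owner_vhca_id << (sizeof(u16) * BITS_PER_BYTE). */
static unsigned long port_key(uint16_t vport_num, uint16_t esw_owner_vhca_id)
{
	return vport_num |
	       (unsigned long)esw_owner_vhca_id << (sizeof(vport_num) * CHAR_BIT);
}

int main(void)
{
	unsigned long key = port_key(3, 0xabcd);

	assert((uint16_t)key == 3);		/* vport number back out */
	assert((uint16_t)(key >> 16) == 0xabcd);	/* owner vhca id */

	/* two paired eswitches can register the same vport number
	 * without colliding in the shared xarray */
	assert(port_key(3, 1) != port_key(3, 2));
	return 0;
}
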
@@ -904,24 +949,23 @@ mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, struct mlx5_esw_bridge } static struct mlx5_esw_bridge_fdb_entry * -mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsigned char *addr, - u16 vid, bool added_by_user, struct mlx5_eswitch *esw, - struct mlx5_esw_bridge *bridge) +mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + const unsigned char *addr, u16 vid, bool added_by_user, bool peer, + struct mlx5_eswitch *esw, struct mlx5_esw_bridge *bridge) { struct mlx5_esw_bridge_vlan *vlan = NULL; struct mlx5_esw_bridge_fdb_entry *entry; struct mlx5_flow_handle *handle; struct mlx5_fc *counter; - struct mlx5e_priv *priv; int err; if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) { - vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, bridge, esw); + vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, esw_owner_vhca_id, bridge, + esw); if (IS_ERR(vlan)) return ERR_CAST(vlan); } - priv = netdev_priv(dev); entry = kvzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return ERR_PTR(-ENOMEM); @@ -930,19 +974,25 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsi entry->key.vid = vid; entry->dev = dev; entry->vport_num = vport_num; + entry->esw_owner_vhca_id = esw_owner_vhca_id; entry->lastuse = jiffies; if (added_by_user) entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER; + if (peer) + entry->flags |= MLX5_ESW_BRIDGE_FLAG_PEER; - counter = mlx5_fc_create(priv->mdev, true); + counter = mlx5_fc_create(esw->dev, true); if (IS_ERR(counter)) { err = PTR_ERR(counter); goto err_ingress_fc_create; } entry->ingress_counter = counter; - handle = mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan, mlx5_fc_id(counter), - bridge); + handle = peer ? 
+ mlx5_esw_bridge_ingress_flow_peer_create(vport_num, addr, vlan, + mlx5_fc_id(counter), bridge) : + mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan, + mlx5_fc_id(counter), bridge); if (IS_ERR(handle)) { err = PTR_ERR(handle); esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n", @@ -962,7 +1012,8 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsi entry->filter_handle = handle; } - handle = mlx5_esw_bridge_egress_flow_create(vport_num, addr, vlan, bridge); + handle = mlx5_esw_bridge_egress_flow_create(vport_num, esw_owner_vhca_id, addr, vlan, + bridge); if (IS_ERR(handle)) { err = PTR_ERR(handle); esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n", @@ -994,32 +1045,37 @@ err_egress_flow_create: err_ingress_filter_flow_create: mlx5_del_flow_rules(entry->ingress_handle); err_ingress_flow_create: - mlx5_fc_destroy(priv->mdev, entry->ingress_counter); + mlx5_fc_destroy(esw->dev, entry->ingress_counter); err_ingress_fc_create: kvfree(entry); return ERR_PTR(err); } -int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time, + struct mlx5_esw_bridge_offloads *br_offloads) { - if (!vport->bridge) + struct mlx5_esw_bridge_port *port; + + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); + if (!port) return -EINVAL; - vport->bridge->ageing_time = clock_t_to_jiffies(ageing_time); + port->bridge->ageing_time = clock_t_to_jiffies(ageing_time); return 0; } -int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable, + struct mlx5_esw_bridge_offloads *br_offloads) { + struct mlx5_esw_bridge_port *port; struct mlx5_esw_bridge *bridge; bool filtering; - if (!vport->bridge) + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); + if (!port) return -EINVAL; - bridge = vport->bridge; + bridge = port->bridge; filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG; if (filtering == enable) return 0; @@ -1033,114 +1089,143 @@ int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw, return 0; } -static int mlx5_esw_bridge_vport_init(struct mlx5_esw_bridge_offloads *br_offloads, - struct mlx5_esw_bridge *bridge, - struct mlx5_vport *vport) +static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags, + struct mlx5_esw_bridge_offloads *br_offloads, + struct mlx5_esw_bridge *bridge) { struct mlx5_eswitch *esw = br_offloads->esw; struct mlx5_esw_bridge_port *port; int err; port = kvzalloc(sizeof(*port), GFP_KERNEL); - if (!port) { - err = -ENOMEM; - goto err_port_alloc; - } + if (!port) + return -ENOMEM; - port->vport_num = vport->vport; + port->vport_num = vport_num; + port->esw_owner_vhca_id = esw_owner_vhca_id; + port->bridge = bridge; + port->flags |= flags; xa_init(&port->vlans); - err = mlx5_esw_bridge_port_insert(port, bridge); + err = mlx5_esw_bridge_port_insert(port, br_offloads); if (err) { - esw_warn(esw->dev, "Failed to insert port metadata (vport=%u,err=%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "Failed to insert port metadata (vport=%u,esw_owner_vhca_id=%u,err=%d)\n", + port->vport_num, port->esw_owner_vhca_id, err); goto err_port_insert; } trace_mlx5_esw_bridge_vport_init(port); - vport->bridge = bridge; return 0; 
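
Around this hunk the error-unwind ownership also shifts: mlx5_esw_bridge_vport_init() no longer drops the bridge reference when it fails (the old err_port_alloc put is gone), and the new err_vport label in mlx5_esw_bridge_vport_link_with_flags() releases it instead, since that caller is the one that took the reference via mlx5_esw_bridge_lookup(). A hedged sketch of that split, with the types invented and the refcounting reduced to a bare counter:

#include <errno.h>
#include <stdlib.h>

struct bridge { int refcnt; };

static void bridge_put(struct bridge *b)
{
	if (--b->refcnt == 0)
		free(b);
}

static int vport_init(int fail)
{
	return fail ? -ENOMEM : 0;	/* never touches the bridge refcount */
}

static int vport_link(struct bridge *b, int fail)
{
	int err = vport_init(fail);

	if (err) {
		bridge_put(b);		/* err_vport: the caller drops its reference */
		return err;
	}
	return 0;
}

int main(void)
{
	struct bridge *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->refcnt = 1;			/* the lookup's reference */
	(void)vport_link(b, 1);		/* failure path frees the bridge exactly once */
	return 0;
}
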
err_port_insert: kvfree(port); -err_port_alloc: - mlx5_esw_bridge_put(br_offloads, bridge); return err; } static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads, - struct mlx5_vport *vport) + struct mlx5_esw_bridge_port *port) { - struct mlx5_esw_bridge *bridge = vport->bridge; + u16 vport_num = port->vport_num, esw_owner_vhca_id = port->esw_owner_vhca_id; + struct mlx5_esw_bridge *bridge = port->bridge; struct mlx5_esw_bridge_fdb_entry *entry, *tmp; - struct mlx5_esw_bridge_port *port; list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) - if (entry->vport_num == vport->vport) + if (entry->vport_num == vport_num && entry->esw_owner_vhca_id == esw_owner_vhca_id) mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); - port = mlx5_esw_bridge_port_lookup(vport->vport, bridge); - if (!port) { - WARN(1, "Vport %u metadata not found on bridge", vport->vport); - return -EINVAL; - } - trace_mlx5_esw_bridge_vport_cleanup(port); mlx5_esw_bridge_port_vlans_flush(port, bridge); - mlx5_esw_bridge_port_erase(port, bridge); + mlx5_esw_bridge_port_erase(port, br_offloads); kvfree(port); mlx5_esw_bridge_put(br_offloads, bridge); - vport->bridge = NULL; return 0; } -int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads, - struct mlx5_vport *vport, struct netlink_ext_ack *extack) +static int mlx5_esw_bridge_vport_link_with_flags(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + u16 flags, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack) { struct mlx5_esw_bridge *bridge; int err; - WARN_ON(vport->bridge); - bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads); if (IS_ERR(bridge)) { NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex"); return PTR_ERR(bridge); } - err = mlx5_esw_bridge_vport_init(br_offloads, bridge, vport); - if (err) + err = mlx5_esw_bridge_vport_init(vport_num, esw_owner_vhca_id, flags, br_offloads, bridge); + if (err) { NL_SET_ERR_MSG_MOD(extack, "Error initializing port"); + goto err_vport; + } + return 0; + +err_vport: + mlx5_esw_bridge_put(br_offloads, bridge); return err; } -int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads, - struct mlx5_vport *vport, struct netlink_ext_ack *extack) +int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack) { - struct mlx5_esw_bridge *bridge = vport->bridge; + return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id, 0, + br_offloads, extack); +} + +int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_bridge_port *port; int err; - if (!bridge) { + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); + if (!port) { NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge"); return -EINVAL; } - if (bridge->ifindex != ifindex) { + if (port->bridge->ifindex != ifindex) { NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge"); return -EINVAL; } - err = mlx5_esw_bridge_vport_cleanup(br_offloads, vport); + err = mlx5_esw_bridge_vport_cleanup(br_offloads, port); if (err) NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed"); return err; } -int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw, - struct mlx5_vport *vport, struct netlink_ext_ack *extack) +int 
mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack) +{ + if (!MLX5_CAP_ESW(br_offloads->esw->dev, merged_eswitch)) + return 0; + + return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id, + MLX5_ESW_BRIDGE_PORT_FLAG_PEER, + br_offloads, extack); +} + +int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack) +{ + return mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id, br_offloads, + extack); +} + +int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack) { struct mlx5_esw_bridge_port *port; struct mlx5_esw_bridge_vlan *vlan; - port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge); + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); if (!port) return -EINVAL; @@ -1148,10 +1233,10 @@ int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw, if (vlan) { if (vlan->flags == flags) return 0; - mlx5_esw_bridge_vlan_cleanup(port, vlan, vport->bridge); + mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge); } - vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, esw); + vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, br_offloads->esw); if (IS_ERR(vlan)) { NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry"); return PTR_ERR(vlan); @@ -1159,62 +1244,93 @@ int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw, return 0; } -void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport) +void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, + struct mlx5_esw_bridge_offloads *br_offloads) { struct mlx5_esw_bridge_port *port; struct mlx5_esw_bridge_vlan *vlan; - port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge); + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); if (!port) return; vlan = mlx5_esw_bridge_vlan_lookup(vid, port); if (!vlan) return; - mlx5_esw_bridge_vlan_cleanup(port, vlan, vport->bridge); + mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge); } -void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - struct switchdev_notifier_fdb_info *fdb_info) +void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct switchdev_notifier_fdb_info *fdb_info) { - struct mlx5_esw_bridge *bridge = vport->bridge; struct mlx5_esw_bridge_fdb_entry *entry; - u16 vport_num = vport->vport; + struct mlx5_esw_bridge_fdb_key key; + struct mlx5_esw_bridge_port *port; + struct mlx5_esw_bridge *bridge; - if (!bridge) { - esw_info(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num); + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); + if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER) + return; + + bridge = port->bridge; + ether_addr_copy(key.addr, fdb_info->addr); + key.vid = fdb_info->vid; + entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params); + if (!entry) { + esw_debug(br_offloads->esw->dev, + "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n", + key.addr, key.vid, vport_num); return; } - entry = 
mlx5_esw_bridge_fdb_entry_init(dev, vport_num, fdb_info->addr, fdb_info->vid, - fdb_info->added_by_user, esw, bridge); + entry->lastuse = jiffies; +} + +void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct switchdev_notifier_fdb_info *fdb_info) +{ + struct mlx5_esw_bridge_fdb_entry *entry; + struct mlx5_esw_bridge_port *port; + struct mlx5_esw_bridge *bridge; + + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); + if (!port) + return; + + bridge = port->bridge; + entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, esw_owner_vhca_id, fdb_info->addr, + fdb_info->vid, fdb_info->added_by_user, + port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER, + br_offloads->esw, bridge); if (IS_ERR(entry)) return; if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER) mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid, SWITCHDEV_FDB_OFFLOADED); - else + else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER)) /* Take over dynamic entries to prevent kernel bridge from aging them out. */ mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid, SWITCHDEV_FDB_ADD_TO_BRIDGE); } -void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw, - struct mlx5_vport *vport, +void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, struct switchdev_notifier_fdb_info *fdb_info) { - struct mlx5_esw_bridge *bridge = vport->bridge; + struct mlx5_eswitch *esw = br_offloads->esw; struct mlx5_esw_bridge_fdb_entry *entry; struct mlx5_esw_bridge_fdb_key key; - u16 vport_num = vport->vport; + struct mlx5_esw_bridge_port *port; + struct mlx5_esw_bridge *bridge; - if (!bridge) { - esw_warn(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num); + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); + if (!port) return; - } + bridge = port->bridge; ether_addr_copy(key.addr, fdb_info->addr); key.vid = fdb_info->vid; entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params); @@ -1225,9 +1341,7 @@ void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw return; } - if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)) - mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid, - SWITCHDEV_FDB_DEL_TO_BRIDGE); + mlx5_esw_bridge_fdb_del_notify(entry); mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); } @@ -1245,11 +1359,10 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads) continue; if (time_after(lastuse, entry->lastuse)) { - mlx5_esw_bridge_fdb_entry_refresh(lastuse, entry); - } else if (time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) { - mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr, - entry->key.vid, - SWITCHDEV_FDB_DEL_TO_BRIDGE); + mlx5_esw_bridge_fdb_entry_refresh(entry); + } else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) && + time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) { + mlx5_esw_bridge_fdb_del_notify(entry); mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); } } @@ -1258,13 +1371,11 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads) static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads) { - struct mlx5_eswitch *esw = br_offloads->esw; - struct mlx5_vport *vport; + struct mlx5_esw_bridge_port *port; unsigned long i; - mlx5_esw_for_each_vport(esw, i, 
vport) - if (vport->bridge) - mlx5_esw_bridge_vport_cleanup(br_offloads, vport); + xa_for_each(&br_offloads->ports, i, port) + mlx5_esw_bridge_vport_cleanup(br_offloads, port); WARN_ONCE(!list_empty(&br_offloads->bridges), "Cleaning up bridge offloads while still having bridges attached\n"); @@ -1279,6 +1390,7 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&br_offloads->bridges); + xa_init(&br_offloads->ports); br_offloads->esw = esw; esw->br_offloads = br_offloads; @@ -1293,6 +1405,7 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw) return; mlx5_esw_bridge_flush(br_offloads); + WARN_ON(!xa_empty(&br_offloads->ports)); esw->br_offloads = NULL; kvfree(br_offloads); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h index d826942b27fc..efc39975226e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h @@ -7,6 +7,7 @@ #include <linux/notifier.h> #include <linux/list.h> #include <linux/workqueue.h> +#include <linux/xarray.h> #include "eswitch.h" struct mlx5_flow_table; @@ -15,6 +16,8 @@ struct mlx5_flow_group; struct mlx5_esw_bridge_offloads { struct mlx5_eswitch *esw; struct list_head bridges; + struct xarray ports; + struct notifier_block netdev_nb; struct notifier_block nb_blk; struct notifier_block nb; @@ -31,23 +34,36 @@ struct mlx5_esw_bridge_offloads { struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw); void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw); -int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads, - struct mlx5_vport *vport, struct netlink_ext_ack *extack); -int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads, - struct mlx5_vport *vport, struct netlink_ext_ack *extack); -void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw, - struct mlx5_vport *vport, +int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack); +int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack); +int mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack); +int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack); +void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct switchdev_notifier_fdb_info *fdb_info); +void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, struct switchdev_notifier_fdb_info *fdb_info); -void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw, - struct mlx5_vport *vport, +void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, struct switchdev_notifier_fdb_info *fdb_info); void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads); -int mlx5_esw_bridge_ageing_time_set(unsigned long 
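/*
 * A note on the new br_offloads->ports xarray: bridge ports are now looked
 * up by the (vport_num, esw_owner_vhca_id) pair instead of through struct
 * mlx5_vport, which is what lets ports of a peer eswitch be tracked in the
 * same table. One plausible way to fold the pair into a single xarray
 * index (hypothetical helper, shown only to illustrate the pattern):
 *
 *	static unsigned long bridge_port_index(u16 vport_num,
 *					       u16 esw_owner_vhca_id)
 *	{
 *		return (unsigned long)esw_owner_vhca_id << 16 | vport_num;
 *	}
 *
 *	// insertion, lookup and removal then use the usual xarray calls:
 *	xa_insert(&br_offloads->ports, bridge_port_index(v, id), port, GFP_KERNEL);
 *	port = xa_load(&br_offloads->ports, bridge_port_index(v, id));
 *	xa_erase(&br_offloads->ports, bridge_port_index(v, id));
 */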
ageing_time, struct mlx5_eswitch *esw, - struct mlx5_vport *vport); -int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw, - struct mlx5_vport *vport); -int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw, - struct mlx5_vport *vport, struct netlink_ext_ack *extack); -void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport); +int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time, + struct mlx5_esw_bridge_offloads *br_offloads); +int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable, + struct mlx5_esw_bridge_offloads *br_offloads); +int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack); +void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, + struct mlx5_esw_bridge_offloads *br_offloads); #endif /* __MLX5_ESW_BRIDGE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h index d9ab2e8bc2cb..52964a82d6a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h @@ -19,6 +19,11 @@ struct mlx5_esw_bridge_fdb_key { enum { MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0), + MLX5_ESW_BRIDGE_FLAG_PEER = BIT(1), +}; + +enum { + MLX5_ESW_BRIDGE_PORT_FLAG_PEER = BIT(0), }; struct mlx5_esw_bridge_fdb_entry { @@ -28,6 +33,7 @@ struct mlx5_esw_bridge_fdb_entry { struct list_head list; struct list_head vlan_list; u16 vport_num; + u16 esw_owner_vhca_id; u16 flags; struct mlx5_flow_handle *ingress_handle; @@ -47,6 +53,9 @@ struct mlx5_esw_bridge_vlan { struct mlx5_esw_bridge_port { u16 vport_num; + u16 esw_owner_vhca_id; + u16 flags; + struct mlx5_esw_bridge *bridge; struct xarray vlans; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index 1703384eca95..20af557ae30c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -91,9 +91,15 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_ if (err) goto reg_err; + err = devlink_rate_leaf_create(dl_port, vport); + if (err) + goto rate_err; + vport->dl_port = dl_port; return 0; +rate_err: + devlink_port_unregister(dl_port); reg_err: mlx5_esw_dl_port_free(dl_port); return err; @@ -109,6 +115,12 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo vport = mlx5_eswitch_get_vport(esw, vport_num); if (IS_ERR(vport)) return; + + if (vport->dl_port->devlink_rate) { + mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL); + devlink_rate_leaf_destroy(vport->dl_port); + } + devlink_port_unregister(vport->dl_port); mlx5_esw_dl_port_free(vport->dl_port); vport->dl_port = NULL; @@ -148,8 +160,16 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p if (err) return err; + err = devlink_rate_leaf_create(dl_port, vport); + if (err) + goto rate_err; + vport->dl_port = dl_port; return 0; + +rate_err: + devlink_port_unregister(dl_port); + return err; } void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num) @@ -159,6 +179,12 @@ void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num vport = 
mlx5_eswitch_get_vport(esw, vport_num); if (IS_ERR(vport)) return; + + if (vport->dl_port->devlink_rate) { + mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL); + devlink_rate_leaf_destroy(vport->dl_port); + } + devlink_port_unregister(vport->dl_port); vport->dl_port = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h index 227964b7d3b9..3401188e0a60 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h @@ -85,11 +85,18 @@ DECLARE_EVENT_CLASS(mlx5_esw_bridge_port_template, TP_ARGS(port), TP_STRUCT__entry( __field(u16, vport_num) + __field(u16, esw_owner_vhca_id) + __field(u16, flags) ), TP_fast_assign( __entry->vport_num = port->vport_num; + __entry->esw_owner_vhca_id = port->esw_owner_vhca_id; + __entry->flags = port->flags; ), - TP_printk("vport_num=%hu", __entry->vport_num) + TP_printk("vport_num=%hu esw_owner_vhca_id=%hu flags=%hx", + __entry->vport_num, + __entry->esw_owner_vhca_id, + __entry->flags) ); DEFINE_EVENT(mlx5_esw_bridge_port_template, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h new file mode 100644 index 000000000000..458baf0c6415 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mlx5 + +#if !defined(_MLX5_ESW_TP_) || defined(TRACE_HEADER_MULTI_READ) +#define _MLX5_ESW_TP_ + +#include <linux/tracepoint.h> +#include "eswitch.h" + +TRACE_EVENT(mlx5_esw_vport_qos_destroy, + TP_PROTO(const struct mlx5_vport *vport), + TP_ARGS(vport), + TP_STRUCT__entry(__string(devname, dev_name(vport->dev->device)) + __field(unsigned short, vport_id) + __field(unsigned int, tsar_ix) + ), + TP_fast_assign(__assign_str(devname, dev_name(vport->dev->device)); + __entry->vport_id = vport->vport; + __entry->tsar_ix = vport->qos.esw_tsar_ix; + ), + TP_printk("(%s) vport=%hu tsar_ix=%u\n", + __get_str(devname), __entry->vport_id, __entry->tsar_ix + ) +); + +DECLARE_EVENT_CLASS(mlx5_esw_vport_qos_template, + TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate), + TP_ARGS(vport, bw_share, max_rate), + TP_STRUCT__entry(__string(devname, dev_name(vport->dev->device)) + __field(unsigned short, vport_id) + __field(unsigned int, tsar_ix) + __field(unsigned int, bw_share) + __field(unsigned int, max_rate) + __field(void *, group) + ), + TP_fast_assign(__assign_str(devname, dev_name(vport->dev->device)); + __entry->vport_id = vport->vport; + __entry->tsar_ix = vport->qos.esw_tsar_ix; + __entry->bw_share = bw_share; + __entry->max_rate = max_rate; + __entry->group = vport->qos.group; + ), + TP_printk("(%s) vport=%hu tsar_ix=%u bw_share=%u, max_rate=%u group=%p\n", + __get_str(devname), __entry->vport_id, __entry->tsar_ix, + __entry->bw_share, __entry->max_rate, __entry->group + ) +); + +DEFINE_EVENT(mlx5_esw_vport_qos_template, mlx5_esw_vport_qos_create, + TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate), + TP_ARGS(vport, bw_share, max_rate) + ); + +DEFINE_EVENT(mlx5_esw_vport_qos_template, mlx5_esw_vport_qos_config, + TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate), + TP_ARGS(vport, bw_share, max_rate) + 
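/*
 * The QoS tracepoints declared in this new header are fired from esw/qos.c
 * below, e.g. trace_mlx5_esw_group_qos_config() in esw_qos_group_config().
 * With TRACE_SYSTEM set to mlx5 they appear under the usual tracefs path,
 * so they can be watched with something like:
 *
 *	echo 1 > /sys/kernel/tracing/events/mlx5/mlx5_esw_group_qos_config/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */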
); + +DECLARE_EVENT_CLASS(mlx5_esw_group_qos_template, + TP_PROTO(const struct mlx5_core_dev *dev, + const struct mlx5_esw_rate_group *group, + unsigned int tsar_ix), + TP_ARGS(dev, group, tsar_ix), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) + __field(const void *, group) + __field(unsigned int, tsar_ix) + ), + TP_fast_assign(__assign_str(devname, dev_name(dev->device)); + __entry->group = group; + __entry->tsar_ix = tsar_ix; + ), + TP_printk("(%s) group=%p tsar_ix=%u\n", + __get_str(devname), __entry->group, __entry->tsar_ix + ) +); + +DEFINE_EVENT(mlx5_esw_group_qos_template, mlx5_esw_group_qos_create, + TP_PROTO(const struct mlx5_core_dev *dev, + const struct mlx5_esw_rate_group *group, + unsigned int tsar_ix), + TP_ARGS(dev, group, tsar_ix) + ); + +DEFINE_EVENT(mlx5_esw_group_qos_template, mlx5_esw_group_qos_destroy, + TP_PROTO(const struct mlx5_core_dev *dev, + const struct mlx5_esw_rate_group *group, + unsigned int tsar_ix), + TP_ARGS(dev, group, tsar_ix) + ); + +TRACE_EVENT(mlx5_esw_group_qos_config, + TP_PROTO(const struct mlx5_core_dev *dev, + const struct mlx5_esw_rate_group *group, + unsigned int tsar_ix, u32 bw_share, u32 max_rate), + TP_ARGS(dev, group, tsar_ix, bw_share, max_rate), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) + __field(const void *, group) + __field(unsigned int, tsar_ix) + __field(unsigned int, bw_share) + __field(unsigned int, max_rate) + ), + TP_fast_assign(__assign_str(devname, dev_name(dev->device)); + __entry->group = group; + __entry->tsar_ix = tsar_ix; + __entry->bw_share = bw_share; + __entry->max_rate = max_rate; + ), + TP_printk("(%s) group=%p tsar_ix=%u bw_share=%u max_rate=%u\n", + __get_str(devname), __entry->group, __entry->tsar_ix, + __entry->bw_share, __entry->max_rate + ) +); +#endif /* _MLX5_ESW_TP_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH esw/diag +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE qos_tracepoint +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c index 3da7becc1069..425c91814b34 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c @@ -364,6 +364,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw, dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.vport.num = e->vport; dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); + dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID; e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1); if (IS_ERR(e->fwd_rule)) { mlx5_destroy_flow_group(e->fwd_grp); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c index d9041b16611d..df277a6cddc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c @@ -11,6 +11,7 @@ #include "mlx5_core.h" #include "eswitch.h" #include "fs_core.h" +#include "esw/qos.h" enum { LEGACY_VEPA_PRIO = 0, @@ -508,3 +509,22 @@ unlock: mutex_unlock(&esw->state_lock); return err; } + +int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, + u32 max_rate, u32 min_rate) +{ + struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + int err; + + if (!mlx5_esw_allowed(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + err = 
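/*
 * mlx5_eswitch_set_vport_rate() here is the legacy entry point that moved
 * into esw/legacy.c; it now simply wraps the two new QoS setters under
 * state_lock. Upstream it is reached from the ndo_set_vf_rate callback,
 * roughly as in this sketch (VF n maps to vport n + 1 since vport 0 is the
 * PF; the exact caller code may differ):
 *
 *	static int mlx5e_set_vf_rate(struct net_device *dev, int vf,
 *				     int min_tx_rate, int max_tx_rate)
 *	{
 *		struct mlx5e_priv *priv = netdev_priv(dev);
 *
 *		return mlx5_eswitch_set_vport_rate(priv->mdev->priv.eswitch,
 *						   vf + 1, max_tx_rate,
 *						   min_tx_rate);
 *	}
 */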
mlx5_esw_qos_set_vport_min_rate(esw, evport, min_rate, NULL); + if (!err) + err = mlx5_esw_qos_set_vport_max_rate(esw, evport, max_rate, NULL); + mutex_unlock(&esw->state_lock); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c new file mode 100644 index 000000000000..985e305179d1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -0,0 +1,869 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#include "eswitch.h" +#include "esw/qos.h" +#include "en/port.h" +#define CREATE_TRACE_POINTS +#include "diag/qos_tracepoint.h" + +/* Minimum supported BW share value by the HW is 1 Mbit/sec */ +#define MLX5_MIN_BW_SHARE 1 + +#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ + min_t(u32, max_t(u32, DIV_ROUND_UP(rate, divider), MLX5_MIN_BW_SHARE), limit) + +struct mlx5_esw_rate_group { + u32 tsar_ix; + u32 max_rate; + u32 min_rate; + u32 bw_share; + struct list_head list; +}; + +static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx, + u32 parent_ix, u32 tsar_ix, + u32 max_rate, u32 bw_share) +{ + u32 bitmask = 0; + + if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) + return -EOPNOTSUPP; + + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_ix); + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; + + return mlx5_modify_scheduling_element_cmd(dev, + SCHEDULING_HIERARCHY_E_SWITCH, + sched_ctx, + tsar_ix, + bitmask); +} + +static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group, + u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) +{ + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_core_dev *dev = esw->dev; + int err; + + err = esw_qos_tsar_config(dev, sched_ctx, + esw->qos.root_tsar_ix, group->tsar_ix, + max_rate, bw_share); + if (err) + NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed"); + + trace_mlx5_esw_group_qos_config(dev, group, group->tsar_ix, bw_share, max_rate); + + return err; +} + +static int esw_qos_vport_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + u32 max_rate, u32 bw_share, + struct netlink_ext_ack *extack) +{ + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_esw_rate_group *group = vport->qos.group; + struct mlx5_core_dev *dev = esw->dev; + u32 parent_tsar_ix; + void *vport_elem; + int err; + + if (!vport->qos.enabled) + return -EIO; + + parent_tsar_ix = group ? 
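/*
 * Scheduling hierarchy note: a vport element is parented either to its
 * rate group's TSAR or, when it has no group, to the root TSAR, which is
 * what the ternary here selects. With groups in play the tree is:
 *
 *	root TSAR
 *	  +-- group0 TSAR (default group)   <-- vport elements
 *	  +-- user-created group TSARs      <-- vports moved via devlink
 *
 * (group0 is only created when log_esw_max_sched_depth is nonzero, see
 * mlx5_esw_qos_create() below; otherwise vports attach to the root TSAR.)
 */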
group->tsar_ix : esw->qos.root_tsar_ix; + MLX5_SET(scheduling_context, sched_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); + vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, + element_attributes); + MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); + + err = esw_qos_tsar_config(dev, sched_ctx, parent_tsar_ix, vport->qos.esw_tsar_ix, + max_rate, bw_share); + if (err) { + esw_warn(esw->dev, + "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n", + vport->vport, err); + NL_SET_ERR_MSG_MOD(extack, "E-Switch modify TSAR vport element failed"); + return err; + } + + trace_mlx5_esw_vport_qos_config(vport, bw_share, max_rate); + + return 0; +} + +static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw, + struct mlx5_esw_rate_group *group, + bool group_level) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + struct mlx5_vport *evport; + u32 max_guarantee = 0; + unsigned long i; + + if (group_level) { + struct mlx5_esw_rate_group *group; + + list_for_each_entry(group, &esw->qos.groups, list) { + if (group->min_rate < max_guarantee) + continue; + max_guarantee = group->min_rate; + } + } else { + mlx5_esw_for_each_vport(esw, i, evport) { + if (!evport->enabled || !evport->qos.enabled || + evport->qos.group != group || evport->qos.min_rate < max_guarantee) + continue; + max_guarantee = evport->qos.min_rate; + } + } + + if (max_guarantee) + return max_t(u32, max_guarantee / fw_max_bw_share, 1); + + /* If vports min rate divider is 0 but their group has bw_share configured, then + * need to set bw_share for vports to minimal value. + */ + if (!group_level && !max_guarantee && group->bw_share) + return 1; + return 0; +} + +static u32 esw_qos_calc_bw_share(u32 min_rate, u32 divider, u32 fw_max) +{ + if (divider) + return MLX5_RATE_TO_BW_SHARE(min_rate, divider, fw_max); + + return 0; +} + +static int esw_qos_normalize_vports_min_rate(struct mlx5_eswitch *esw, + struct mlx5_esw_rate_group *group, + struct netlink_ext_ack *extack) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + u32 divider = esw_qos_calculate_min_rate_divider(esw, group, false); + struct mlx5_vport *evport; + unsigned long i; + u32 bw_share; + int err; + + mlx5_esw_for_each_vport(esw, i, evport) { + if (!evport->enabled || !evport->qos.enabled || evport->qos.group != group) + continue; + bw_share = esw_qos_calc_bw_share(evport->qos.min_rate, divider, fw_max_bw_share); + + if (bw_share == evport->qos.bw_share) + continue; + + err = esw_qos_vport_config(esw, evport, evport->qos.max_rate, bw_share, extack); + if (err) + return err; + + evport->qos.bw_share = bw_share; + } + + return 0; +} + +static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divider, + struct netlink_ext_ack *extack) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + struct mlx5_esw_rate_group *group; + u32 bw_share; + int err; + + list_for_each_entry(group, &esw->qos.groups, list) { + bw_share = esw_qos_calc_bw_share(group->min_rate, divider, fw_max_bw_share); + + if (bw_share == group->bw_share) + continue; + + err = esw_qos_group_config(esw, group, group->max_rate, bw_share, extack); + if (err) + return err; + + group->bw_share = bw_share; + + /* All the group's vports need to be set with default bw_share + * to enable them with QOS + */ + err = esw_qos_normalize_vports_min_rate(esw, group, extack); + + if (err) + return err; + } + + return 0; +} + +int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, + struct 
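/*
 * Worked example for the min-rate normalization above, assuming firmware
 * reports max_tsar_bw_share = 100 and two vports in the same group have
 * min_rate 2000 and 1000 (Mbps):
 *
 *	divider     = max(2000 / 100, 1)                    = 20
 *	bw_share(A) = clamp(DIV_ROUND_UP(2000, 20), 1, 100) = 100
 *	bw_share(B) = clamp(DIV_ROUND_UP(1000, 20), 1, 100) = 50
 *
 * i.e. the programmed bw_share values keep the 2:1 ratio of the configured
 * minimum rates while staying within the device limit.
 */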
mlx5_vport *evport, + u32 min_rate, + struct netlink_ext_ack *extack) +{ + u32 fw_max_bw_share, previous_min_rate; + bool min_rate_supported; + int err; + + lockdep_assert_held(&esw->state_lock); + fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && + fw_max_bw_share >= MLX5_MIN_BW_SHARE; + if (min_rate && !min_rate_supported) + return -EOPNOTSUPP; + if (min_rate == evport->qos.min_rate) + return 0; + + previous_min_rate = evport->qos.min_rate; + evport->qos.min_rate = min_rate; + err = esw_qos_normalize_vports_min_rate(esw, evport->qos.group, extack); + if (err) + evport->qos.min_rate = previous_min_rate; + + return err; +} + +int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, + struct mlx5_vport *evport, + u32 max_rate, + struct netlink_ext_ack *extack) +{ + u32 act_max_rate = max_rate; + bool max_rate_supported; + int err; + + lockdep_assert_held(&esw->state_lock); + max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); + + if (max_rate && !max_rate_supported) + return -EOPNOTSUPP; + if (max_rate == evport->qos.max_rate) + return 0; + + /* If parent group has rate limit need to set to group + * value when new max rate is 0. + */ + if (evport->qos.group && !max_rate) + act_max_rate = evport->qos.group->max_rate; + + err = esw_qos_vport_config(esw, evport, act_max_rate, evport->qos.bw_share, extack); + + if (!err) + evport->qos.max_rate = max_rate; + + return err; +} + +static int esw_qos_set_group_min_rate(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group, + u32 min_rate, struct netlink_ext_ack *extack) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + struct mlx5_core_dev *dev = esw->dev; + u32 previous_min_rate, divider; + int err; + + if (!(MLX5_CAP_QOS(dev, esw_bw_share) && fw_max_bw_share >= MLX5_MIN_BW_SHARE)) + return -EOPNOTSUPP; + + if (min_rate == group->min_rate) + return 0; + + previous_min_rate = group->min_rate; + group->min_rate = min_rate; + divider = esw_qos_calculate_min_rate_divider(esw, group, true); + err = esw_qos_normalize_groups_min_rate(esw, divider, extack); + if (err) { + group->min_rate = previous_min_rate; + NL_SET_ERR_MSG_MOD(extack, "E-Switch group min rate setting failed"); + + /* Attempt restoring previous configuration */ + divider = esw_qos_calculate_min_rate_divider(esw, group, true); + if (esw_qos_normalize_groups_min_rate(esw, divider, extack)) + NL_SET_ERR_MSG_MOD(extack, "E-Switch BW share restore failed"); + } + + return err; +} + +static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw, + struct mlx5_esw_rate_group *group, + u32 max_rate, struct netlink_ext_ack *extack) +{ + struct mlx5_vport *vport; + unsigned long i; + int err; + + if (group->max_rate == max_rate) + return 0; + + err = esw_qos_group_config(esw, group, max_rate, group->bw_share, extack); + if (err) + return err; + + group->max_rate = max_rate; + + /* Any unlimited vports in the group should be set + * with the value of the group. 
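 * Concretely: with the group capped at 10000 Mbps, a member vport whose
 * own max_rate is 0 ("unlimited") is reprogrammed below with
 * max_average_bw = 10000, while a vport explicitly capped at e.g. 5000
 * keeps its own limit, since the loop skips vports with a nonzero
 * max_rate.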
+ */ + mlx5_esw_for_each_vport(esw, i, vport) { + if (!vport->enabled || !vport->qos.enabled || + vport->qos.group != group || vport->qos.max_rate) + continue; + + err = esw_qos_vport_config(esw, vport, max_rate, vport->qos.bw_share, extack); + if (err) + NL_SET_ERR_MSG_MOD(extack, + "E-Switch vport implicit rate limit setting failed"); + } + + return err; +} + +static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + u32 max_rate, u32 bw_share) +{ + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_esw_rate_group *group = vport->qos.group; + struct mlx5_core_dev *dev = esw->dev; + u32 parent_tsar_ix; + void *vport_elem; + int err; + + parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix; + MLX5_SET(scheduling_context, sched_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); + vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); + MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_tsar_ix); + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); + + err = mlx5_create_scheduling_element_cmd(dev, + SCHEDULING_HIERARCHY_E_SWITCH, + sched_ctx, + &vport->qos.esw_tsar_ix); + if (err) { + esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n", + vport->vport, err); + return err; + } + + return 0; +} + +static int esw_qos_update_group_scheduling_element(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + struct mlx5_esw_rate_group *curr_group, + struct mlx5_esw_rate_group *new_group, + struct netlink_ext_ack *extack) +{ + u32 max_rate; + int err; + + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + vport->qos.esw_tsar_ix); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR vport element failed"); + return err; + } + + vport->qos.group = new_group; + max_rate = vport->qos.max_rate ? vport->qos.max_rate : new_group->max_rate; + + /* If vport is unlimited, we set the group's value. + * Therefore, if the group is limited it will apply to + * the vport as well and if not, vport will remain unlimited. + */ + err = esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "E-Switch vport group set failed."); + goto err_sched; + } + + return 0; + +err_sched: + vport->qos.group = curr_group; + max_rate = vport->qos.max_rate ? 
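/*
 * Moving a vport between groups is a destroy-and-recreate sequence: the
 * old scheduling element is destroyed, vport->qos.group is repointed, and
 * a fresh element is created under the new group's TSAR. The err_sched
 * path here rolls a failed recreate back by restoring curr_group and
 * rebuilding the element under the old parent; if even that fails, only a
 * warning can be emitted.
 */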
vport->qos.max_rate : curr_group->max_rate; + if (esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share)) + esw_warn(esw->dev, "E-Switch vport group restore failed (vport=%d)\n", + vport->vport); + + return err; +} + +static int esw_qos_vport_update_group(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + struct mlx5_esw_rate_group *group, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_rate_group *new_group, *curr_group; + int err; + + if (!vport->enabled) + return -EINVAL; + + curr_group = vport->qos.group; + new_group = group ?: esw->qos.group0; + if (curr_group == new_group) + return 0; + + err = esw_qos_update_group_scheduling_element(esw, vport, curr_group, new_group, extack); + if (err) + return err; + + /* Recalculate bw share weights of old and new groups */ + if (vport->qos.bw_share) { + esw_qos_normalize_vports_min_rate(esw, curr_group, extack); + esw_qos_normalize_vports_min_rate(esw, new_group, extack); + } + + return 0; +} + +static struct mlx5_esw_rate_group * +esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) +{ + u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_esw_rate_group *group; + u32 divider; + int err; + + if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth)) + return ERR_PTR(-EOPNOTSUPP); + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) + return ERR_PTR(-ENOMEM); + + MLX5_SET(scheduling_context, tsar_ctx, parent_element_id, + esw->qos.root_tsar_ix); + err = mlx5_create_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + tsar_ctx, + &group->tsar_ix); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for group failed"); + goto err_sched_elem; + } + + list_add_tail(&group->list, &esw->qos.groups); + + divider = esw_qos_calculate_min_rate_divider(esw, group, true); + if (divider) { + err = esw_qos_normalize_groups_min_rate(esw, divider, extack); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "E-Switch groups normalization failed"); + goto err_min_rate; + } + } + trace_mlx5_esw_group_qos_create(esw->dev, group, group->tsar_ix); + + return group; + +err_min_rate: + list_del(&group->list); + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + group->tsar_ix); + if (err) + NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed"); +err_sched_elem: + kfree(group); + return ERR_PTR(err); +} + +static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw, + struct mlx5_esw_rate_group *group, + struct netlink_ext_ack *extack) +{ + u32 divider; + int err; + + list_del(&group->list); + + divider = esw_qos_calculate_min_rate_divider(esw, NULL, true); + err = esw_qos_normalize_groups_min_rate(esw, divider, extack); + if (err) + NL_SET_ERR_MSG_MOD(extack, "E-Switch groups' normalization failed"); + + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + group->tsar_ix); + if (err) + NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed"); + + trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix); + kfree(group); + return err; +} + +static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type) +{ + switch (type) { + case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_TASR; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_VPORT; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC: + return MLX5_CAP_QOS(dev, 
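/*
 * These rate groups are what back the new devlink-rate node objects:
 * mlx5_esw_devlink_rate_node_new() and _del() below call straight into
 * esw_qos_create_rate_group() and esw_qos_destroy_rate_group(). From user
 * space the feature is exercised roughly like this (illustrative iproute2
 * invocations with placeholder names; exact syntax may vary by version):
 *
 *	devlink port function rate add pci/0000:08:00.0/mygroup
 *	devlink port function rate set pci/0000:08:00.0/mygroup tx_max 100mbit
 *	devlink port function rate set pci/0000:08:00.0/1 parent mygroup
 */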
esw_element_type) & + ELEMENT_TYPE_CAP_MASK_VPORT_TC; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC; + } + return false; +} + +void mlx5_esw_qos_create(struct mlx5_eswitch *esw) +{ + u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_core_dev *dev = esw->dev; + __be32 *attr; + int err; + + if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) + return; + + if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR)) + return; + + mutex_lock(&esw->state_lock); + if (esw->qos.enabled) + goto unlock; + + MLX5_SET(scheduling_context, tsar_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); + + attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes); + *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16); + + err = mlx5_create_scheduling_element_cmd(dev, + SCHEDULING_HIERARCHY_E_SWITCH, + tsar_ctx, + &esw->qos.root_tsar_ix); + if (err) { + esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err); + goto unlock; + } + + INIT_LIST_HEAD(&esw->qos.groups); + if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) { + esw->qos.group0 = esw_qos_create_rate_group(esw, NULL); + if (IS_ERR(esw->qos.group0)) { + esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n", + PTR_ERR(esw->qos.group0)); + goto err_group0; + } + } + esw->qos.enabled = true; +unlock: + mutex_unlock(&esw->state_lock); + return; + +err_group0: + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + esw->qos.root_tsar_ix); + if (err) + esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err); + mutex_unlock(&esw->state_lock); +} + +void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw) +{ + struct devlink *devlink = priv_to_devlink(esw->dev); + int err; + + devlink_rate_nodes_destroy(devlink); + mutex_lock(&esw->state_lock); + if (!esw->qos.enabled) + goto unlock; + + if (esw->qos.group0) + esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL); + + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + esw->qos.root_tsar_ix); + if (err) + esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err); + + esw->qos.enabled = false; +unlock: + mutex_unlock(&esw->state_lock); +} + +int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + u32 max_rate, u32 bw_share) +{ + int err; + + lockdep_assert_held(&esw->state_lock); + if (!esw->qos.enabled) + return 0; + + if (vport->qos.enabled) + return -EEXIST; + + vport->qos.group = esw->qos.group0; + + err = esw_qos_vport_create_sched_element(esw, vport, max_rate, bw_share); + if (!err) { + vport->qos.enabled = true; + trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate); + } + + return err; +} + +void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + int err; + + lockdep_assert_held(&esw->state_lock); + if (!esw->qos.enabled || !vport->qos.enabled) + return; + WARN(vport->qos.group && vport->qos.group != esw->qos.group0, + "Disabling QoS on port before detaching it from group"); + + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + vport->qos.esw_tsar_ix); + if (err) + esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n", + vport->vport, err); + + vport->qos.enabled = false; + trace_mlx5_esw_vport_qos_destroy(vport); +} + +int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps) +{ + 
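/*
 * mlx5_esw_qos_modify_vport_rate() is the relocated and renamed
 * mlx5_esw_modify_vport_rate() (see the eswitch.h hunk further down). It
 * programs only max_average_bw, in Mbps, on an already enabled vport
 * element, for rate limits arriving outside the devlink-rate path.
 */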
u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_vport *vport; + u32 bitmask; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return PTR_ERR(vport); + + if (!vport->qos.enabled) + return -EOPNOTSUPP; + + MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); + bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + + return mlx5_modify_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + ctx, + vport->qos.esw_tsar_ix, + bitmask); +} + +#define MLX5_LINKSPEED_UNIT 125000 /* 1Mbps in Bps */ + +/* Converts a bytes-per-second value passed in a pointer into megabits per + * second, rewriting it in place. Returns an error if the converted rate + * exceeds the link speed or is not a whole number of Mbps. + */ +static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *name, + u64 *rate, struct netlink_ext_ack *extack) +{ + u32 link_speed_max, remainder; + u64 value; + int err; + + err = mlx5e_port_max_linkspeed(mdev, &link_speed_max); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed"); + return err; + } + + value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder); + if (remainder) { + pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n", + name, *rate); + NL_SET_ERR_MSG_MOD(extack, "TX rate value not in link speed units of 1Mbps"); + return -EINVAL; + } + + if (value > link_speed_max) { + pr_err("%s rate value %lluMbps exceeds link maximum speed %u.\n", + name, value, link_speed_max); + NL_SET_ERR_MSG_MOD(extack, "TX rate value exceeds link maximum speed"); + return -EINVAL; + } + + *rate = value; + return 0; +} + +/* Eswitch devlink rate API */ + +int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv, + u64 tx_share, struct netlink_ext_ack *extack) +{ + struct mlx5_vport *vport = priv; + struct mlx5_eswitch *esw; + int err; + + esw = vport->dev->priv.eswitch; + if (!mlx5_esw_allowed(esw)) + return -EPERM; + + err = esw_qos_devlink_rate_to_mbps(vport->dev, "tx_share", &tx_share, extack); + if (err) + return err; + + mutex_lock(&esw->state_lock); + err = mlx5_esw_qos_set_vport_min_rate(esw, vport, tx_share, extack); + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv, + u64 tx_max, struct netlink_ext_ack *extack) +{ + struct mlx5_vport *vport = priv; + struct mlx5_eswitch *esw; + int err; + + esw = vport->dev->priv.eswitch; + if (!mlx5_esw_allowed(esw)) + return -EPERM; + + err = esw_qos_devlink_rate_to_mbps(vport->dev, "tx_max", &tx_max, extack); + if (err) + return err; + + mutex_lock(&esw->state_lock); + err = mlx5_esw_qos_set_vport_max_rate(esw, vport, tx_max, extack); + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv, + u64 tx_share, struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(rate_node->devlink); + struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_esw_rate_group *group = priv; + int err; + + err = esw_qos_devlink_rate_to_mbps(dev, "tx_share", &tx_share, extack); + if (err) + return err; + + mutex_lock(&esw->state_lock); + err = esw_qos_set_group_min_rate(esw, group, tx_share, extack); + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv, + u64 tx_max, struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev =
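/*
 * Example of the conversion above: devlink rates are passed in bytes per
 * second, so tx_max = 1250000 Bps / 125000 = 10 Mbps with remainder 0 is
 * accepted and *rate is rewritten to 10, while tx_max = 1300000 Bps leaves
 * remainder 50000 (1300000 = 10 * 125000 + 50000) and is rejected with
 * -EINVAL, as is any value above the PHY's maximum link speed.
 */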
devlink_priv(rate_node->devlink); + struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_esw_rate_group *group = priv; + int err; + + err = esw_qos_devlink_rate_to_mbps(dev, "tx_max", &tx_max, extack); + if (err) + return err; + + mutex_lock(&esw->state_lock); + err = esw_qos_set_group_max_rate(esw, group, tx_max, extack); + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_rate_group *group; + struct mlx5_eswitch *esw; + int err = 0; + + esw = mlx5_devlink_eswitch_get(rate_node->devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); + + mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_OFFLOADS) { + NL_SET_ERR_MSG_MOD(extack, + "Rate node creation supported only in switchdev mode"); + err = -EOPNOTSUPP; + goto unlock; + } + + group = esw_qos_create_rate_group(esw, extack); + if (IS_ERR(group)) { + err = PTR_ERR(group); + goto unlock; + } + + *priv = group; +unlock: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_rate_group *group = priv; + struct mlx5_eswitch *esw; + int err; + + esw = mlx5_devlink_eswitch_get(rate_node->devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); + + mutex_lock(&esw->state_lock); + err = esw_qos_destroy_rate_group(esw, group, extack); + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + struct mlx5_esw_rate_group *group, + struct netlink_ext_ack *extack) +{ + int err; + + mutex_lock(&esw->state_lock); + err = esw_qos_vport_update_group(esw, vport, group, extack); + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate, + struct devlink_rate *parent, + void *priv, void *parent_priv, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_rate_group *group; + struct mlx5_vport *vport = priv; + + if (!parent) + return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch, + vport, NULL, extack); + + group = parent_priv; + return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch, vport, group, extack); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h new file mode 100644 index 000000000000..28451abe2d2f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#ifndef __MLX5_ESW_QOS_H__ +#define __MLX5_ESW_QOS_H__ + +#ifdef CONFIG_MLX5_ESWITCH + +int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, + struct mlx5_vport *evport, + u32 min_rate, + struct netlink_ext_ack *extack); +int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, + struct mlx5_vport *evport, + u32 max_rate, + struct netlink_ext_ack *extack); +void mlx5_esw_qos_create(struct mlx5_eswitch *esw); +void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw); +int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + u32 max_rate, u32 bw_share); +void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport); + +int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv, + u64 tx_share, struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv, + u64 tx_max, struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv, + u64 tx_share, struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv, + u64 tx_max, struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv, + struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, + struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate, + struct devlink_rate *parent, + void *priv, void *parent_priv, + struct netlink_ext_ack *extack); +#endif + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h deleted file mode 100644 index 2a3f4be10030..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h +++ /dev/null @@ -1,42 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ -/* Copyright (c) 2021 Mellanox Technologies. 
*/ - -#ifndef __MLX5_EN_TC_SAMPLE_H__ -#define __MLX5_EN_TC_SAMPLE_H__ - -#include "en.h" -#include "eswitch.h" - -struct mlx5e_priv; -struct mlx5_flow_attr; -struct mlx5_esw_psample; - -struct mlx5_sample_attr { - u32 group_num; - u32 rate; - u32 trunc_size; - u32 restore_obj_id; - u32 sampler_id; - struct mlx5_flow_table *sample_default_tbl; - struct mlx5_sample_flow *sample_flow; -}; - -void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj); - -struct mlx5_flow_handle * -mlx5_esw_sample_offload(struct mlx5_esw_psample *sample_priv, - struct mlx5_flow_spec *spec, - struct mlx5_flow_attr *attr); - -void -mlx5_esw_sample_unoffload(struct mlx5_esw_psample *sample_priv, - struct mlx5_flow_handle *rule, - struct mlx5_flow_attr *attr); - -struct mlx5_esw_psample * -mlx5_esw_sample_init(struct mlx5e_priv *priv); - -void -mlx5_esw_sample_cleanup(struct mlx5_esw_psample *esw_psample); - -#endif /* __MLX5_EN_TC_SAMPLE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 97e6cb6f13c1..ec136b499204 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -38,6 +38,7 @@ #include <linux/mlx5/mpfs.h> #include "esw/acl/lgcy.h" #include "esw/legacy.h" +#include "esw/qos.h" #include "mlx5_core.h" #include "lib/eq.h" #include "eswitch.h" @@ -740,201 +741,6 @@ static void esw_vport_change_handler(struct work_struct *work) mutex_unlock(&esw->state_lock); } -static bool element_type_supported(struct mlx5_eswitch *esw, int type) -{ - const struct mlx5_core_dev *dev = esw->dev; - - switch (type) { - case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: - return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_TASR; - case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT: - return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_VPORT; - case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC: - return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_VPORT_TC; - case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC: - return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC; - } - return false; -} - -/* Vport QoS management */ -static void esw_create_tsar(struct mlx5_eswitch *esw) -{ - u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; - struct mlx5_core_dev *dev = esw->dev; - __be32 *attr; - int err; - - if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) - return; - - if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR)) - return; - - if (esw->qos.enabled) - return; - - MLX5_SET(scheduling_context, tsar_ctx, element_type, - SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); - - attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes); - *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16); - - err = mlx5_create_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - tsar_ctx, - &esw->qos.root_tsar_id); - if (err) { - esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err); - return; - } - - esw->qos.enabled = true; -} - -static void esw_destroy_tsar(struct mlx5_eswitch *esw) -{ - int err; - - if (!esw->qos.enabled) - return; - - err = mlx5_destroy_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - esw->qos.root_tsar_id); - if (err) - esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err); - - esw->qos.enabled = false; -} - -static int esw_vport_enable_qos(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - u32 initial_max_rate, u32 
initial_bw_share) -{ - u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; - struct mlx5_core_dev *dev = esw->dev; - void *vport_elem; - int err = 0; - - if (!esw->qos.enabled) - return 0; - - if (vport->qos.enabled) - return -EEXIST; - - MLX5_SET(scheduling_context, sched_ctx, element_type, - SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); - vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, - element_attributes); - MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); - MLX5_SET(scheduling_context, sched_ctx, parent_element_id, - esw->qos.root_tsar_id); - MLX5_SET(scheduling_context, sched_ctx, max_average_bw, - initial_max_rate); - MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share); - - err = mlx5_create_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - sched_ctx, - &vport->qos.esw_tsar_ix); - if (err) { - esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n", - vport->vport, err); - return err; - } - - vport->qos.enabled = true; - return 0; -} - -static void esw_vport_disable_qos(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - int err; - - if (!vport->qos.enabled) - return; - - err = mlx5_destroy_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - vport->qos.esw_tsar_ix); - if (err) - esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n", - vport->vport, err); - - vport->qos.enabled = false; -} - -static int esw_vport_qos_config(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - u32 max_rate, u32 bw_share) -{ - u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; - struct mlx5_core_dev *dev = esw->dev; - void *vport_elem; - u32 bitmask = 0; - int err = 0; - - if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) - return -EOPNOTSUPP; - - if (!vport->qos.enabled) - return -EIO; - - MLX5_SET(scheduling_context, sched_ctx, element_type, - SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); - vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, - element_attributes); - MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); - MLX5_SET(scheduling_context, sched_ctx, parent_element_id, - esw->qos.root_tsar_id); - MLX5_SET(scheduling_context, sched_ctx, max_average_bw, - max_rate); - MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; - - err = mlx5_modify_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - sched_ctx, - vport->qos.esw_tsar_ix, - bitmask); - if (err) { - esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n", - vport->vport, err); - return err; - } - - return 0; -} - -int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, - u32 rate_mbps) -{ - u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; - struct mlx5_vport *vport; - - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return PTR_ERR(vport); - - if (!vport->qos.enabled) - return -EOPNOTSUPP; - - MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); - - return mlx5_modify_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - ctx, - vport->qos.esw_tsar_ix, - MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW); -} - static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac) { ((u8 *)node_guid)[7] = mac[0]; @@ -976,7 +782,7 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) return err; /* 
Attach vport to the eswitch rate limiter */ - esw_vport_enable_qos(esw, vport, vport->qos.max_rate, vport->qos.bw_share); + mlx5_esw_qos_vport_enable(esw, vport, vport->qos.max_rate, vport->qos.bw_share); if (mlx5_esw_is_manager_vport(esw, vport_num)) return 0; @@ -1013,7 +819,7 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport vport_num, 1, MLX5_VPORT_ADMIN_STATE_DOWN); - esw_vport_disable_qos(esw, vport); + mlx5_esw_qos_vport_disable(esw, vport); esw_vport_cleanup_acl(esw, vport); } @@ -1454,12 +1260,10 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) mlx5_eswitch_update_num_of_vfs(esw, num_vfs); - esw_create_tsar(esw); + mlx5_esw_qos_create(esw); esw->mode = mode; - mlx5_lag_update(esw->dev); - if (mode == MLX5_ESWITCH_LEGACY) { err = esw_legacy_enable(esw); } else { @@ -1486,7 +1290,7 @@ abort: if (mode == MLX5_ESWITCH_OFFLOADS) mlx5_rescan_drivers(esw->dev); - esw_destroy_tsar(esw); + mlx5_esw_qos_destroy(esw); mlx5_esw_acls_ns_cleanup(esw); return err; } @@ -1494,7 +1298,7 @@ abort: /** * mlx5_eswitch_enable - Enable eswitch * @esw: Pointer to eswitch - * @num_vfs: Enable eswitch swich for given number of VFs. + * @num_vfs: Enable eswitch switch for given number of VFs. * Caller must pass num_vfs > 0 when enabling eswitch for * vf vports. * mlx5_eswitch_enable() returns 0 on success or error code on failure. @@ -1506,6 +1310,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) if (!mlx5_esw_allowed(esw)) return 0; + mlx5_lag_disable_change(esw->dev); down_write(&esw->mode_lock); if (esw->mode == MLX5_ESWITCH_NONE) { ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); @@ -1519,6 +1324,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) esw->esw_funcs.num_vfs = num_vfs; } up_write(&esw->mode_lock); + mlx5_lag_enable_change(esw->dev); return ret; } @@ -1550,12 +1356,10 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) old_mode = esw->mode; esw->mode = MLX5_ESWITCH_NONE; - mlx5_lag_update(esw->dev); - if (old_mode == MLX5_ESWITCH_OFFLOADS) mlx5_rescan_drivers(esw->dev); - esw_destroy_tsar(esw); + mlx5_esw_qos_destroy(esw); mlx5_esw_acls_ns_cleanup(esw); if (clear_vf) @@ -1567,10 +1371,12 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) if (!mlx5_esw_allowed(esw)) return; + mlx5_lag_disable_change(esw->dev); down_write(&esw->mode_lock); mlx5_eswitch_disable_locked(esw, clear_vf); esw->esw_funcs.num_vfs = 0; up_write(&esw->mode_lock); + mlx5_lag_enable_change(esw->dev); } static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out) @@ -1759,7 +1565,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ida_init(&esw->offloads.vport_metadata_ida); xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC); mutex_init(&esw->state_lock); + lockdep_register_key(&esw->mode_lock_key); init_rwsem(&esw->mode_lock); + lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key); esw->enabled_vports = 0; esw->mode = MLX5_ESWITCH_NONE; @@ -1793,6 +1601,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); + lockdep_unregister_key(&esw->mode_lock_key); mutex_destroy(&esw->state_lock); WARN_ON(!xa_empty(&esw->offloads.vhca_map)); xa_destroy(&esw->offloads.vhca_map); @@ -1889,8 +1698,7 @@ is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num) mlx5_esw_is_sf_vport(esw, vport_num); } -int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, - 
struct devlink_port *port, +int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port, u8 *hw_addr, int *hw_addr_len, struct netlink_ext_ack *extack) { @@ -1899,7 +1707,7 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, int err = -EOPNOTSUPP; u16 vport_num; - esw = mlx5_devlink_eswitch_get(devlink); + esw = mlx5_devlink_eswitch_get(port->devlink); if (IS_ERR(esw)) return PTR_ERR(esw); @@ -1923,8 +1731,7 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, return err; } -int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink, - struct devlink_port *port, +int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port, const u8 *hw_addr, int hw_addr_len, struct netlink_ext_ack *extack) { @@ -1933,7 +1740,7 @@ int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink, int err = -EOPNOTSUPP; u16 vport_num; - esw = mlx5_devlink_eswitch_get(devlink); + esw = mlx5_devlink_eswitch_get(port->devlink); if (IS_ERR(esw)) { NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr"); return PTR_ERR(esw); @@ -2049,110 +1856,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, return err; } -static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) -{ - u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); - struct mlx5_vport *evport; - u32 max_guarantee = 0; - unsigned long i; - - mlx5_esw_for_each_vport(esw, i, evport) { - if (!evport->enabled || evport->qos.min_rate < max_guarantee) - continue; - max_guarantee = evport->qos.min_rate; - } - - if (max_guarantee) - return max_t(u32, max_guarantee / fw_max_bw_share, 1); - return 0; -} - -static int normalize_vports_min_rate(struct mlx5_eswitch *esw) -{ - u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); - u32 divider = calculate_vports_min_rate_divider(esw); - struct mlx5_vport *evport; - u32 vport_max_rate; - u32 vport_min_rate; - unsigned long i; - u32 bw_share; - int err; - - mlx5_esw_for_each_vport(esw, i, evport) { - if (!evport->enabled) - continue; - vport_min_rate = evport->qos.min_rate; - vport_max_rate = evport->qos.max_rate; - bw_share = 0; - - if (divider) - bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate, - divider, - fw_max_bw_share); - - if (bw_share == evport->qos.bw_share) - continue; - - err = esw_vport_qos_config(esw, evport, vport_max_rate, - bw_share); - if (!err) - evport->qos.bw_share = bw_share; - else - return err; - } - - return 0; -} - -int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, - u32 max_rate, u32 min_rate) -{ - struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); - u32 fw_max_bw_share; - u32 previous_min_rate; - bool min_rate_supported; - bool max_rate_supported; - int err = 0; - - if (!mlx5_esw_allowed(esw)) - return -EPERM; - if (IS_ERR(evport)) - return PTR_ERR(evport); - - fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); - min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && - fw_max_bw_share >= MLX5_MIN_BW_SHARE; - max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); - - if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported)) - return -EOPNOTSUPP; - - mutex_lock(&esw->state_lock); - - if (min_rate == evport->qos.min_rate) - goto set_max_rate; - - previous_min_rate = evport->qos.min_rate; - evport->qos.min_rate = min_rate; - err = normalize_vports_min_rate(esw); - if (err) { - evport->qos.min_rate = previous_min_rate; - goto unlock; - } - -set_max_rate: - if (max_rate == evport->qos.max_rate) - goto unlock; 
- - err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share); - if (!err) - evport->qos.max_rate = max_rate; - -unlock: - mutex_unlock(&esw->state_lock); - return err; -} - int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, u16 vport_num, struct ifla_vf_stats *vf_stats) @@ -2366,10 +2069,23 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw) */ void mlx5_esw_unlock(struct mlx5_eswitch *esw) { + if (!mlx5_esw_allowed(esw)) + return; up_write(&esw->mode_lock); } /** + * mlx5_esw_lock() - Take write lock on esw mode lock + * @esw: eswitch device. + */ +void mlx5_esw_lock(struct mlx5_eswitch *esw) +{ + if (!mlx5_esw_allowed(esw)) + return; + down_write(&esw->mode_lock); +} + +/** * mlx5_eswitch_get_total_vports - Get total vports of the eswitch * * @dev: Pointer to core device @@ -2384,3 +2100,15 @@ u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) return mlx5_esw_allowed(esw) ? esw->total_vports : 0; } EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports); + +/** + * mlx5_eswitch_get_core_dev - Get the mdev device + * @esw : eswitch device. + * + * Return the mellanox core device which manages the eswitch. + */ +struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw) +{ + return mlx5_esw_allowed(esw) ? esw->dev : NULL; +} +EXPORT_SYMBOL(mlx5_eswitch_get_core_dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index d562edf5b0bc..2c7444101bb9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -46,7 +46,7 @@ #include "lib/fs_chains.h" #include "sf/sf.h" #include "en/tc_ct.h" -#include "esw/sample.h" +#include "en/tc/sample.h" enum mlx5_mapped_obj_type { MLX5_MAPPED_OBJ_CHAIN, @@ -61,6 +61,7 @@ struct mlx5_mapped_obj { u32 group_id; u32 rate; u32 trunc_size; + u32 tunnel_id; } sample; }; }; @@ -75,17 +76,20 @@ struct mlx5_mapped_obj { #define MLX5_MAX_MC_PER_VPORT(dev) \ (1 << MLX5_CAP_GEN(dev, log_max_current_mc_list)) -#define MLX5_MIN_BW_SHARE 1 - -#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ - min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit) - #define mlx5_esw_has_fwd_fdb(dev) \ MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table) #define esw_chains(esw) \ ((esw)->fdb_table.offloads.esw_chains_priv) +enum { + MAPPING_TYPE_CHAIN, + MAPPING_TYPE_TUNNEL, + MAPPING_TYPE_TUNNEL_ENC_OPTS, + MAPPING_TYPE_LABELS, + MAPPING_TYPE_ZONE, +}; + struct vport_ingress { struct mlx5_flow_table *acl; struct mlx5_flow_handle *allow_rule; @@ -124,6 +128,8 @@ struct vport_egress { struct { struct mlx5_flow_group *fwd_grp; struct mlx5_flow_handle *fwd_rule; + struct mlx5_flow_handle *bounce_rule; + struct mlx5_flow_group *bounce_grp; } offloads; }; }; @@ -150,8 +156,6 @@ enum mlx5_eswitch_vport_event { MLX5_VPORT_PROMISC_CHANGE = BIT(3), }; -struct mlx5_esw_bridge; - struct mlx5_vport { struct mlx5_core_dev *dev; struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE]; @@ -173,6 +177,7 @@ struct mlx5_vport { u32 bw_share; u32 min_rate; u32 max_rate; + struct mlx5_esw_rate_group *group; } qos; u16 vport; @@ -180,7 +185,6 @@ struct mlx5_vport { enum mlx5_eswitch_vport_event enabled_events; int index; struct devlink_port *dl_port; - struct mlx5_esw_bridge *bridge; }; struct mlx5_esw_indir_table; @@ -302,7 +306,9 @@ struct mlx5_eswitch { struct { bool enabled; - u32 root_tsar_id; + u32 root_tsar_ix; + struct mlx5_esw_rate_group *group0; + struct list_head groups; /* Protected by esw->state_lock */ } qos; 
struct mlx5_esw_bridge_offloads *br_offloads; @@ -315,6 +321,7 @@ struct mlx5_eswitch { u32 large_group_num; } params; struct blocking_notifier_head n_head; + struct lock_class_key mode_lock_key; }; void esw_offloads_disable(struct mlx5_eswitch *esw); @@ -327,8 +334,7 @@ int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable); u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw); void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata); -int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, - u32 rate_mbps); +int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps); /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); @@ -351,6 +357,10 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, u16 vport_num, bool setting); int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, u32 max_rate, u32 min_rate); +int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + struct mlx5_esw_rate_group *group, + struct netlink_ext_ack *extack); int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting); int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, @@ -461,7 +471,6 @@ struct mlx5_esw_flow_attr { } dests[MLX5_MAX_FLOW_FWD_VPORTS]; struct mlx5_rx_tun_attr *rx_tun_attr; struct mlx5_pkt_reformat *decap_pkt_reformat; - struct mlx5_sample_attr *sample; }; int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, @@ -475,12 +484,10 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, enum devlink_eswitch_encap_mode *encap); -int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, - struct devlink_port *port, +int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port, u8 *hw_addr, int *hw_addr_len, struct netlink_ext_ack *extack); -int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink, - struct devlink_port *port, +int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port, const u8 *hw_addr, int hw_addr_len, struct netlink_ext_ack *extack); @@ -699,11 +706,18 @@ void mlx5_esw_get(struct mlx5_core_dev *dev); void mlx5_esw_put(struct mlx5_core_dev *dev); int mlx5_esw_try_lock(struct mlx5_eswitch *esw); void mlx5_esw_unlock(struct mlx5_eswitch *esw); +void mlx5_esw_lock(struct mlx5_eswitch *esw); void esw_vport_change_handle_locked(struct mlx5_vport *vport); bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller); +int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw, + struct mlx5_eswitch *slave_esw); +void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw, + struct mlx5_eswitch *slave_esw); +int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw); + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } @@ -719,6 +733,9 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) return ERR_PTR(-EOPNOTSUPP); } +static inline void mlx5_esw_unlock(struct mlx5_eswitch *esw) { return; } +static inline void mlx5_esw_lock(struct mlx5_eswitch *esw) { return; } + static inline struct mlx5_flow_handle * esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) { @@ -731,6 +748,23 @@ mlx5_esw_vport_to_devlink_port_index(const struct 
mlx5_core_dev *dev, { return vport_num; } + +static inline int +mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw, + struct mlx5_eswitch *slave_esw) +{ + return 0; +} + +static inline void +mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw, + struct mlx5_eswitch *slave_esw) {} + +static inline int +mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) +{ + return 0; +} #endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 3bb71a186004..0d461e38add3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -187,12 +187,12 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw, static int esw_setup_sampler_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, - struct mlx5_esw_flow_attr *esw_attr, + struct mlx5_flow_attr *attr, int i) { flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER; - dest[i].sampler_id = esw_attr->sample->sampler_id; + dest[i].sampler_id = attr->sample_attr->sampler_id; return 0; } @@ -435,7 +435,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest, attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE; if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) { - esw_setup_sampler_dest(dest, flow_act, esw_attr, *i); + esw_setup_sampler_dest(dest, flow_act, attr, *i); (*i)++; } else if (attr->dest_ft) { esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i); @@ -540,10 +540,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) flow_act.modify_hdr = attr->modify_hdr; - /* esw_attr->sample is allocated only when there is a sample action */ - if (esw_attr->sample && esw_attr->sample->sample_default_tbl) { - fdb = esw_attr->sample->sample_default_tbl; - } else if (split) { + if (split) { fwd_attr.chain = attr->chain; fwd_attr.prio = attr->prio; fwd_attr.vport = esw_attr->in_rep->vport; @@ -927,6 +924,7 @@ out: struct mlx5_flow_handle * mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, + struct mlx5_eswitch *from_esw, struct mlx5_eswitch_rep *rep, u32 sqn) { @@ -945,10 +943,10 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn); /* source vport is the esw manager */ - MLX5_SET(fte_match_set_misc, misc, source_port, rep->esw->manager_vport); + MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport); if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch)) MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, - MLX5_CAP_GEN(rep->esw->dev, vhca_id)); + MLX5_CAP_GEN(from_esw->dev, vhca_id)); misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn); @@ -964,6 +962,9 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + if (rep->vport == MLX5_VPORT_UPLINK) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; + flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) @@ -1614,7 +1615,18 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch 
*esw) goto ns_err; } - table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ + + /* To be strictly correct: + * MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) + * should be: + * esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ + + * peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ + * but as the peer device might not be in switchdev mode, it's not + * possible. We use the fact that by default FW sets max vfs and max sfs + * to the same value on both devices. If it needs to be changed in the future, note that + * the peer miss group should also be created based on the number of + * total vports of the peer (currently it also uses esw->total_vports). + */ + table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) + MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs; /* create the slow path fdb with encap set, so further table instances @@ -1671,7 +1683,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw) source_eswitch_owner_vhca_id_valid, 1); } - ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ; + /* See comment above table_size calculation */ + ix = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1); @@ -2311,14 +2324,293 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num) mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); } +static int esw_set_uplink_slave_ingress_root(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave) +{ + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; + struct mlx5_eswitch *esw; + struct mlx5_flow_root_namespace *root; + struct mlx5_flow_namespace *ns; + struct mlx5_vport *vport; + int err; + + MLX5_SET(set_flow_table_root_in, in, opcode, + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); + MLX5_SET(set_flow_table_root_in, in, table_type, FS_FT_ESW_INGRESS_ACL); + MLX5_SET(set_flow_table_root_in, in, other_vport, 1); + MLX5_SET(set_flow_table_root_in, in, vport_number, MLX5_VPORT_UPLINK); + + if (master) { + esw = master->priv.eswitch; + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); + MLX5_SET(set_flow_table_root_in, in, table_of_other_vport, 1); + MLX5_SET(set_flow_table_root_in, in, table_vport_number, + MLX5_VPORT_UPLINK); + + ns = mlx5_get_flow_vport_acl_namespace(master, + MLX5_FLOW_NAMESPACE_ESW_INGRESS, + vport->index); + root = find_root(&ns->node); + mutex_lock(&root->chain_lock); + + MLX5_SET(set_flow_table_root_in, in, + table_eswitch_owner_vhca_id_valid, 1); + MLX5_SET(set_flow_table_root_in, in, + table_eswitch_owner_vhca_id, + MLX5_CAP_GEN(master, vhca_id)); + MLX5_SET(set_flow_table_root_in, in, table_id, + root->root_ft->id); + } else { + esw = slave->priv.eswitch; + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); + ns = mlx5_get_flow_vport_acl_namespace(slave, + MLX5_FLOW_NAMESPACE_ESW_INGRESS, + vport->index); + root = find_root(&ns->node); + mutex_lock(&root->chain_lock); + MLX5_SET(set_flow_table_root_in, in, table_id, root->root_ft->id); + } + + err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out)); + mutex_unlock(&root->chain_lock); + + return err; +} + +static int esw_set_slave_root_fdb(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave) +{ + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; + struct mlx5_flow_root_namespace *root; + struct
mlx5_flow_namespace *ns; + int err; + + MLX5_SET(set_flow_table_root_in, in, opcode, + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); + MLX5_SET(set_flow_table_root_in, in, table_type, + FS_FT_FDB); + + if (master) { + ns = mlx5_get_flow_namespace(master, + MLX5_FLOW_NAMESPACE_FDB); + root = find_root(&ns->node); + mutex_lock(&root->chain_lock); + MLX5_SET(set_flow_table_root_in, in, + table_eswitch_owner_vhca_id_valid, 1); + MLX5_SET(set_flow_table_root_in, in, + table_eswitch_owner_vhca_id, + MLX5_CAP_GEN(master, vhca_id)); + MLX5_SET(set_flow_table_root_in, in, table_id, + root->root_ft->id); + } else { + ns = mlx5_get_flow_namespace(slave, + MLX5_FLOW_NAMESPACE_FDB); + root = find_root(&ns->node); + mutex_lock(&root->chain_lock); + MLX5_SET(set_flow_table_root_in, in, table_id, + root->root_ft->id); + } + + err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out)); + mutex_unlock(&root->chain_lock); + + return err; +} + +static int __esw_set_master_egress_rule(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave, + struct mlx5_vport *vport, + struct mlx5_flow_table *acl) +{ + struct mlx5_flow_handle *flow_rule = NULL; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_spec *spec; + int err = 0; + void *misc; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK); + MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, + MLX5_CAP_GEN(slave, vhca_id)); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id); + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.vport.num = slave->priv.eswitch->manager_vport; + dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id); + dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; + + flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act, + &dest, 1); + if (IS_ERR(flow_rule)) + err = PTR_ERR(flow_rule); + else + vport->egress.offloads.bounce_rule = flow_rule; + + kvfree(spec); + return err; +} + +static int esw_set_master_egress_rule(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_eswitch *esw = master->priv.eswitch; + struct mlx5_flow_table_attr ft_attr = { + .max_fte = 1, .prio = 0, .level = 0, + }; + struct mlx5_flow_namespace *egress_ns; + struct mlx5_flow_table *acl; + struct mlx5_flow_group *g; + struct mlx5_vport *vport; + void *match_criteria; + u32 *flow_group_in; + int err; + + vport = mlx5_eswitch_get_vport(esw, esw->manager_vport); + if (IS_ERR(vport)) + return PTR_ERR(vport); + + egress_ns = mlx5_get_flow_vport_acl_namespace(master, + MLX5_FLOW_NAMESPACE_ESW_EGRESS, + vport->index); + if (!egress_ns) + return -EINVAL; + + if (vport->egress.acl) + return -EINVAL; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport); + if (IS_ERR(acl)) { + err = PTR_ERR(acl); + goto out; + } + + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, + match_criteria); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + misc_parameters.source_port); + 
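/* The group being built matches on both source_port and
 * source_eswitch_owner_vhca_id, so the single-FTE egress ACL can isolate
 * traffic that ingressed from the slave device's uplink;
 * __esw_set_master_egress_rule() above then bounces exactly that traffic
 * to the slave's manager vport.
 */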
MLX5_SET_TO_ONES(fte_match_param, match_criteria, + misc_parameters.source_eswitch_owner_vhca_id); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS); + + MLX5_SET(create_flow_group_in, flow_group_in, + source_eswitch_owner_vhca_id_valid, 1); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); + + g = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + goto err_group; + } + + err = __esw_set_master_egress_rule(master, slave, vport, acl); + if (err) + goto err_rule; + + vport->egress.acl = acl; + vport->egress.offloads.bounce_grp = g; + + kvfree(flow_group_in); + + return 0; + +err_rule: + mlx5_destroy_flow_group(g); +err_group: + mlx5_destroy_flow_table(acl); +out: + kvfree(flow_group_in); + return err; +} + +static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev) +{ + struct mlx5_vport *vport; + + vport = mlx5_eswitch_get_vport(dev->priv.eswitch, + dev->priv.eswitch->manager_vport); + + esw_acl_egress_ofld_cleanup(vport); +} + +int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw, + struct mlx5_eswitch *slave_esw) +{ + int err; + + err = esw_set_uplink_slave_ingress_root(master_esw->dev, + slave_esw->dev); + if (err) + return -EINVAL; + + err = esw_set_slave_root_fdb(master_esw->dev, + slave_esw->dev); + if (err) + goto err_fdb; + + err = esw_set_master_egress_rule(master_esw->dev, + slave_esw->dev); + if (err) + goto err_acl; + + return err; + +err_acl: + esw_set_slave_root_fdb(NULL, slave_esw->dev); + +err_fdb: + esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev); + + return err; +} + +void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw, + struct mlx5_eswitch *slave_esw) +{ + esw_unset_master_egress_rule(master_esw->dev); + esw_set_slave_root_fdb(NULL, slave_esw->dev); + esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev); +} + #define ESW_OFFLOADS_DEVCOM_PAIR (0) #define ESW_OFFLOADS_DEVCOM_UNPAIR (1) -static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, - struct mlx5_eswitch *peer_esw) +static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw) { + const struct mlx5_eswitch_rep_ops *ops; + struct mlx5_eswitch_rep *rep; + unsigned long i; + u8 rep_type; - return esw_add_fdb_peer_miss_rules(esw, peer_esw->dev); + mlx5_esw_for_each_rep(esw, i, rep) { + rep_type = NUM_REP_TYPES; + while (rep_type--) { + ops = esw->offloads.rep_ops[rep_type]; + if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && + ops->event) + ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, NULL); + } + } } static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw) @@ -2326,9 +2618,42 @@ static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw) #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) mlx5e_tc_clean_fdb_peer_flows(esw); #endif + mlx5_esw_offloads_rep_event_unpair(esw); esw_del_fdb_peer_miss_rules(esw); } +static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, + struct mlx5_eswitch *peer_esw) +{ + const struct mlx5_eswitch_rep_ops *ops; + struct mlx5_eswitch_rep *rep; + unsigned long i; + u8 rep_type; + int err; + + err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev); + if (err) + return err; + + mlx5_esw_for_each_rep(esw, i, rep) { + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { + ops = esw->offloads.rep_ops[rep_type]; + if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && + ops->event) { + err = ops->event(esw, rep, 
MLX5_SWITCHDEV_EVENT_PAIR, peer_esw); + if (err) + goto err_out; + } + } + } + + return 0; + +err_out: + mlx5_esw_offloads_unpair(esw); + return err; +} + static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw, struct mlx5_eswitch *peer_esw, bool pair) @@ -2619,6 +2944,31 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) esw_vport_destroy_offloads_acl_tables(esw, vport); } +int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) +{ + struct mlx5_eswitch_rep *rep; + unsigned long i; + int ret; + + if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS) + return 0; + + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); + if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED) + return 0; + + ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK); + if (ret) + return ret; + + mlx5_esw_for_each_rep(esw, i, rep) { + if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED) + mlx5_esw_offloads_rep_load(esw, rep->vport); + } + + return 0; +} + static int esw_offloads_steering_init(struct mlx5_eswitch *esw) { struct mlx5_esw_indir_table *indir; @@ -2788,6 +3138,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) struct mapping_ctx *reg_c0_obj_pool; struct mlx5_vport *vport; unsigned long i; + u64 mapping_id; int err; if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) && @@ -2811,9 +3162,13 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_vport_metadata; - reg_c0_obj_pool = mapping_create(sizeof(struct mlx5_mapped_obj), - ESW_REG_C0_USER_DATA_METADATA_MASK, - true); + mapping_id = mlx5_query_nic_system_image_guid(esw->dev); + + reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN, + sizeof(struct mlx5_mapped_obj), + ESW_REG_C0_USER_DATA_METADATA_MASK, + true); + if (IS_ERR(reg_c0_obj_pool)) { err = PTR_ERR(reg_c0_obj_pool); goto err_pool; @@ -2991,10 +3346,11 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL; + mlx5_lag_disable_change(esw->dev); err = mlx5_esw_try_lock(esw); if (err < 0) { NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy"); - return err; + goto enable_lag; } cur_mlx5_mode = err; err = 0; @@ -3018,6 +3374,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, unlock: mlx5_esw_unlock(esw); +enable_lag: + mlx5_lag_enable_change(esw->dev); return err; } @@ -3091,8 +3449,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: - if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) + if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) { + err = 0; goto out; + } + fallthrough; case MLX5_CAP_INLINE_MODE_L2: NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index d713ae24d6b6..a1ac3a654962 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -27,7 +27,7 @@ static int pcie_core(struct notifier_block *, unsigned long, void *); static int forward_event(struct notifier_block *, unsigned long, void *); static struct mlx5_nb events_nbs_ref[] = { - /* Events to be proccessed by mlx5_core */ + /* Events to be processed by mlx5_core */ {.nb.notifier_call = any_notifier, .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY }, {.nb.notifier_call = temp_warn, .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT }, {.nb.notifier_call = 
port_module, .event_type = MLX5_EVENT_TYPE_PORT_MODULE_EVENT }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index d5da4ab65766..306279b7f9e7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -453,7 +453,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size)); - MLX5_SET(cqc, cqc, c_eqn, eqn); + MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index); MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 0bba92cf5dc0..8ec148010d62 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -1516,7 +1516,7 @@ static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, mutex_lock(&fpga_xfrm->lock); if (!fpga_xfrm->sa_ctx) - /* Unbounded xfrm, chane only sw attrs */ + /* Unbounded xfrm, change only sw attrs */ goto change_sw_xfrm_attrs; /* copy original hw sa */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 896a6c3dbdb7..7db8df64a60e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -152,17 +152,56 @@ static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns) return 0; } +static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave, + bool ft_id_valid, + u32 ft_id) +{ + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; + struct mlx5_flow_root_namespace *root; + struct mlx5_flow_namespace *ns; + + MLX5_SET(set_flow_table_root_in, in, opcode, + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); + MLX5_SET(set_flow_table_root_in, in, table_type, + FS_FT_FDB); + if (ft_id_valid) { + MLX5_SET(set_flow_table_root_in, in, + table_eswitch_owner_vhca_id_valid, 1); + MLX5_SET(set_flow_table_root_in, in, + table_eswitch_owner_vhca_id, + MLX5_CAP_GEN(master, vhca_id)); + MLX5_SET(set_flow_table_root_in, in, table_id, + ft_id); + } else { + ns = mlx5_get_flow_namespace(slave, + MLX5_FLOW_NAMESPACE_FDB); + root = find_root(&ns->node); + MLX5_SET(set_flow_table_root_in, in, table_id, + root->root_ft->id); + } + + return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out)); +} + static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 underlay_qpn, bool disconnect) { u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; struct mlx5_core_dev *dev = ns->dev; + int err; if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && underlay_qpn == 0) return 0; + if (ft->type == FS_FT_FDB && + mlx5_lag_is_shared_fdb(dev) && + !mlx5_lag_is_master(dev)) + return 0; + MLX5_SET(set_flow_table_root_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); @@ -177,7 +216,24 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, MLX5_SET(set_flow_table_root_in, in, other_vport, !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); - return mlx5_cmd_exec_in(dev, set_flow_table_root, in); + err = mlx5_cmd_exec_in(dev, 
set_flow_table_root, in); + if (!err && + ft->type == FS_FT_FDB && + mlx5_lag_is_shared_fdb(dev) && + mlx5_lag_is_master(dev)) { + err = mlx5_cmd_set_slave_root_fdb(dev, + mlx5_lag_get_peer_mdev(dev), + !disconnect, (!disconnect) ? + ft->id : 0); + if (err && !disconnect) { + MLX5_SET(set_flow_table_root_in, in, op_mod, 0); + MLX5_SET(set_flow_table_root_in, in, table_id, + ns->root_ft->id); + mlx5_cmd_exec_in(dev, set_flow_table_root, in); + } + } + + return err; } static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index c0697e1b7118..9fe8e3c204d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -413,7 +413,7 @@ static bool check_valid_spec(const struct mlx5_flow_spec *spec) return true; } -static struct mlx5_flow_root_namespace *find_root(struct fs_node *node) +struct mlx5_flow_root_namespace *find_root(struct fs_node *node) { struct fs_node *root; struct mlx5_flow_namespace *ns; @@ -2343,7 +2343,7 @@ static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio, #define FLOW_TABLE_BIT_SZ 1 #define GET_FLOW_TABLE_CAP(dev, offset) \ - ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \ + ((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) + \ offset / 32)) >> \ (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ) static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) @@ -2493,7 +2493,7 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level) acc_level_ns = set_prio_attrs_in_ns(ns, acc_level); /* If this a prio with chains, and we can jump from one chain - * (namepsace) to another, so we accumulate the levels + * (namespace) to another, so we accumulate the levels */ if (prio->node.type == FS_TYPE_PRIO_CHAINS) acc_level = acc_level_ns; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 7317cdeab661..98240badc342 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -294,6 +294,8 @@ void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev); int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports); void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev); +struct mlx5_flow_root_namespace *find_root(struct fs_node *node); + #define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); } #define fs_list_for_each_entry(pos, root) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 9abeb80ffa31..037e18dd4be0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -170,7 +170,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev) /* The reset only needs to be issued by one PF. The health buffer is * shared between all functions, and will be cleared during a reset. - * Check again to avoid a redundant 2nd reset. If the fatal erros was + * Check again to avoid a redundant 2nd reset. If the fatal errors was * PCI related a reset won't help. 
*/ fatal_error = mlx5_health_check_fatal_sensors(dev); @@ -213,10 +213,6 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) mutex_lock(&dev->intf_state_mutex); if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto unlock;/* a previous error is still being handled */ - if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) { - dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; - goto unlock; - } enter_error_state(dev, force); unlock: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index 0e487ec57d5c..0c8594c7df21 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -99,7 +99,9 @@ static void mlx5i_get_channels(struct net_device *dev, } static int mlx5i_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); @@ -107,7 +109,9 @@ static int mlx5i_set_coalesce(struct net_device *netdev, } static int mlx5i_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 7d7ed025db0d..67571e5040d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -50,7 +50,7 @@ static const struct net_device_ops mlx5i_netdev_ops = { .ndo_init = mlx5i_dev_init, .ndo_uninit = mlx5i_dev_cleanup, .ndo_change_mtu = mlx5i_change_mtu, - .ndo_do_ioctl = mlx5i_ioctl, + .ndo_eth_ioctl = mlx5i_ioctl, }; /* IPoIB mlx5 netdev profile */ @@ -314,8 +314,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv) static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) { - struct ttc_params ttc_params = {}; - int tt, err; + int err; priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); @@ -330,33 +329,15 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) priv->netdev->hw_features &= ~NETIF_F_NTUPLE; } - mlx5e_set_ttc_basic_params(priv, &ttc_params); - mlx5e_set_inner_ttc_ft_params(&ttc_params); - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn; - - err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc); - if (err) { - netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n", - err); - goto err_destroy_arfs_tables; - } - - mlx5e_set_ttc_ft_params(&ttc_params); - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn; - - err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc); + err = mlx5e_create_ttc_table(priv); if (err) { netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", err); - goto err_destroy_inner_ttc_table; + goto err_destroy_arfs_tables; } return 0; -err_destroy_inner_ttc_table: - mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); err_destroy_arfs_tables: mlx5e_arfs_destroy_tables(priv); @@ -365,17 +346,20 @@ err_destroy_arfs_tables: static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) { - mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); - 
mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); + mlx5e_destroy_ttc_table(priv); mlx5e_arfs_destroy_tables(priv); } static int mlx5i_init_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; - u16 max_nch = priv->max_nch; + struct mlx5e_lro_param lro_param; int err; + priv->rx_res = mlx5e_rx_res_alloc(); + if (!priv->rx_res) + return -ENOMEM; + mlx5e_create_q_counters(priv); err = mlx5e_open_drop_rq(priv, &priv->drop_rq); @@ -384,54 +368,38 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) goto err_destroy_q_counters; } - err = mlx5e_create_indirect_rqt(priv); + lro_param = mlx5e_get_lro_param(&priv->channels.params); + err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0, + priv->max_nch, priv->drop_rq.rqn, &lro_param, + priv->channels.params.num_channels); if (err) goto err_close_drop_rq; - err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch); - if (err) - goto err_destroy_indirect_rqts; - - err = mlx5e_create_indirect_tirs(priv, true); - if (err) - goto err_destroy_direct_rqts; - - err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch); - if (err) - goto err_destroy_indirect_tirs; - err = mlx5i_create_flow_steering(priv); if (err) - goto err_destroy_direct_tirs; + goto err_destroy_rx_res; return 0; -err_destroy_direct_tirs: - mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); -err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv); -err_destroy_direct_rqts: - mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); -err_destroy_indirect_rqts: - mlx5e_destroy_rqt(priv, &priv->indir_rqt); +err_destroy_rx_res: + mlx5e_rx_res_destroy(priv->rx_res); err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); err_destroy_q_counters: mlx5e_destroy_q_counters(priv); + mlx5e_rx_res_free(priv->rx_res); + priv->rx_res = NULL; return err; } static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) { - u16 max_nch = priv->max_nch; - mlx5i_destroy_flow_steering(priv); - mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); - mlx5e_destroy_indirect_tirs(priv); - mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); - mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_rx_res_destroy(priv->rx_res); mlx5e_close_drop_rq(&priv->drop_rq); mlx5e_destroy_q_counters(priv); + mlx5e_rx_res_free(priv->rx_res); + priv->rx_res = NULL; } /* The stats groups order is opposite to the update_stats() order calls */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 18ee21b06a00..5308f23702bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -149,7 +149,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_get_stats64 = mlx5i_get_stats, .ndo_uninit = mlx5i_pkey_dev_cleanup, .ndo_change_mtu = mlx5i_pkey_change_mtu, - .ndo_do_ioctl = mlx5i_pkey_ioctl, + .ndo_eth_ioctl = mlx5i_pkey_ioctl, }; /* Child NDOs */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 5c043c5cc403..49ca57c6d31d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -32,7 +32,9 @@ #include <linux/netdevice.h> #include <linux/mlx5/driver.h> +#include <linux/mlx5/eswitch.h> #include <linux/mlx5/vport.h> +#include "lib/devcom.h" #include "mlx5_core.h" #include "eswitch.h" #include "lag.h" @@ -45,7 +47,7 @@ static DEFINE_SPINLOCK(lag_lock); static int 
mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, - u8 remap_port2) + u8 remap_port2, bool shared_fdb) { u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {}; void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); @@ -54,6 +56,7 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); + MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb); return mlx5_cmd_exec_in(dev, create_lag, in); } @@ -224,35 +227,59 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, } static int mlx5_create_lag(struct mlx5_lag *ldev, - struct lag_tracker *tracker) + struct lag_tracker *tracker, + bool shared_fdb) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; + u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; int err; mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1], &ldev->v2p_map[MLX5_LAG_P2]); - mlx5_core_info(dev0, "lag map port 1:%d port 2:%d", - ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2]); + mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d", + ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2], + shared_fdb); err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1], - ldev->v2p_map[MLX5_LAG_P2]); - if (err) + ldev->v2p_map[MLX5_LAG_P2], shared_fdb); + if (err) { mlx5_core_err(dev0, "Failed to create LAG (%d)\n", err); + return err; + } + + if (shared_fdb) { + err = mlx5_eswitch_offloads_config_single_fdb(dev0->priv.eswitch, + dev1->priv.eswitch); + if (err) + mlx5_core_err(dev0, "Can't enable single FDB mode\n"); + else + mlx5_core_info(dev0, "Operation mode is single FDB\n"); + } + + if (err) { + MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG); + if (mlx5_cmd_exec_in(dev0, destroy_lag, in)) + mlx5_core_err(dev0, + "Failed to deactivate RoCE LAG; driver restart required\n"); + } + return err; } int mlx5_activate_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker, - u8 flags) + u8 flags, + bool shared_fdb) { bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE); struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; int err; - err = mlx5_create_lag(ldev, tracker); + err = mlx5_create_lag(ldev, tracker, shared_fdb); if (err) { if (roce_lag) { mlx5_core_err(dev0, @@ -266,6 +293,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, } ldev->flags |= flags; + ldev->shared_fdb = shared_fdb; return 0; } @@ -277,6 +305,13 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev) int err; ldev->flags &= ~MLX5_LAG_MODE_FLAGS; + mlx5_lag_mp_reset(ldev); + + if (ldev->shared_fdb) { + mlx5_eswitch_offloads_destroy_single_fdb(ldev->pf[MLX5_LAG_P1].dev->priv.eswitch, + ldev->pf[MLX5_LAG_P2].dev->priv.eswitch); + ldev->shared_fdb = false; + } MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG); err = mlx5_cmd_exec_in(dev0, destroy_lag, in); @@ -333,6 +368,10 @@ static void mlx5_lag_remove_devices(struct mlx5_lag *ldev) if (!ldev->pf[i].dev) continue; + if (ldev->pf[i].dev->priv.flags & + MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV) + continue; + ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(ldev->pf[i].dev); } @@ -342,12 +381,15 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; + bool shared_fdb = ldev->shared_fdb; bool roce_lag; int err; roce_lag = __mlx5_lag_is_roce(ldev); - if (roce_lag) { + if (shared_fdb) { 
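/* One reading of the teardown flow below: with a shared FDB the IB
 * auxiliary devices of both PFs are removed before the LAG is destroyed;
 * mlx5_lag_add_devices() and mlx5_eswitch_reload_reps() further down then
 * re-create them so the representors re-attach to per-PF FDBs once the
 * hardware LAG is gone.
 */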
+ mlx5_lag_remove_devices(ldev); + } else if (roce_lag) { if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) { dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); @@ -359,8 +401,34 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev) if (err) return; - if (roce_lag) + if (shared_fdb || roce_lag) mlx5_lag_add_devices(ldev); + + if (shared_fdb) { + if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) + mlx5_eswitch_reload_reps(dev0->priv.eswitch); + if (!(dev1->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) + mlx5_eswitch_reload_reps(dev1->priv.eswitch); + } +} + +static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) +{ + struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; + + if (is_mdev_switchdev_mode(dev0) && + is_mdev_switchdev_mode(dev1) && + mlx5_eswitch_vport_match_metadata_enabled(dev0->priv.eswitch) && + mlx5_eswitch_vport_match_metadata_enabled(dev1->priv.eswitch) && + mlx5_devcom_is_paired(dev0->priv.devcom, + MLX5_DEVCOM_ESW_OFFLOADS) && + MLX5_CAP_GEN(dev1, lag_native_fdb_selection) && + MLX5_CAP_ESW(dev1, root_ft_on_other_esw) && + MLX5_CAP_ESW(dev0, esw_shared_ingress_acl)) + return true; + + return false; } static void mlx5_do_bond(struct mlx5_lag *ldev) @@ -371,14 +439,17 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) bool do_bond, roce_lag; int err; - if (!mlx5_lag_is_ready(ldev)) - return; - - tracker = ldev->tracker; + if (!mlx5_lag_is_ready(ldev)) { + do_bond = false; + } else { + tracker = ldev->tracker; - do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev); + do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev); + } if (do_bond && !__mlx5_lag_is_active(ldev)) { + bool shared_fdb = mlx5_shared_fdb_supported(ldev); + roce_lag = !mlx5_sriov_is_enabled(dev0) && !mlx5_sriov_is_enabled(dev1); @@ -388,23 +459,40 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE; #endif - if (roce_lag) + if (shared_fdb || roce_lag) mlx5_lag_remove_devices(ldev); err = mlx5_activate_lag(ldev, &tracker, roce_lag ? 
MLX5_LAG_FLAG_ROCE : - MLX5_LAG_FLAG_SRIOV); + MLX5_LAG_FLAG_SRIOV, + shared_fdb); if (err) { - if (roce_lag) + if (shared_fdb || roce_lag) mlx5_lag_add_devices(ldev); return; - } - - if (roce_lag) { + } else if (roce_lag) { dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); mlx5_nic_vport_enable_roce(dev1); + } else if (shared_fdb) { + dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(dev0); + + err = mlx5_eswitch_reload_reps(dev0->priv.eswitch); + if (!err) + err = mlx5_eswitch_reload_reps(dev1->priv.eswitch); + + if (err) { + dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(dev0); + mlx5_deactivate_lag(ldev); + mlx5_lag_add_devices(ldev); + mlx5_eswitch_reload_reps(dev0->priv.eswitch); + mlx5_eswitch_reload_reps(dev1->priv.eswitch); + mlx5_core_err(dev0, "Failed to enable lag\n"); + return; + } } } else if (do_bond && __mlx5_lag_is_active(ldev)) { mlx5_modify_lag(ldev, &tracker); @@ -418,21 +506,48 @@ static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay) queue_delayed_work(ldev->wq, &ldev->bond_work, delay); } +static void mlx5_lag_lock_eswitches(struct mlx5_core_dev *dev0, + struct mlx5_core_dev *dev1) +{ + if (dev0) + mlx5_esw_lock(dev0->priv.eswitch); + if (dev1) + mlx5_esw_lock(dev1->priv.eswitch); +} + +static void mlx5_lag_unlock_eswitches(struct mlx5_core_dev *dev0, + struct mlx5_core_dev *dev1) +{ + if (dev1) + mlx5_esw_unlock(dev1->priv.eswitch); + if (dev0) + mlx5_esw_unlock(dev0->priv.eswitch); +} + static void mlx5_do_bond_work(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag, bond_work); + struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; int status; status = mlx5_dev_list_trylock(); if (!status) { - /* 1 sec delay. 
*/ mlx5_queue_bond_work(ldev, HZ); return; } + if (ldev->mode_changes_in_progress) { + mlx5_dev_list_unlock(); + mlx5_queue_bond_work(ldev, HZ); + return; + } + + mlx5_lag_lock_eswitches(dev0, dev1); mlx5_do_bond(ldev); + mlx5_lag_unlock_eswitches(dev0, dev1); mlx5_dev_list_unlock(); } @@ -630,7 +745,7 @@ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev, } /* Must be called with intf_mutex held */ -static void __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev) +static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev = NULL; struct mlx5_core_dev *tmp_dev; @@ -638,7 +753,7 @@ static void __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev) if (!MLX5_CAP_GEN(dev, vport_group_manager) || !MLX5_CAP_GEN(dev, lag_master) || MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS) - return; + return 0; tmp_dev = mlx5_get_next_phys_dev(dev); if (tmp_dev) @@ -648,15 +763,17 @@ static void __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev) ldev = mlx5_lag_dev_alloc(dev); if (!ldev) { mlx5_core_err(dev, "Failed to alloc lag dev\n"); - return; + return 0; } } else { + if (ldev->mode_changes_in_progress) + return -EAGAIN; mlx5_ldev_get(ldev); } mlx5_ldev_add_mdev(ldev, dev); - return; + return 0; } void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev) @@ -667,7 +784,13 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev) if (!ldev) return; +recheck: mlx5_dev_list_lock(); + if (ldev->mode_changes_in_progress) { + mlx5_dev_list_unlock(); + msleep(100); + goto recheck; + } mlx5_ldev_remove_mdev(ldev, dev); mlx5_dev_list_unlock(); mlx5_ldev_put(ldev); @@ -675,8 +798,16 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev) void mlx5_lag_add_mdev(struct mlx5_core_dev *dev) { + int err; + +recheck: mlx5_dev_list_lock(); - __mlx5_lag_dev_add_mdev(dev); + err = __mlx5_lag_dev_add_mdev(dev); + if (err) { + mlx5_dev_list_unlock(); + msleep(100); + goto recheck; + } mlx5_dev_list_unlock(); } @@ -690,11 +821,11 @@ void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, if (!ldev) return; - if (__mlx5_lag_is_active(ldev)) - mlx5_disable_lag(ldev); - mlx5_ldev_remove_netdev(ldev, netdev); ldev->flags &= ~MLX5_LAG_FLAG_READY; + + if (__mlx5_lag_is_active(ldev)) + mlx5_queue_bond_work(ldev, 0); } /* Must be called with intf_mutex held */ @@ -716,6 +847,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, if (i >= MLX5_MAX_PORTS) ldev->flags |= MLX5_LAG_FLAG_READY; + mlx5_queue_bond_work(ldev, 0); } bool mlx5_lag_is_roce(struct mlx5_core_dev *dev) @@ -746,6 +878,21 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev) } EXPORT_SYMBOL(mlx5_lag_is_active); +bool mlx5_lag_is_master(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev; + bool res; + + spin_lock(&lag_lock); + ldev = mlx5_lag_dev(dev); + res = ldev && __mlx5_lag_is_active(ldev) && + dev == ldev->pf[MLX5_LAG_P1].dev; + spin_unlock(&lag_lock); + + return res; +} +EXPORT_SYMBOL(mlx5_lag_is_master); + bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev; @@ -760,19 +907,50 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev) } EXPORT_SYMBOL(mlx5_lag_is_sriov); -void mlx5_lag_update(struct mlx5_core_dev *dev) +bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev; + bool res; + + spin_lock(&lag_lock); + ldev = mlx5_lag_dev(dev); + res = ldev && __mlx5_lag_is_sriov(ldev) && ldev->shared_fdb; + spin_unlock(&lag_lock); + + return res; +} +EXPORT_SYMBOL(mlx5_lag_is_shared_fdb); + +void mlx5_lag_disable_change(struct mlx5_core_dev *dev) { + struct mlx5_core_dev *dev0; + struct 
mlx5_core_dev *dev1; struct mlx5_lag *ldev; mlx5_dev_list_lock(); + ldev = mlx5_lag_dev(dev); - if (!ldev) - goto unlock; + dev0 = ldev->pf[MLX5_LAG_P1].dev; + dev1 = ldev->pf[MLX5_LAG_P2].dev; - mlx5_do_bond(ldev); + ldev->mode_changes_in_progress++; + if (__mlx5_lag_is_active(ldev)) { + mlx5_lag_lock_eswitches(dev0, dev1); + mlx5_disable_lag(ldev); + mlx5_lag_unlock_eswitches(dev0, dev1); + } + mlx5_dev_list_unlock(); +} -unlock: +void mlx5_lag_enable_change(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev; + + mlx5_dev_list_lock(); + ldev = mlx5_lag_dev(dev); + ldev->mode_changes_in_progress--; mlx5_dev_list_unlock(); + mlx5_queue_bond_work(ldev, 0); } struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev) @@ -827,6 +1005,26 @@ unlock: } EXPORT_SYMBOL(mlx5_lag_get_slave_port); +struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev) +{ + struct mlx5_core_dev *peer_dev = NULL; + struct mlx5_lag *ldev; + + spin_lock(&lag_lock); + ldev = mlx5_lag_dev(dev); + if (!ldev) + goto unlock; + + peer_dev = ldev->pf[MLX5_LAG_P1].dev == dev ? + ldev->pf[MLX5_LAG_P2].dev : + ldev->pf[MLX5_LAG_P1].dev; + +unlock: + spin_unlock(&lag_lock); + return peer_dev; +} +EXPORT_SYMBOL(mlx5_lag_get_peer_mdev); + int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, u64 *values, int num_counters, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h index 191392c37558..d4bae528954e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h @@ -39,6 +39,8 @@ struct lag_tracker { */ struct mlx5_lag { u8 flags; + int mode_changes_in_progress; + bool shared_fdb; u8 v2p_map[MLX5_MAX_PORTS]; struct kref ref; struct lag_func pf[MLX5_MAX_PORTS]; @@ -71,7 +73,8 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker); int mlx5_activate_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker, - u8 flags); + u8 flags, + bool shared_fdb); int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, struct net_device *ndev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c index c4bf8b679541..f239b352a58a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c @@ -161,7 +161,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, struct lag_tracker tracker; tracker = ldev->tracker; - mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH); + mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH, false); } mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY); @@ -302,6 +302,14 @@ static int mlx5_lag_fib_event(struct notifier_block *nb, return NOTIFY_DONE; } +void mlx5_lag_mp_reset(struct mlx5_lag *ldev) +{ + /* Clear mfi, as it might become stale when a route delete event + * has been missed, see mlx5_lag_fib_route_event(). 
+ */ + ldev->lag_mp.mfi = NULL; +} + int mlx5_lag_mp_init(struct mlx5_lag *ldev) { struct lag_mp *mp = &ldev->lag_mp; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h index 258ac7b2964e..729c839397a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h @@ -21,11 +21,13 @@ struct lag_mp { #ifdef CONFIG_MLX5_ESWITCH +void mlx5_lag_mp_reset(struct mlx5_lag *ldev); int mlx5_lag_mp_init(struct mlx5_lag *ldev); void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev); #else /* CONFIG_MLX5_ESWITCH */ +static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {} static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; } static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index ce696d523493..ffac8a0e7a23 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -749,7 +749,7 @@ static int mlx5_pps_event(struct notifier_block *nb, } else { ptp_event.type = PTP_CLOCK_EXTTS; } - /* TODOL clock->ptp can be NULL if ptp_clock_register failes */ + /* TODO: clock->ptp can be NULL if ptp_clock_register fails */ ptp_clock_event(clock->ptp, &ptp_event); break; case PTP_PF_PEROUT: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c new file mode 100644 index 000000000000..749d17c0057d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.
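/* lib/fs_ttc.c turns the L3/L4 traffic-type classifier into a shared mlx5
 * library. A sizing sketch derived from the defines and rule arrays that
 * follow: group 1 (L4 matches) reserves BIT(3) = 8 entries plus
 * MLX5_NUM_TUNNEL_TT for tunnel demux, group 2 (L3 only) BIT(1) = 2, and
 * group 3 (catch-all) BIT(0) = 1; with the six ttc_tunnel_rules[] entries
 * that yields a 17-entry table covering the eleven MLX5_TT_* rules plus
 * the six tunnel rules.
 */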
+ +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/tcp.h> +#include <linux/mlx5/fs.h> +#include <linux/mlx5/driver.h> +#include "mlx5_core.h" +#include "lib/fs_ttc.h" + +#define MLX5_TTC_NUM_GROUPS 3 +#define MLX5_TTC_GROUP1_SIZE (BIT(3) + MLX5_NUM_TUNNEL_TT) +#define MLX5_TTC_GROUP2_SIZE BIT(1) +#define MLX5_TTC_GROUP3_SIZE BIT(0) +#define MLX5_TTC_TABLE_SIZE (MLX5_TTC_GROUP1_SIZE +\ + MLX5_TTC_GROUP2_SIZE +\ + MLX5_TTC_GROUP3_SIZE) + +#define MLX5_INNER_TTC_NUM_GROUPS 3 +#define MLX5_INNER_TTC_GROUP1_SIZE BIT(3) +#define MLX5_INNER_TTC_GROUP2_SIZE BIT(1) +#define MLX5_INNER_TTC_GROUP3_SIZE BIT(0) +#define MLX5_INNER_TTC_TABLE_SIZE (MLX5_INNER_TTC_GROUP1_SIZE +\ + MLX5_INNER_TTC_GROUP2_SIZE +\ + MLX5_INNER_TTC_GROUP3_SIZE) + +/* L3/L4 traffic type classifier */ +struct mlx5_ttc_table { + int num_groups; + struct mlx5_flow_table *t; + struct mlx5_flow_group **g; + struct mlx5_ttc_rule rules[MLX5_NUM_TT]; + struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT]; +}; + +struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc) +{ + return ttc->t; +} + +static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc) +{ + int i; + + for (i = 0; i < MLX5_NUM_TT; i++) { + if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) { + mlx5_del_flow_rules(ttc->rules[i].rule); + ttc->rules[i].rule = NULL; + } + } + + for (i = 0; i < MLX5_NUM_TUNNEL_TT; i++) { + if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) { + mlx5_del_flow_rules(ttc->tunnel_rules[i]); + ttc->tunnel_rules[i] = NULL; + } + } +} + +struct mlx5_etype_proto { + u16 etype; + u8 proto; +}; + +static struct mlx5_etype_proto ttc_rules[] = { + [MLX5_TT_IPV4_TCP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_TCP, + }, + [MLX5_TT_IPV6_TCP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_TCP, + }, + [MLX5_TT_IPV4_UDP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_UDP, + }, + [MLX5_TT_IPV6_UDP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_UDP, + }, + [MLX5_TT_IPV4_IPSEC_AH] = { + .etype = ETH_P_IP, + .proto = IPPROTO_AH, + }, + [MLX5_TT_IPV6_IPSEC_AH] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_AH, + }, + [MLX5_TT_IPV4_IPSEC_ESP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_ESP, + }, + [MLX5_TT_IPV6_IPSEC_ESP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_ESP, + }, + [MLX5_TT_IPV4] = { + .etype = ETH_P_IP, + .proto = 0, + }, + [MLX5_TT_IPV6] = { + .etype = ETH_P_IPV6, + .proto = 0, + }, + [MLX5_TT_ANY] = { + .etype = 0, + .proto = 0, + }, +}; + +static struct mlx5_etype_proto ttc_tunnel_rules[] = { + [MLX5_TT_IPV4_GRE] = { + .etype = ETH_P_IP, + .proto = IPPROTO_GRE, + }, + [MLX5_TT_IPV6_GRE] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_GRE, + }, + [MLX5_TT_IPV4_IPIP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_IPIP, + }, + [MLX5_TT_IPV6_IPIP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_IPIP, + }, + [MLX5_TT_IPV4_IPV6] = { + .etype = ETH_P_IP, + .proto = IPPROTO_IPV6, + }, + [MLX5_TT_IPV6_IPV6] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_IPV6, + }, + +}; + +u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt) +{ + return ttc_tunnel_rules[tt].proto; +} + +static bool mlx5_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev, + u8 proto_type) +{ + switch (proto_type) { + case IPPROTO_GRE: + return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); + case IPPROTO_IPIP: + case IPPROTO_IPV6: + return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) || + MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx)); + default: + return false; + } +} + +static bool mlx5_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev) +{ + int tt; + + for 
(tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) { + if (mlx5_tunnel_proto_supported_rx(mdev, + ttc_tunnel_rules[tt].proto)) + return true; + } + return false; +} + +bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) +{ + return (mlx5_tunnel_any_rx_proto_supported(mdev) && + MLX5_CAP_FLOWTABLE_NIC_RX(mdev, + ft_field_support.inner_ip_version)); +} + +static u8 mlx5_etype_to_ipv(u16 ethertype) +{ + if (ethertype == ETH_P_IP) + return 4; + + if (ethertype == ETH_P_IPV6) + return 6; + + return 0; +} + +static struct mlx5_flow_handle * +mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, + struct mlx5_flow_destination *dest, u16 etype, u8 proto) +{ + int match_ipv_outer = + MLX5_CAP_FLOWTABLE_NIC_RX(dev, + ft_field_support.outer_ip_version); + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err = 0; + u8 ipv; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return ERR_PTR(-ENOMEM); + + if (proto) { + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto); + } + + ipv = mlx5_etype_to_ipv(etype); + if (match_ipv_outer && ipv) { + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv); + } else if (etype) { + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype); + } + + rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(dev, "%s: add rule failed\n", __func__); + } + + kvfree(spec); + return err ? 
ERR_PTR(err) : rule; +} + +static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev, + struct ttc_params *params, + struct mlx5_ttc_table *ttc) +{ + struct mlx5_flow_handle **trules; + struct mlx5_ttc_rule *rules; + struct mlx5_flow_table *ft; + int tt; + int err; + + ft = ttc->t; + rules = ttc->rules; + for (tt = 0; tt < MLX5_NUM_TT; tt++) { + struct mlx5_ttc_rule *rule = &rules[tt]; + + rule->rule = mlx5_generate_ttc_rule(dev, ft, ¶ms->dests[tt], + ttc_rules[tt].etype, + ttc_rules[tt].proto); + if (IS_ERR(rule->rule)) { + err = PTR_ERR(rule->rule); + rule->rule = NULL; + goto del_rules; + } + rule->default_dest = params->dests[tt]; + } + + if (!params->inner_ttc || !mlx5_tunnel_inner_ft_supported(dev)) + return 0; + + trules = ttc->tunnel_rules; + for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) { + if (!mlx5_tunnel_proto_supported_rx(dev, + ttc_tunnel_rules[tt].proto)) + continue; + trules[tt] = mlx5_generate_ttc_rule(dev, ft, + ¶ms->tunnel_dests[tt], + ttc_tunnel_rules[tt].etype, + ttc_tunnel_rules[tt].proto); + if (IS_ERR(trules[tt])) { + err = PTR_ERR(trules[tt]); + trules[tt] = NULL; + goto del_rules; + } + } + + return 0; + +del_rules: + mlx5_cleanup_ttc_rules(ttc); + return err; +} + +static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc, + bool use_ipv) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ttc->g = kcalloc(MLX5_TTC_NUM_GROUPS, sizeof(*ttc->g), GFP_KERNEL); + if (!ttc->g) + return -ENOMEM; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + kfree(ttc->g); + ttc->g = NULL; + return -ENOMEM; + } + + /* L4 Group */ + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); + if (use_ipv) + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version); + else + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_TTC_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + /* L3 Group */ + MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_TTC_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + /* Any Group */ + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_TTC_GROUP3_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ttc->g[ttc->num_groups]); + ttc->g[ttc->num_groups] = NULL; + kvfree(in); + + return err; +} + +static struct mlx5_flow_handle * +mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_destination *dest, + u16 etype, u8 proto) +{ + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err = 0; + u8 ipv; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return ERR_PTR(-ENOMEM); + + ipv = mlx5_etype_to_ipv(etype); + if (etype && ipv) { + spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; + 
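/* Unlike the outer path, which can fall back to an ethertype match when
 * outer_ip_version is not supported, the inner classifier keys on
 * inner_headers.ip_version only - which is why
 * mlx5_tunnel_inner_ft_supported() above requires the inner_ip_version
 * field before an inner TTC table is built at all.
 */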
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version); + MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv); + } + + if (proto) { + spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol); + MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto); + } + + rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(dev, "%s: add inner TTC rule failed\n", __func__); + } + + kvfree(spec); + return err ? ERR_PTR(err) : rule; +} + +static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev, + struct ttc_params *params, + struct mlx5_ttc_table *ttc) +{ + struct mlx5_ttc_rule *rules; + struct mlx5_flow_table *ft; + int err; + int tt; + + ft = ttc->t; + rules = ttc->rules; + + for (tt = 0; tt < MLX5_NUM_TT; tt++) { + struct mlx5_ttc_rule *rule = &rules[tt]; + + rule->rule = mlx5_generate_inner_ttc_rule(dev, ft, + &params->dests[tt], + ttc_rules[tt].etype, + ttc_rules[tt].proto); + if (IS_ERR(rule->rule)) { + err = PTR_ERR(rule->rule); + rule->rule = NULL; + goto del_rules; + } + rule->default_dest = params->dests[tt]; + } + + return 0; + +del_rules: + + mlx5_cleanup_ttc_rules(ttc); + return err; +} + +static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ttc->g = kcalloc(MLX5_INNER_TTC_NUM_GROUPS, sizeof(*ttc->g), + GFP_KERNEL); + if (!ttc->g) + return -ENOMEM; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + kfree(ttc->g); + ttc->g = NULL; + return -ENOMEM; + } + + /* L4 Group */ + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); + MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_INNER_TTC_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + /* L3 Group */ + MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_INNER_TTC_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + /* Any Group */ + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_INNER_TTC_GROUP3_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ttc->g[ttc->num_groups]); + ttc->g[ttc->num_groups] = NULL; + kvfree(in); + + return err; +} + +struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev, + struct ttc_params *params) +{ + struct mlx5_ttc_table *ttc; + int err; + + ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL); + if (!ttc) + return ERR_PTR(-ENOMEM); + + WARN_ON_ONCE(params->ft_attr.max_fte); + params->ft_attr.max_fte = MLX5_INNER_TTC_TABLE_SIZE; + ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr); + if (IS_ERR(ttc->t)) { + err = PTR_ERR(ttc->t); + kvfree(ttc); + return ERR_PTR(err); +
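The TTC constructors above return either a valid handle or an errno folded into the pointer itself, so a single IS_ERR() test covers every failure path. For illustration only, here is a minimal self-contained userspace sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom; the macro bodies are simplified models of the kernel's, and struct rule/create_rule() are hypothetical:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified model: small negative errno values live in the top
 * 4095 bytes of the address space, so no valid pointer collides. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct rule { int id; };	/* hypothetical payload */

static struct rule *create_rule(int fail)
{
	struct rule *r;

	if (fail)
		return ERR_PTR(-EINVAL);	/* errno encoded in the pointer */
	r = malloc(sizeof(*r));
	if (!r)
		return ERR_PTR(-ENOMEM);
	r->id = 1;
	return r;				/* ordinary valid pointer */
}

int main(void)
{
	struct rule *r = create_rule(1);

	if (IS_ERR(r))		/* one test distinguishes all errors */
		printf("create_rule failed: %ld\n", PTR_ERR(r));
	else
		free(r);
	return 0;
}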
} + + err = mlx5_create_inner_ttc_table_groups(ttc); + if (err) + goto destroy_ft; + + err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc); + if (err) + goto destroy_ft; + + return ttc; + +destroy_ft: + mlx5_destroy_ttc_table(ttc); + return ERR_PTR(err); +} + +void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc) +{ + int i; + + mlx5_cleanup_ttc_rules(ttc); + for (i = ttc->num_groups - 1; i >= 0; i--) { + if (!IS_ERR_OR_NULL(ttc->g[i])) + mlx5_destroy_flow_group(ttc->g[i]); + ttc->g[i] = NULL; + } + + kfree(ttc->g); + mlx5_destroy_flow_table(ttc->t); + kvfree(ttc); +} + +struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev, + struct ttc_params *params) +{ + bool match_ipv_outer = + MLX5_CAP_FLOWTABLE_NIC_RX(dev, + ft_field_support.outer_ip_version); + struct mlx5_ttc_table *ttc; + int err; + + ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL); + if (!ttc) + return ERR_PTR(-ENOMEM); + + WARN_ON_ONCE(params->ft_attr.max_fte); + params->ft_attr.max_fte = MLX5_TTC_TABLE_SIZE; + ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr); + if (IS_ERR(ttc->t)) { + err = PTR_ERR(ttc->t); + kvfree(ttc); + return ERR_PTR(err); + } + + err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer); + if (err) + goto destroy_ft; + + err = mlx5_generate_ttc_table_rules(dev, params, ttc); + if (err) + goto destroy_ft; + + return ttc; + +destroy_ft: + mlx5_destroy_ttc_table(ttc); + return ERR_PTR(err); +} + +int mlx5_ttc_fwd_dest(struct mlx5_ttc_table *ttc, enum mlx5_traffic_types type, + struct mlx5_flow_destination *new_dest) +{ + return mlx5_modify_rule_destination(ttc->rules[type].rule, new_dest, + NULL); +} + +struct mlx5_flow_destination +mlx5_ttc_get_default_dest(struct mlx5_ttc_table *ttc, + enum mlx5_traffic_types type) +{ + struct mlx5_flow_destination *dest = &ttc->rules[type].default_dest; + + WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR, + "TTC[%d] default dest is not setup yet", type); + + return *dest; +} + +int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc, + enum mlx5_traffic_types type) +{ + struct mlx5_flow_destination dest = mlx5_ttc_get_default_dest(ttc, type); + + return mlx5_ttc_fwd_dest(ttc, type, &dest); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h new file mode 100644 index 000000000000..ce95be8f8382 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies. 
*/ + +#ifndef __MLX5_FS_TTC_H__ +#define __MLX5_FS_TTC_H__ + +#include <linux/mlx5/fs.h> + +enum mlx5_traffic_types { + MLX5_TT_IPV4_TCP, + MLX5_TT_IPV6_TCP, + MLX5_TT_IPV4_UDP, + MLX5_TT_IPV6_UDP, + MLX5_TT_IPV4_IPSEC_AH, + MLX5_TT_IPV6_IPSEC_AH, + MLX5_TT_IPV4_IPSEC_ESP, + MLX5_TT_IPV6_IPSEC_ESP, + MLX5_TT_IPV4, + MLX5_TT_IPV6, + MLX5_TT_ANY, + MLX5_NUM_TT, + MLX5_NUM_INDIR_TIRS = MLX5_TT_ANY, +}; + +enum mlx5_tunnel_types { + MLX5_TT_IPV4_GRE, + MLX5_TT_IPV6_GRE, + MLX5_TT_IPV4_IPIP, + MLX5_TT_IPV6_IPIP, + MLX5_TT_IPV4_IPV6, + MLX5_TT_IPV6_IPV6, + MLX5_NUM_TUNNEL_TT, +}; + +struct mlx5_ttc_rule { + struct mlx5_flow_handle *rule; + struct mlx5_flow_destination default_dest; +}; + +struct mlx5_ttc_table; + +struct ttc_params { + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table_attr ft_attr; + struct mlx5_flow_destination dests[MLX5_NUM_TT]; + bool inner_ttc; + struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT]; +}; + +struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc); + +struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev, + struct ttc_params *params); +void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc); + +struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev, + struct ttc_params *params); + +int mlx5_ttc_fwd_dest(struct mlx5_ttc_table *ttc, enum mlx5_traffic_types type, + struct mlx5_flow_destination *new_dest); +struct mlx5_flow_destination +mlx5_ttc_get_default_dest(struct mlx5_ttc_table *ttc, + enum mlx5_traffic_types type); +int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc, + enum mlx5_traffic_types type); + +bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev); +u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt); + +#endif /* __MLX5_FS_TTC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c index 38084400ee8f..e3b0a131c3e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c @@ -40,7 +40,7 @@ struct mlx5_vxlan { struct mlx5_core_dev *mdev; - /* max_num_ports is usuallly 4, 16 buckets is more than enough */ + /* max_num_ports is usually 4, 16 buckets is more than enough */ DECLARE_HASHTABLE(htable, 4); struct mutex sync_lock; /* sync add/del port HW operations */ }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c84ad87c99bb..79482824c64f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -252,28 +252,16 @@ static int set_dma_caps(struct pci_dev *pdev) { int err; - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); return err; } } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_warn(&pdev->dev, - "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "Can't set consistent PCI DMA mask, aborting\n"); - return err; - } - } - dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); return err; } @@ -389,11 +377,11 @@ static int 
mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, switch (cap_mode) { case HCA_CAP_OPMOD_GET_MAX: - memcpy(dev->caps.hca_max[cap_type], hca_caps, + memcpy(dev->caps.hca[cap_type]->max, hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; case HCA_CAP_OPMOD_GET_CUR: - memcpy(dev->caps.hca_cur[cap_type], hca_caps, + memcpy(dev->caps.hca[cap_type]->cur, hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; default: @@ -469,7 +457,7 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx) return err; set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); - memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP], + memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur, MLX5_ST_SZ_BYTES(odp_cap)); #define ODP_CAP_SET_MAX(dev, field) \ @@ -514,7 +502,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); - memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL], + memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL]->cur, MLX5_ST_SZ_BYTES(cmd_hca_cap)); mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", @@ -596,7 +584,7 @@ static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx) return 0; set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); - memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ROCE], + memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ROCE]->cur, MLX5_ST_SZ_BYTES(roce_cap)); MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1); @@ -748,14 +736,12 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev, const struct pci_device_id *id) { - struct mlx5_priv *priv = &dev->priv; int err = 0; mutex_init(&dev->pci_status_mutex); pci_set_drvdata(dev->pdev, dev); dev->bar_addr = pci_resource_start(pdev, 0); - priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev)); err = mlx5_pci_enable_device(dev); if (err) { @@ -1179,6 +1165,7 @@ static int mlx5_load(struct mlx5_core_dev *dev) goto err_ec; } + mlx5_lag_add_mdev(dev); err = mlx5_sriov_attach(dev); if (err) { mlx5_core_err(dev, "sriov init failed %d\n", err); @@ -1186,11 +1173,11 @@ static int mlx5_load(struct mlx5_core_dev *dev) } mlx5_sf_dev_table_create(dev); - mlx5_lag_add_mdev(dev); return 0; err_sriov: + mlx5_lag_remove_mdev(dev); mlx5_ec_cleanup(dev); err_ec: mlx5_sf_hw_table_destroy(dev); @@ -1222,9 +1209,9 @@ err_irq_table: static void mlx5_unload(struct mlx5_core_dev *dev) { - mlx5_lag_remove_mdev(dev); mlx5_sf_dev_table_destroy(dev); mlx5_sriov_detach(dev); + mlx5_lag_remove_mdev(dev); mlx5_ec_cleanup(dev); mlx5_sf_hw_table_destroy(dev); mlx5_vhca_event_stop(dev); @@ -1248,11 +1235,6 @@ int mlx5_init_one(struct mlx5_core_dev *dev) int err = 0; mutex_lock(&dev->intf_state_mutex); - if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { - mlx5_core_warn(dev, "interface is up, NOP\n"); - goto out; - } - /* remove any previous indication of internal error */ dev->state = MLX5_DEVICE_STATE_UP; err = mlx5_function_setup(dev, true); @@ -1271,7 +1253,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev) set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); - err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); + err = mlx5_devlink_register(priv_to_devlink(dev)); if (err) goto err_devlink_reg; @@ -1293,7 +1275,6 @@ function_teardown: mlx5_function_teardown(dev, true); err_function: dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; -out: mutex_unlock(&dev->intf_state_mutex); return err; } @@ -1380,6 +1361,60 @@ out: 
mutex_unlock(&dev->intf_state_mutex); } +static const int types[] = { + MLX5_CAP_GENERAL, + MLX5_CAP_GENERAL_2, + MLX5_CAP_ETHERNET_OFFLOADS, + MLX5_CAP_IPOIB_ENHANCED_OFFLOADS, + MLX5_CAP_ODP, + MLX5_CAP_ATOMIC, + MLX5_CAP_ROCE, + MLX5_CAP_IPOIB_OFFLOADS, + MLX5_CAP_FLOW_TABLE, + MLX5_CAP_ESWITCH_FLOW_TABLE, + MLX5_CAP_ESWITCH, + MLX5_CAP_VECTOR_CALC, + MLX5_CAP_QOS, + MLX5_CAP_DEBUG, + MLX5_CAP_DEV_MEM, + MLX5_CAP_DEV_EVENT, + MLX5_CAP_TLS, + MLX5_CAP_VDPA_EMULATION, + MLX5_CAP_IPSEC, +}; + +static void mlx5_hca_caps_free(struct mlx5_core_dev *dev) +{ + int type; + int i; + + for (i = 0; i < ARRAY_SIZE(types); i++) { + type = types[i]; + kfree(dev->caps.hca[type]); + } +} + +static int mlx5_hca_caps_alloc(struct mlx5_core_dev *dev) +{ + struct mlx5_hca_cap *cap; + int type; + int i; + + for (i = 0; i < ARRAY_SIZE(types); i++) { + cap = kzalloc(sizeof(*cap), GFP_KERNEL); + if (!cap) + goto err; + type = types[i]; + dev->caps.hca[type] = cap; + } + + return 0; + +err: + mlx5_hca_caps_free(dev); + return -ENOMEM; +} + int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) { struct mlx5_priv *priv = &dev->priv; @@ -1399,6 +1434,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) mutex_init(&priv->pgdir_mutex); INIT_LIST_HEAD(&priv->pgdir_list); + priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev)); priv->dbg_root = debugfs_create_dir(dev_name(dev->device), mlx5_debugfs_root); INIT_LIST_HEAD(&priv->traps); @@ -1415,8 +1451,14 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) if (err) goto err_adev_init; + err = mlx5_hca_caps_alloc(dev); + if (err) + goto err_hca_caps; + return 0; +err_hca_caps: + mlx5_adev_cleanup(dev); err_adev_init: mlx5_pagealloc_cleanup(dev); err_pagealloc_init: @@ -1435,6 +1477,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; + mlx5_hca_caps_free(dev); mlx5_adev_cleanup(dev); mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); @@ -1452,7 +1495,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) struct devlink *devlink; int err; - devlink = mlx5_devlink_alloc(); + devlink = mlx5_devlink_alloc(&pdev->dev); if (!devlink) { dev_err(&pdev->dev, "devlink alloc failed\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index da365b8f0141..230eab7e3bc9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -168,6 +168,8 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_add_mdev(struct mlx5_core_dev *dev); void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev); +void mlx5_lag_disable_change(struct mlx5_core_dev *dev); +void mlx5_lag_enable_change(struct mlx5_core_dev *dev); int mlx5_events_init(struct mlx5_core_dev *dev); void mlx5_events_cleanup(struct mlx5_core_dev *dev); @@ -275,4 +277,9 @@ static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev) return MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix); } + +bool mlx5_eth_supported(struct mlx5_core_dev *dev); +bool mlx5_rdma_supported(struct mlx5_core_dev *dev); +bool mlx5_vnet_supported(struct mlx5_core_dev *dev); + #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 3465b363fc2f..c79a10b3454d 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -18,7 +18,7 @@ #define MLX5_SFS_PER_CTRL_IRQ 64 #define MLX5_IRQ_CTRL_SF_MAX 8 -/* min num of vectores for SFs to be enabled */ +/* min num of vectors for SFs to be enabled */ #define MLX5_IRQ_VEC_COMP_BASE_SF 2 #define MLX5_EQ_SHARE_IRQ_MAX_COMP (8) @@ -28,13 +28,13 @@ #define MLX5_EQ_REFS_PER_IRQ (2) struct mlx5_irq { - u32 index; struct atomic_notifier_head nh; cpumask_var_t mask; char name[MLX5_MAX_IRQ_NAME]; - struct kref kref; - int irqn; struct mlx5_irq_pool *pool; + int refcount; + u32 index; + int irqn; }; struct mlx5_irq_pool { @@ -138,9 +138,8 @@ out: return ret; } -static void irq_release(struct kref *kref) +static void irq_release(struct mlx5_irq *irq) { - struct mlx5_irq *irq = container_of(kref, struct mlx5_irq, kref); struct mlx5_irq_pool *pool = irq->pool; xa_erase(&pool->irqs, irq->index); @@ -159,10 +158,31 @@ static void irq_put(struct mlx5_irq *irq) struct mlx5_irq_pool *pool = irq->pool; mutex_lock(&pool->lock); - kref_put(&irq->kref, irq_release); + irq->refcount--; + if (!irq->refcount) + irq_release(irq); mutex_unlock(&pool->lock); } +static int irq_get_locked(struct mlx5_irq *irq) +{ + lockdep_assert_held(&irq->pool->lock); + if (WARN_ON_ONCE(!irq->refcount)) + return 0; + irq->refcount++; + return 1; +} + +static int irq_get(struct mlx5_irq *irq) +{ + int err; + + mutex_lock(&irq->pool->lock); + err = irq_get_locked(irq); + mutex_unlock(&irq->pool->lock); + return err; +} + static irqreturn_t irq_int_handler(int irq, void *nh) { atomic_notifier_call_chain(nh, 0, NULL); @@ -215,7 +235,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i) goto err_cpumask; } irq->pool = pool; - kref_init(&irq->kref); + irq->refcount = 1; irq->index = i; err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL)); if (err) { @@ -235,18 +255,18 @@ err_req_irq: int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb) { - int err; + int ret; - err = kref_get_unless_zero(&irq->kref); - if (WARN_ON_ONCE(!err)) + ret = irq_get(irq); + if (!ret) /* Something very bad happens here, we are enabling EQ * on non-existing IRQ. 
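The pci_irq.c hunks above replace the kref embedded in struct mlx5_irq with a bare integer; that is only sound because every get and put already runs under the pool mutex. A compilable userspace sketch of the same mutex-guarded refcounting pattern follows; the names and types here are illustrative, not the driver's:

#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t *lock;	/* pool-wide lock shared by all objects */
	int refcount;		/* only touched while holding *lock */
};

/* Caller must hold obj->lock; refuses to resurrect a dead object,
 * in the spirit of irq_get_locked() above. */
static int obj_get_locked(struct obj *o)
{
	if (!o->refcount)
		return 0;
	o->refcount++;
	return 1;
}

static void obj_put(struct obj *o)
{
	pthread_mutex_lock(o->lock);
	if (!--o->refcount)
		printf("last reference dropped; release the object\n");
	pthread_mutex_unlock(o->lock);
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct obj o = { .lock = &lock, .refcount = 1 };

	pthread_mutex_lock(&lock);
	obj_get_locked(&o);		/* refcount: 1 -> 2 */
	pthread_mutex_unlock(&lock);
	obj_put(&o);			/* 2 -> 1 */
	obj_put(&o);			/* 1 -> 0, released */
	return 0;
}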
*/ return -ENOENT; - err = atomic_notifier_chain_register(&irq->nh, nb); - if (err) + ret = atomic_notifier_chain_register(&irq->nh, nb); + if (ret) irq_put(irq); - return err; + return ret; } int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb) @@ -304,10 +324,9 @@ static struct mlx5_irq *irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, xa_for_each_range(&pool->irqs, index, iter, start, end) { if (!cpumask_equal(iter->mask, affinity)) continue; - if (kref_read(&iter->kref) < pool->min_threshold) + if (iter->refcount < pool->min_threshold) return iter; - if (!irq || kref_read(&iter->kref) < - kref_read(&irq->kref)) + if (!irq || iter->refcount < irq->refcount) irq = iter; } return irq; } @@ -322,7 +341,7 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool, mutex_lock(&pool->lock); least_loaded_irq = irq_pool_find_least_loaded(pool, affinity); if (least_loaded_irq && - kref_read(&least_loaded_irq->kref) < pool->min_threshold) + least_loaded_irq->refcount < pool->min_threshold) goto out; new_irq = irq_pool_create_irq(pool, affinity); if (IS_ERR(new_irq)) { @@ -340,11 +359,11 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool, least_loaded_irq = new_irq; goto unlock; out: - kref_get(&least_loaded_irq->kref); - if (kref_read(&least_loaded_irq->kref) > pool->max_threshold) + irq_get_locked(least_loaded_irq); + if (least_loaded_irq->refcount > pool->max_threshold) mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n", least_loaded_irq->irqn, pool->name, - kref_read(&least_loaded_irq->kref) / MLX5_EQ_REFS_PER_IRQ); + least_loaded_irq->refcount / MLX5_EQ_REFS_PER_IRQ); unlock: mutex_unlock(&pool->lock); return least_loaded_irq; } @@ -360,7 +379,7 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx, mutex_lock(&pool->lock); irq = xa_load(&pool->irqs, vecidx); if (irq) { - kref_get(&irq->kref); + irq_get_locked(irq); goto unlock; } irq = irq_request(pool, vecidx); @@ -427,7 +446,7 @@ out: return irq; mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n", irq->irqn, cpumask_pr_args(affinity), - kref_read(&irq->kref) / MLX5_EQ_REFS_PER_IRQ); + irq->refcount / MLX5_EQ_REFS_PER_IRQ); return irq; } @@ -459,8 +478,12 @@ static void irq_pool_free(struct mlx5_irq_pool *pool) struct mlx5_irq *irq; unsigned long index; + /* There are cases in which we are destroying the irq_table before + * freeing all the IRQs, fast teardown for example. Hence, free the irqs + * which might not have been freed. + */ xa_for_each(&pool->irqs, index, irq) - irq_release(&irq->kref); + irq_release(irq); xa_destroy(&pool->irqs); mutex_destroy(&pool->lock); kvfree(pool); @@ -483,7 +506,7 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec) if (!mlx5_sf_max_functions(dev)) return 0; if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) { - mlx5_core_err(dev, "Not enough IRQs for SFs. SF may run at lower performance\n"); + mlx5_core_dbg(dev, "Not enough IRQs for SFs. SF may run at lower performance\n"); return 0; } @@ -601,7 +624,7 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev) return; /* There are cases where IRQs will still be in use when we get - * to here. Hence, making sure all the irqs are realeased. + * to here. Hence, making sure all the irqs are released. 
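irq_pool_find_least_loaded() above prefers any IRQ that is still under min_threshold and otherwise falls back to the IRQ with the fewest references. A minimal standalone model of that selection policy over a plain array; the threshold value and the names are illustrative:

#include <stddef.h>
#include <stdio.h>

#define MIN_THRESHOLD 2		/* illustrative, not the driver's value */

struct sketch_irq { int refcount; };

static struct sketch_irq *find_least_loaded(struct sketch_irq *irqs, size_t n)
{
	struct sketch_irq *least = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		/* Anything under the threshold is immediately good enough. */
		if (irqs[i].refcount < MIN_THRESHOLD)
			return &irqs[i];
		/* Otherwise remember the least-referenced candidate. */
		if (!least || irqs[i].refcount < least->refcount)
			least = &irqs[i];
	}
	return least;
}

int main(void)
{
	struct sketch_irq pool[] = { { 5 }, { 3 }, { 4 } };
	struct sketch_irq *irq = find_least_loaded(pool, 3);

	printf("picked IRQ with refcount %d\n", irq->refcount);
	return 0;
}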
*/ irq_pools_destroy(table); pci_free_irq_vectors(dev->pdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index fa0288afc0dd..871c2fbe18d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -39,7 +39,7 @@ static ssize_t sfnum_show(struct device *dev, struct device_attribute *attr, cha struct auxiliary_device *adev = container_of(dev, struct auxiliary_device, dev); struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); - return scnprintf(buf, PAGE_SIZE, "%u\n", sf_dev->sfnum); + return sysfs_emit(buf, "%u\n", sf_dev->sfnum); } static DEVICE_ATTR_RO(sfnum); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index 42c8ee03fe3e..052f48068dc1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -14,7 +14,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia struct devlink *devlink; int err; - devlink = mlx5_devlink_alloc(); + devlink = mlx5_devlink_alloc(&adev->dev); if (!devlink) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index 1be048769309..13891fdc607e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -164,12 +164,12 @@ static bool mlx5_sf_is_active(const struct mlx5_sf *sf) return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE; } -int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port, +int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port, enum devlink_port_fn_state *state, enum devlink_port_fn_opstate *opstate, struct netlink_ext_ack *extack) { - struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink); struct mlx5_sf_table *table; struct mlx5_sf *sf; int err = 0; @@ -248,11 +248,11 @@ out: return err; } -int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port, +int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port, enum devlink_port_fn_state state, struct netlink_ext_ack *extack) { - struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink); struct mlx5_sf_table *table; struct mlx5_sf *sf; int err; @@ -476,7 +476,7 @@ static void mlx5_sf_table_disable(struct mlx5_sf_table *table) return; /* Balances with refcount_set; drop the reference so that new user cmd cannot start - * and new vhca event handler cannnot run. + * and new vhca event handler cannot run. 
*/ mlx5_sf_table_put(table); wait_for_completion(&table->disable_complete); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h index 81ce13b19ee8..3a480e06ecc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h @@ -24,11 +24,11 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink, unsigned int *new_port_index); int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index, struct netlink_ext_ack *extack); -int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port, +int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port, enum devlink_port_fn_state *state, enum devlink_port_fn_opstate *opstate, struct netlink_ext_ack *extack); -int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port, +int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port, enum devlink_port_fn_state state, struct netlink_ext_ack *extack); #else diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 6475ba35cf6b..a5b9f65db23c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -18,12 +18,39 @@ enum dr_action_valid_state { DR_ACTION_STATE_ENCAP, DR_ACTION_STATE_DECAP, DR_ACTION_STATE_MODIFY_HDR, - DR_ACTION_STATE_MODIFY_VLAN, + DR_ACTION_STATE_POP_VLAN, + DR_ACTION_STATE_PUSH_VLAN, DR_ACTION_STATE_NON_TERM, DR_ACTION_STATE_TERM, DR_ACTION_STATE_MAX, }; +static const char * const action_type_to_str[] = { + [DR_ACTION_TYP_TNL_L2_TO_L2] = "DR_ACTION_TYP_TNL_L2_TO_L2", + [DR_ACTION_TYP_L2_TO_TNL_L2] = "DR_ACTION_TYP_L2_TO_TNL_L2", + [DR_ACTION_TYP_TNL_L3_TO_L2] = "DR_ACTION_TYP_TNL_L3_TO_L2", + [DR_ACTION_TYP_L2_TO_TNL_L3] = "DR_ACTION_TYP_L2_TO_TNL_L3", + [DR_ACTION_TYP_DROP] = "DR_ACTION_TYP_DROP", + [DR_ACTION_TYP_QP] = "DR_ACTION_TYP_QP", + [DR_ACTION_TYP_FT] = "DR_ACTION_TYP_FT", + [DR_ACTION_TYP_CTR] = "DR_ACTION_TYP_CTR", + [DR_ACTION_TYP_TAG] = "DR_ACTION_TYP_TAG", + [DR_ACTION_TYP_MODIFY_HDR] = "DR_ACTION_TYP_MODIFY_HDR", + [DR_ACTION_TYP_VPORT] = "DR_ACTION_TYP_VPORT", + [DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN", + [DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN", + [DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR", + [DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR", + [DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN", +}; + +static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id) +{ + if (action_id > DR_ACTION_TYP_MAX) + action_id = DR_ACTION_TYP_MAX; + return action_type_to_str[action_id]; +} + static const enum dr_action_valid_state next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = { [DR_ACTION_DOMAIN_NIC_INGRESS] = { @@ -39,8 +66,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, [DR_ACTION_STATE_DECAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -53,7 +82,8 @@ 
next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -73,20 +103,31 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, - [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_STATE_POP_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, - [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, }, + [DR_ACTION_STATE_PUSH_VLAN] = { + [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + }, [DR_ACTION_STATE_NON_TERM] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, @@ -99,8 +140,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, [DR_ACTION_STATE_TERM] = { [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, @@ -115,8 +158,16 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + }, + [DR_ACTION_STATE_DECAP] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP, }, 
[DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -132,14 +183,25 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + }, + [DR_ACTION_STATE_POP_VLAN] = { + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, }, - [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_STATE_PUSH_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, - [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, @@ -152,8 +214,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, }, [DR_ACTION_STATE_TERM] = { [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, @@ -170,8 +234,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, }, [DR_ACTION_STATE_DECAP] = { @@ -180,11 +246,12 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -203,13 +270,26 @@ 
next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, - [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_STATE_POP_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + }, + [DR_ACTION_STATE_PUSH_VLAN] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, @@ -226,8 +306,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, }, [DR_ACTION_STATE_TERM] = { @@ -244,8 +326,17 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_DECAP] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -262,15 +353,27 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_POP_VLAN] = { + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN, + 
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, }, - [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_STATE_PUSH_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, @@ -285,7 +388,9 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, }, [DR_ACTION_STATE_TERM] = { @@ -314,6 +419,9 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type case DR_ACTION_REFORMAT_TYP_INSERT_HDR: *action_type = DR_ACTION_TYP_INSERT_HDR; break; + case DR_ACTION_REFORMAT_TYP_REMOVE_HDR: + *action_type = DR_ACTION_TYP_REMOVE_HDR; + break; default: return -EINVAL; } @@ -326,7 +434,7 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type * the new size of the STEs array, rule with actions. 
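The next_action_state tables above encode a finite state machine: rule building walks the action array, indexing the table by domain, current state, and action type, and any transition that lands on the zero entry is rejected as an invalid sequence. A compilable miniature of that validation scheme, with two real states and three actions standing in for the driver's full sets:

#include <stdio.h>

enum state { ST_ERR, ST_NO_ACTION, ST_NON_TERM, ST_TERM, ST_MAX };
enum action { ACT_CTR, ACT_FT, ACT_DROP, ACT_MAX };

/* Entries left at zero (ST_ERR) mark invalid transitions, just as
 * unset slots do in next_action_state[]. */
static const enum state next_state[ST_MAX][ACT_MAX] = {
	[ST_NO_ACTION] = {
		[ACT_CTR]  = ST_NON_TERM,
		[ACT_FT]   = ST_TERM,
		[ACT_DROP] = ST_TERM,
	},
	[ST_NON_TERM] = {
		[ACT_FT]   = ST_TERM,
		[ACT_DROP] = ST_TERM,
	},
	[ST_TERM] = {
		[ACT_CTR]  = ST_TERM,	/* a counter may follow a terminator */
	},
};

static int validate(const enum action *acts, int n)
{
	enum state st = ST_NO_ACTION;
	int i;

	for (i = 0; i < n; i++) {
		st = next_state[st][acts[i]];
		if (st == ST_ERR)	/* hit an unset entry: bad sequence */
			return -1;
	}
	return 0;
}

int main(void)
{
	enum action ok[]  = { ACT_CTR, ACT_FT };
	enum action bad[] = { ACT_FT, ACT_FT };	/* FT after a terminator */

	printf("ok=%d bad=%d\n", validate(ok, 2), validate(bad, 2));
	return 0;
}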
*/ static void dr_actions_apply(struct mlx5dr_domain *dmn, - enum mlx5dr_ste_entry_type ste_type, + enum mlx5dr_domain_nic_type nic_type, u8 *action_type_set, u8 *last_ste, struct mlx5dr_ste_actions_attr *attr, @@ -335,7 +443,7 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn, struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; u32 added_stes = 0; - if (ste_type == MLX5DR_STE_TYPE_RX) + if (nic_type == DR_DOMAIN_NIC_TYPE_RX) mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set, last_ste, attr, &added_stes); else @@ -347,7 +455,7 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn, static enum dr_action_domain dr_action_get_action_domain(enum mlx5dr_domain_type domain, - enum mlx5dr_ste_entry_type ste_type) + enum mlx5dr_domain_nic_type nic_type) { switch (domain) { case MLX5DR_DOMAIN_TYPE_NIC_RX: @@ -355,7 +463,7 @@ dr_action_get_action_domain(enum mlx5dr_domain_type domain, case MLX5DR_DOMAIN_TYPE_NIC_TX: return DR_ACTION_DOMAIN_NIC_EGRESS; case MLX5DR_DOMAIN_TYPE_FDB: - if (ste_type == MLX5DR_STE_TYPE_RX) + if (nic_type == DR_DOMAIN_NIC_TYPE_RX) return DR_ACTION_DOMAIN_FDB_INGRESS; return DR_ACTION_DOMAIN_FDB_EGRESS; default: @@ -421,6 +529,18 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn, return 0; } +static void dr_action_print_sequence(struct mlx5dr_domain *dmn, + struct mlx5dr_action *actions[], + int last_idx) +{ + int i; + + for (i = 0; i <= last_idx; i++) + mlx5dr_err(dmn, "< %s (%d) > ", + dr_action_id_to_str(actions[i]->action_type), + actions[i]->action_type); +} + #define WITH_VLAN_NUM_HW_ACTIONS 6 int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, @@ -431,7 +551,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, u32 *new_hw_ste_arr_sz) { struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; - bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX; + bool rx_rule = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX; struct mlx5dr_domain *dmn = matcher->tbl->dmn; u8 action_type_set[DR_ACTION_TYP_MAX] = {}; struct mlx5dr_ste_actions_attr attr = {}; @@ -445,7 +565,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, attr.gvmi = dmn->info.caps.gvmi; attr.hit_gvmi = dmn->info.caps.gvmi; attr.final_icm_addr = nic_dmn->default_icm_addr; - action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->ste_type); + action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->type); for (i = 0; i < num_actions; i++) { struct mlx5dr_action_dest_tbl *dest_tbl; @@ -467,11 +587,11 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, if (dest_tbl->tbl->dmn != dmn) { mlx5dr_err(dmn, "Destination table belongs to a different domain\n"); - goto out_invalid_arg; + return -EINVAL; } if (dest_tbl->tbl->level <= matcher->tbl->level) { - mlx5_core_warn_once(dmn->mdev, - "Connecting table to a lower/same level destination table\n"); + mlx5_core_dbg_once(dmn->mdev, + "Connecting table to a lower/same level destination table\n"); mlx5dr_dbg(dmn, "Connecting table at level %d to a destination table at level %d\n", matcher->tbl->level, @@ -509,7 +629,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, break; case DR_ACTION_TYP_QP: mlx5dr_info(dmn, "Domain doesn't support QP\n"); - goto out_invalid_arg; + return -EOPNOTSUPP; case DR_ACTION_TYP_CTR: attr.ctr_id = action->ctr->ctr_id + action->ctr->offeset; @@ -536,7 +656,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, if (rx_rule && !(dmn->ste_ctx->actions_caps & DR_STE_CTX_ACTION_CAP_RX_ENCAP)) { 
mlx5dr_info(dmn, "Device doesn't support Encap on RX\n"); - goto out_invalid_arg; + return -EOPNOTSUPP; } attr.reformat.size = action->reformat->size; attr.reformat.id = action->reformat->id; @@ -549,48 +669,66 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, attr.hit_gvmi = action->vport->caps->vhca_gvmi; dest_action = action; if (rx_rule) { - /* Loopback on WIRE vport is not supported */ - if (action->vport->caps->num == WIRE_PORT) - goto out_invalid_arg; - + if (action->vport->caps->num == WIRE_PORT) { + mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n"); + return -EOPNOTSUPP; + } attr.final_icm_addr = action->vport->caps->icm_address_rx; } else { attr.final_icm_addr = action->vport->caps->icm_address_tx; } break; case DR_ACTION_TYP_POP_VLAN: + if (!rx_rule && !(dmn->ste_ctx->actions_caps & + DR_STE_CTX_ACTION_CAP_TX_POP)) { + mlx5dr_dbg(dmn, "Device doesn't support POP VLAN action on TX\n"); + return -EOPNOTSUPP; + } + max_actions_type = MLX5DR_MAX_VLANS; attr.vlans.count++; break; case DR_ACTION_TYP_PUSH_VLAN: + if (rx_rule && !(dmn->ste_ctx->actions_caps & + DR_STE_CTX_ACTION_CAP_RX_PUSH)) { + mlx5dr_dbg(dmn, "Device doesn't support PUSH VLAN action on RX\n"); + return -EOPNOTSUPP; + } + max_actions_type = MLX5DR_MAX_VLANS; - if (attr.vlans.count == MLX5DR_MAX_VLANS) + if (attr.vlans.count == MLX5DR_MAX_VLANS) { + mlx5dr_dbg(dmn, "Max VLAN push/pop count exceeded\n"); return -EINVAL; + } attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr; break; case DR_ACTION_TYP_INSERT_HDR: + case DR_ACTION_TYP_REMOVE_HDR: attr.reformat.size = action->reformat->size; attr.reformat.id = action->reformat->id; attr.reformat.param_0 = action->reformat->param_0; attr.reformat.param_1 = action->reformat->param_1; break; default: - goto out_invalid_arg; + mlx5dr_err(dmn, "Unsupported action type %d\n", action_type); + return -EINVAL; } /* Check action duplication */ if (++action_type_set[action_type] > max_actions_type) { mlx5dr_err(dmn, "Action type %d supports only max %d time(s)\n", action_type, max_actions_type); - goto out_invalid_arg; + return -EINVAL; } /* Check action state machine is valid */ if (dr_action_validate_and_get_next_state(action_domain, action_type, &state)) { - mlx5dr_err(dmn, "Invalid action sequence provided\n"); + mlx5dr_err(dmn, "Invalid action (gvmi: %d, is_rx: %d) sequence provided:", + attr.gvmi, rx_rule); + dr_action_print_sequence(dmn, actions, i); return -EOPNOTSUPP; } } @@ -614,16 +752,13 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, } dr_actions_apply(dmn, - nic_dmn->ste_type, + nic_dmn->type, action_type_set, last_ste, &attr, new_hw_ste_arr_sz); return 0; - -out_invalid_arg: - return -EINVAL; } static unsigned int action_size[DR_ACTION_TYP_MAX] = { @@ -638,6 +773,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = { [DR_ACTION_TYP_VPORT] = sizeof(struct mlx5dr_action_vport), [DR_ACTION_TYP_PUSH_VLAN] = sizeof(struct mlx5dr_action_push_vlan), [DR_ACTION_TYP_INSERT_HDR] = sizeof(struct mlx5dr_action_reformat), + [DR_ACTION_TYP_REMOVE_HDR] = sizeof(struct mlx5dr_action_reformat), [DR_ACTION_TYP_SAMPLER] = sizeof(struct mlx5dr_action_sampler), }; @@ -709,7 +845,8 @@ dec_ref: struct mlx5dr_action * mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, struct mlx5dr_action_dest *dests, - u32 num_of_dests) + u32 num_of_dests, + bool ignore_flow_level) { struct mlx5dr_cmd_flow_destination_hw_info *hw_dests; struct mlx5dr_action **ref_actions; @@ -776,7 +913,8 @@ 
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, num_of_dests, reformat_req, &action->dest_tbl->fw_tbl.id, - &action->dest_tbl->fw_tbl.group_id); + &action->dest_tbl->fw_tbl.group_id, + ignore_flow_level); if (ret) goto free_action; @@ -884,11 +1022,23 @@ dr_action_verify_reformat_params(enum mlx5dr_action_type reformat_type, size_t data_sz, void *data) { - if ((!data && data_sz) || (data && !data_sz) || - ((reformat_param_0 || reformat_param_1) && - reformat_type != DR_ACTION_TYP_INSERT_HDR) || - reformat_type > DR_ACTION_TYP_INSERT_HDR) { - mlx5dr_dbg(dmn, "Invalid reformat parameter!\n"); + if (reformat_type == DR_ACTION_TYP_INSERT_HDR) { + if ((!data && data_sz) || (data && !data_sz) || + MLX5_CAP_GEN_2(dmn->mdev, max_reformat_insert_size) < data_sz || + MLX5_CAP_GEN_2(dmn->mdev, max_reformat_insert_offset) < reformat_param_1) { + mlx5dr_dbg(dmn, "Invalid reformat parameters for INSERT_HDR\n"); + goto out_err; + } + } else if (reformat_type == DR_ACTION_TYP_REMOVE_HDR) { + if (data || + MLX5_CAP_GEN_2(dmn->mdev, max_reformat_remove_size) < data_sz || + MLX5_CAP_GEN_2(dmn->mdev, max_reformat_remove_offset) < reformat_param_1) { + mlx5dr_dbg(dmn, "Invalid reformat parameters for REMOVE_HDR\n"); + goto out_err; + } + } else if (reformat_param_0 || reformat_param_1 || + reformat_type > DR_ACTION_TYP_REMOVE_HDR) { + mlx5dr_dbg(dmn, "Invalid reformat parameters\n"); goto out_err; } @@ -987,7 +1137,6 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, return 0; } case DR_ACTION_TYP_INSERT_HDR: - { ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, MLX5_REFORMAT_TYPE_INSERT_HDR, reformat_param_0, @@ -1002,7 +1151,12 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, action->reformat->param_0 = reformat_param_0; action->reformat->param_1 = reformat_param_1; return 0; - } + case DR_ACTION_TYP_REMOVE_HDR: + action->reformat->id = 0; + action->reformat->size = data_sz; + action->reformat->param_0 = reformat_param_0; + action->reformat->param_1 = reformat_param_1; + return 0; default: mlx5dr_info(dmn, "Reformat type is not supported %d\n", action->action_type); return -EINVAL; @@ -1658,6 +1812,7 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) } break; case DR_ACTION_TYP_TNL_L2_TO_L2: + case DR_ACTION_TYP_REMOVE_HDR: refcount_dec(&action->reformat->dmn->refcount); break; case DR_ACTION_TYP_TNL_L3_TO_L2: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 54e1f5438bbe..56307283bf9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -655,6 +655,7 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(set_fte_in, in, table_type, ft->type); MLX5_SET(set_fte_in, in, table_id, ft->id); MLX5_SET(set_fte_in, in, flow_index, fte->index); + MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level); if (ft->vport) { MLX5_SET(set_fte_in, in, vport_number, ft->vport); MLX5_SET(set_fte_in, in, other_vport, 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 7091b1be84ef..0fe159809ba1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -245,7 +245,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, return -ENOTSUPP; dmn->info.supp_sw_steering = true; - dmn->info.rx.ste_type = 
MLX5DR_STE_TYPE_RX; + dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX; dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address; dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address; break; @@ -254,7 +254,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, return -ENOTSUPP; dmn->info.supp_sw_steering = true; - dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX; + dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX; dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address; dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address; break; @@ -265,8 +265,8 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb)) return -ENOTSUPP; - dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX; - dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX; + dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX; + dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX; vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0); if (!vport_cap) { mlx5dr_err(dmn, "Failed to get esw manager vport\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c index 7ccfd40586ce..0d6f86eb248b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c @@ -103,7 +103,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, int num_dest, bool reformat_req, u32 *tbl_id, - u32 *group_id) + u32 *group_id, + bool ignore_flow_level) { struct mlx5dr_cmd_create_flow_table_attr ft_attr = {}; struct mlx5dr_cmd_fte_info fte_info = {}; @@ -137,6 +138,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, fte_info.dests_size = num_dest; fte_info.val = val; fte_info.dest_arr = dest; + fte_info.ignore_flow_level = ignore_flow_level; ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info); if (ret) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 6f6191d1d5a6..b5409cc021d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -396,13 +396,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, struct mlx5dr_domain *dmn = matcher->tbl->dmn; struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; struct mlx5dr_match_param mask = {}; + bool allow_empty_match = false; struct mlx5dr_ste_build *sb; bool inner, rx; int idx = 0; int ret, i; sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv]; - rx = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX; + rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX; /* Create a temporary mask to track and clear used mask fields */ if (matcher->match_criteria & DR_MATCHER_CRITERIA_OUTER) @@ -428,6 +429,16 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (ret) return ret; + /* Optimize RX pipe by reducing source port match, since + * the FDB RX part is connected only to the wire. 
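The matcher change that follows exploits the fact that the FDB RX pipeline only ever sees traffic from the wire, so a source-port match there carries no information; the mask bits are cleared and, if nothing else remains, an empty always-hit matcher is permitted. A small standalone sketch of that mask-pruning decision; the field names are simplified:

#include <stdbool.h>
#include <stdio.h>

struct match_mask {
	unsigned int source_port;	/* simplified stand-in fields */
	unsigned int other_fields;
};

/* Returns true when an otherwise-empty match must still be accepted,
 * mirroring the allow_empty_match flag introduced here. */
static bool prune_fdb_rx_mask(struct match_mask *mask, bool fdb, bool rx)
{
	if (fdb && rx && mask->source_port) {
		/* RX only sees wire traffic: the match is redundant. */
		mask->source_port = 0;
		return true;
	}
	return false;
}

int main(void)
{
	struct match_mask mask = { .source_port = 0xffff };
	bool allow_empty = prune_fdb_rx_mask(&mask, true, true);

	printf("source_port=%u allow_empty=%d\n",
	       mask.source_port, allow_empty);
	return 0;
}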
+ */ + if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB && + rx && mask.misc.source_port) { + mask.misc.source_port = 0; + mask.misc.source_eswitch_owner_vhca_id = 0; + allow_empty_match = true; + } + /* Outer */ if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER | DR_MATCHER_CRITERIA_MISC | @@ -619,7 +630,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, } /* Empty matcher, takes all */ - if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY) + if ((!idx && allow_empty_match) || + matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY) mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx); if (idx == 0) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index 43356fad53de..aca80efc28fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -81,6 +81,7 @@ dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher, } ste->ste_chain_location = orig_ste->ste_chain_location; + ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste; /* In collision entry, all members share the same miss_list_head */ ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste); @@ -185,6 +186,9 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher, if (!new_ste) return NULL; + /* Update collision pointing STE */ + new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste; + /* In collision entry, all members share the same miss_list_head */ new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste); @@ -212,7 +216,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher, new_ste->next_htbl = cur_ste->next_htbl; new_ste->ste_chain_location = cur_ste->ste_chain_location; - if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location)) + if (new_ste->next_htbl) new_ste->next_htbl->pointing_ste = new_ste; /* We need to copy the refcount since this ste @@ -220,10 +224,8 @@ */ new_ste->refcount = cur_ste->refcount; - /* Link old STEs rule_mem list to the new ste */ - mlx5dr_rule_update_rule_member(cur_ste, new_ste); - INIT_LIST_HEAD(&new_ste->rule_list); - list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list); + /* Link old STEs rule to the new ste */ + mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false); } static struct mlx5dr_ste * @@ -404,7 +406,7 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule, info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr; mlx5dr_ste_set_formatted_ste(dmn->ste_ctx, dmn->info.caps.gvmi, - nic_dmn, + nic_dmn->type, new_htbl, formatted_ste, &info); @@ -581,34 +583,66 @@ free_action_members: return -ENOMEM; } -/* While the pointer of ste is no longer valid, like while moving ste to be - * the first in the miss_list, and to be in the origin table, - * all rule-members that are attached to this ste should update their ste member - * to the new pointer - */ -void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste, - struct mlx5dr_ste *new_ste) +void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule, + struct mlx5dr_ste *ste, + bool force) { - struct mlx5dr_rule_member *rule_mem; + /* Update rule member is usually done for the last STE or during rule + * creation to recover from mid-creation failure (for this purpose the + * force flag is used) + */ + if (ste->next_htbl && !force) + return; - list_for_each_entry(rule_mem, &ste->rule_list, 
use_ste_list) - rule_mem->ste = new_ste; + /* Update is required since each rule keeps track of its last STE */ + ste->rule_rx_tx = nic_rule; + nic_rule->last_rule_ste = ste; +} + +static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste) +{ + struct mlx5dr_ste *first_ste; + + first_ste = list_first_entry(mlx5dr_ste_get_miss_list(curr_ste), + struct mlx5dr_ste, miss_list_node); + + return first_ste->htbl->pointing_ste; +} + +int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr, + struct mlx5dr_ste *curr_ste, + int *num_of_stes) +{ + bool first = false; + + *num_of_stes = 0; + + if (!curr_ste) + return -ENOENT; + + /* Iterate from last to first */ + while (!first) { + first = curr_ste->ste_chain_location == 1; + ste_arr[*num_of_stes] = curr_ste; + *num_of_stes += 1; + curr_ste = dr_rule_get_pointed_ste(curr_ste); + } + + return 0; } static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule, struct mlx5dr_rule_rx_tx *nic_rule) { - struct mlx5dr_rule_member *rule_mem; - struct mlx5dr_rule_member *tmp_mem; + struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES]; + struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste; + int i; - if (list_empty(&nic_rule->rule_members_list)) + if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i)) return; - list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) { - list_del(&rule_mem->list); - list_del(&rule_mem->use_ste_list); - mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher); - kvfree(rule_mem); - } + + while (i--) + mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher); } static u16 dr_get_bits_per_mask(u16 byte_mask) @@ -628,43 +662,25 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl, struct mlx5dr_domain_rx_tx *nic_dmn) { struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl; + int threshold; if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size) return false; - if (!ctrl->may_grow) + if (!mlx5dr_ste_htbl_may_grow(htbl)) return false; if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size) return false; - if (ctrl->num_of_collisions >= ctrl->increase_threshold && - (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold) + threshold = mlx5dr_ste_htbl_increase_threshold(htbl); + if (ctrl->num_of_collisions >= threshold && + (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold) return true; return false; } -static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule, - struct mlx5dr_ste *ste) -{ - struct mlx5dr_rule_member *rule_mem; - - rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL); - if (!rule_mem) - return -ENOMEM; - - INIT_LIST_HEAD(&rule_mem->list); - INIT_LIST_HEAD(&rule_mem->use_ste_list); - - rule_mem->ste = ste; - list_add_tail(&rule_mem->list, &nic_rule->rule_members_list); - - list_add_tail(&rule_mem->use_ste_list, &ste->rule_list); - - return 0; -} - static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, struct mlx5dr_rule_rx_tx *nic_rule, struct list_head *send_ste_list, @@ -679,15 +695,13 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, struct mlx5dr_domain *dmn = matcher->tbl->dmn; u8 *curr_hw_ste, *prev_hw_ste; struct mlx5dr_ste *action_ste; - int i, k, ret; + int i, k; /* Two cases: * 1. num_of_builders is equal to new_hw_ste_arr_sz, the action in the ste * 2. num_of_builders is less than new_hw_ste_arr_sz, new ste was added + * to support the action. 
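The replacement bookkeeping above is easier to see in a standalone model: a rule now records only its last STE, and the earlier members are recovered by walking each hash table's pointing_ste back pointer until chain location 1 is reached, as mlx5dr_rule_get_reverse_rule_members() does. The types below are simplified stand-ins, not the driver's structures (the real walk goes through the miss-list head first).

#include <stdio.h>

struct ste;

struct htbl {
	struct ste *pointing_ste;	/* the STE that points at this table */
};

struct ste {
	struct htbl *htbl;		/* table this STE lives in */
	int chain_location;		/* 1 == first STE of the rule */
};

/* Collect the rule members from last to first via the back pointers. */
static int reverse_members(struct ste *last, struct ste **arr)
{
	int n = 0;

	while (last) {
		arr[n++] = last;
		if (last->chain_location == 1)
			break;
		last = last->htbl->pointing_ste;
	}
	return n;
}

int main(void)
{
	struct htbl t1 = { 0 }, t2 = { 0 }, t3 = { 0 };
	struct ste s1 = { &t1, 1 }, s2 = { &t2, 2 }, s3 = { &t3, 3 };
	struct ste *arr[8];

	t2.pointing_ste = &s1;	/* s1 points at the table holding s2 */
	t3.pointing_ste = &s2;	/* s2 points at the table holding s3 */

	printf("members found: %d\n", reverse_members(&s3, arr));	/* 3 */
	return 0;
}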
*/ - if (num_of_builders == new_hw_ste_arr_sz) - return 0; for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) { curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE; @@ -700,6 +714,10 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, mlx5dr_ste_get(action_ste); + action_ste->htbl->pointing_ste = last_ste; + last_ste->next_htbl = action_ste->htbl; + last_ste = action_ste; + /* While free ste we go over the miss list, so add this ste to the list */ list_add_tail(&action_ste->miss_list_node, mlx5dr_ste_get_miss_list(action_ste)); @@ -713,21 +731,19 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx, prev_hw_ste, action_ste->htbl); - ret = dr_rule_add_member(nic_rule, action_ste); - if (ret) { - mlx5dr_dbg(dmn, "Failed adding rule member\n"); - goto free_ste_info; - } + + mlx5dr_rule_set_last_member(nic_rule, action_ste, true); + mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0, curr_hw_ste, ste_info_arr[k], send_ste_list, false); } + last_ste->next_htbl = NULL; + return 0; -free_ste_info: - kfree(ste_info_arr[k]); err_exit: mlx5dr_ste_put(action_ste, matcher, nic_matcher); return -ENOMEM; @@ -846,9 +862,9 @@ again: new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl, ste_location, send_ste_list); if (!new_htbl) { - mlx5dr_htbl_put(cur_htbl); mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n", cur_htbl->chunk_size); + mlx5dr_htbl_put(cur_htbl); } else { cur_htbl = new_htbl; } @@ -1015,12 +1031,12 @@ static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec) } static bool dr_rule_skip(enum mlx5dr_domain_type domain, - enum mlx5dr_ste_entry_type ste_type, + enum mlx5dr_domain_nic_type nic_type, struct mlx5dr_match_param *mask, struct mlx5dr_match_param *value, u32 flow_source) { - bool rx = ste_type == MLX5DR_STE_TYPE_RX; + bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX; if (domain != MLX5DR_DOMAIN_TYPE_FDB) return false; @@ -1065,9 +1081,7 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, nic_matcher = nic_rule->nic_matcher; nic_dmn = nic_matcher->nic_tbl->nic_dmn; - INIT_LIST_HEAD(&nic_rule->rule_members_list); - - if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param, + if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param, rule->flow_source)) return 0; @@ -1121,14 +1135,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, cur_htbl = ste->next_htbl; - /* Keep all STEs in the rule struct */ - ret = dr_rule_add_member(nic_rule, ste); - if (ret) { - mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i); - goto free_ste; - } - mlx5dr_ste_get(ste); + mlx5dr_rule_set_last_member(nic_rule, ste, true); } /* Connect actions */ @@ -1153,8 +1161,6 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, return 0; -free_ste: - mlx5dr_ste_put(ste, matcher, nic_matcher); free_rule: dr_rule_clean_rule_members(rule, nic_rule); /* Clean all ste_info's */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 9df0e73d1c35..bfb14b4b1906 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -325,10 +325,14 @@ static int dr_handle_pending_wc(struct mlx5dr_domain *dmn, do { ne = dr_poll_cq(send_ring->cq, 1); - if (ne < 0) + if (unlikely(ne < 0)) { + mlx5_core_warn_once(dmn->mdev, "SMFS QPN 0x%x is disabled/limited", + send_ring->qp->qpn); + send_ring->err_state = true; return ne; - 
else if (ne == 1) + } else if (ne == 1) { send_ring->pending_wqe -= send_ring->signal_th; + } } while (is_drain && send_ring->pending_wqe); return 0; @@ -361,6 +365,14 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn, u32 buff_offset; int ret; + if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || + send_ring->err_state)) { + mlx5_core_dbg_once(dmn->mdev, + "Skipping post send: QP err state: %d, device state: %d\n", + send_ring->err_state, dmn->mdev->state); + return 0; + } + spin_lock(&send_ring->lock); ret = dr_handle_pending_wc(dmn, send_ring); @@ -620,6 +632,7 @@ static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev, MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt); MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry); + MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */ MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP); MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn); @@ -789,7 +802,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe)); - MLX5_SET(cqc, cqc, c_eqn, eqn); + MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); MLX5_SET(cqc, cqc, uar_page, uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 9b1529137cba..1cdfe4fccc7a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -172,9 +172,6 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src) dst->next_htbl->pointing_ste = dst; dst->refcount = src->refcount; - - INIT_LIST_HEAD(&dst->rule_list); - list_splice_tail_init(&src->rule_list, &dst->rule_list); } /* Free ste which is the head and the only one in miss_list */ @@ -233,12 +230,12 @@ dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher, /* Remove from the miss_list the next_ste before copy */ list_del_init(&next_ste->miss_list_node); - /* All rule-members that use next_ste should know about that */ - mlx5dr_rule_update_rule_member(next_ste, ste); - /* Move data from next into ste */ dr_ste_replace(ste, next_ste); + /* Update the rule on STE change */ + mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false); + /* Copy all 64 hw_ste bytes */ memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED); sb_idx = ste->ste_chain_location - 1; @@ -382,14 +379,15 @@ void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx, /* Init one ste as a pattern for ste data array */ void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx, u16 gvmi, - struct mlx5dr_domain_rx_tx *nic_dmn, + enum mlx5dr_domain_nic_type nic_type, struct mlx5dr_ste_htbl *htbl, u8 *formatted_ste, struct mlx5dr_htbl_connect_info *connect_info) { + bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX; struct mlx5dr_ste ste = {}; - ste_ctx->ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi); + ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi); ste.hw_ste = formatted_ste; if (connect_info->type == CONNECT_HIT) @@ -408,7 +406,7 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn, mlx5dr_ste_set_formatted_ste(dmn->ste_ctx, dmn->info.caps.gvmi, - nic_dmn, + nic_dmn->type, htbl, formatted_ste, connect_info); @@ -466,21 +464,6 @@ free_table: return -ENOENT; } -static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl 
*htbl) -{ - struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl; - int num_of_entries; - - htbl->ctrl.may_grow = true; - - if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask) - htbl->ctrl.may_grow = false; - - /* Threshold is 50%, one is added to table of size 1 */ - num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size); - ctrl->increase_threshold = (num_of_entries + 1) / 2; -} - struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, enum mlx5dr_icm_chunk_size chunk_size, u16 lu_type, u16 byte_mask) @@ -513,11 +496,9 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, ste->refcount = 0; INIT_LIST_HEAD(&ste->miss_list_node); INIT_LIST_HEAD(&htbl->miss_list[i]); - INIT_LIST_HEAD(&ste->rule_list); } htbl->chunk_size = chunk_size; - dr_ste_set_ctrl(htbl); return htbl; out_free_htbl: @@ -649,6 +630,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, u8 *ste_arr) { struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; + bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX; struct mlx5dr_domain *dmn = matcher->tbl->dmn; struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; struct mlx5dr_ste_build *sb; @@ -663,7 +645,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, for (i = 0; i < nic_matcher->num_of_builders; i++) { ste_ctx->ste_init(ste_arr, sb->lu_type, - nic_dmn->ste_type, + is_rx, dmn->info.caps.gvmi); mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h index 12a8bbbf944b..2d52d065dc8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h @@ -146,7 +146,7 @@ struct mlx5dr_ste_ctx { /* Getters and Setters */ void (*ste_init)(u8 *hw_ste_p, u16 lu_type, - u8 entry_type, u16 gvmi); + bool is_rx, u16 gvmi); void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type); u16 (*get_next_lu_type)(u8 *hw_ste_p); void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index e4dd4eed5aee..9c704bce3c12 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -8,6 +8,12 @@ #define SVLAN_ETHERTYPE 0x88a8 #define DR_STE_ENABLE_FLOW_TAG BIT(31) +enum dr_ste_v0_entry_type { + DR_STE_TYPE_TX = 1, + DR_STE_TYPE_RX = 2, + DR_STE_TYPE_MODIFY_PKT = 6, +}; + enum dr_ste_v0_action_tunl { DR_STE_TUNL_ACTION_NONE = 0, DR_STE_TUNL_ACTION_ENABLE = 1, @@ -292,8 +298,8 @@ static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index); } -static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type, - u8 entry_type, u16 gvmi) +static void dr_ste_v0_init_full(u8 *hw_ste_p, u16 lu_type, + enum dr_ste_v0_entry_type entry_type, u16 gvmi) { dr_ste_v0_set_entry_type(hw_ste_p, entry_type); dr_ste_v0_set_lu_type(hw_ste_p, lu_type); @@ -307,6 +313,15 @@ static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type, MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi); } +static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type, + bool is_rx, u16 gvmi) +{ + enum dr_ste_v0_entry_type entry_type; + + entry_type = is_rx ? 
DR_STE_TYPE_RX : DR_STE_TYPE_TX; + dr_ste_v0_init_full(hw_ste_p, lu_type, entry_type, gvmi); +} + static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag) { MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer, @@ -380,13 +395,13 @@ static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions, static void dr_ste_v0_arr_init_next(u8 **last_ste, u32 *added_stes, - enum mlx5dr_ste_entry_type entry_type, + enum dr_ste_v0_entry_type entry_type, u16 gvmi) { (*added_stes)++; *last_ste += DR_STE_SIZE; - dr_ste_v0_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, - entry_type, gvmi); + dr_ste_v0_init_full(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, + entry_type, gvmi); } static void @@ -404,7 +419,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, * modify headers for outer headers only */ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { - dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT); dr_ste_v0_set_rewrite_actions(last_ste, attr->modify_actions, attr->modify_index); @@ -417,7 +432,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR]) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_TX, + DR_STE_TYPE_TX, attr->gvmi); dr_ste_v0_set_tx_push_vlan(last_ste, @@ -435,7 +450,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, action_type_set[DR_ACTION_TYP_PUSH_VLAN]) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_TX, + DR_STE_TYPE_TX, attr->gvmi); dr_ste_v0_set_tx_encap(last_ste, @@ -469,7 +484,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, dr_ste_v0_set_counter_id(last_ste, attr->ctr_id); if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) { - dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT); dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan); dr_ste_v0_set_rewrite_actions(last_ste, attr->decap_actions, @@ -488,7 +503,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_RX, + DR_STE_TYPE_RX, attr->gvmi); dr_ste_v0_set_rx_pop_vlan(last_ste); @@ -496,13 +511,13 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, } if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { - if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT) + if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_MODIFY_PKT, + DR_STE_TYPE_MODIFY_PKT, attr->gvmi); else - dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT); dr_ste_v0_set_rewrite_actions(last_ste, attr->modify_actions, @@ -510,10 +525,10 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, } if (action_type_set[DR_ACTION_TYP_TAG]) { - if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT) + if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_RX, + DR_STE_TYPE_RX, attr->gvmi); dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag); @@ -1157,6 +1172,7 @@ dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, u8 *tag) { struct mlx5dr_match_spec *spec = sb->inner ? 
&value->inner : &value->outer; + struct mlx5dr_match_misc *misc = &value->misc; DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport); DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport); @@ -1168,6 +1184,11 @@ dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn); DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit); + if (sb->inner) + DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, inner_ipv6_flow_label); + else + DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, outer_ipv6_flow_label); + if (spec->tcp_flags) { DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec); spec->tcp_flags = 0; @@ -1772,7 +1793,7 @@ dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { struct mlx5dr_match_misc3 *misc3 = &value->misc3; @@ -1802,7 +1823,7 @@ static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *s static int dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0)) DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); @@ -1829,7 +1850,7 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, static int dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0)) DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index 4aaca8eb7597..b2481c99da79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -322,7 +322,7 @@ static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) } static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, - u8 entry_type, u16 gvmi) + bool is_rx, u16 gvmi) { dr_ste_v1_set_lu_type(hw_ste_p, lu_type); dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE); @@ -402,8 +402,23 @@ static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action, - u32 vlan_hdr) +static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, + u8 anchor, u8 offset, + int size) +{ + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, + action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_anchor, anchor); + + /* The hardware expects the size and offset here in words (2 bytes) */ + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, remove_size, size / 2); + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_offset, offset / 2); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, + u32 vlan_hdr) { MLX5_SET(ste_double_action_insert_with_inline_v1, d_action, action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE); @@ -416,7 +431,7 @@ static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action, dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_rx_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num) +static void 
dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num) { MLX5_SET(ste_single_action_remove_header_size_v1, s_action, action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); @@ -503,13 +518,28 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, { u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action); u8 action_sz = DR_STE_ACTION_DOUBLE_SZ; + bool allow_modify_hdr = true; bool allow_encap = true; + if (action_type_set[DR_ACTION_TYP_POP_VLAN]) { + if (action_sz < DR_STE_ACTION_SINGLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, + attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, + last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } + dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); + action_sz -= DR_STE_ACTION_SINGLE_SZ; + action += DR_STE_ACTION_SINGLE_SZ; + allow_modify_hdr = false; + } + if (action_type_set[DR_ACTION_TYP_CTR]) dr_ste_v1_set_counter_id(last_ste, attr->ctr_id); if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { - if (action_sz < DR_STE_ACTION_DOUBLE_SZ) { + if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) { dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); action = MLX5_ADDR_OF(ste_mask_and_match_v1, @@ -534,7 +564,8 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, action_sz = DR_STE_ACTION_TRIPLE_SZ; allow_encap = true; } - dr_ste_v1_set_tx_push_vlan(last_ste, action, attr->vlans.headers[i]); + dr_ste_v1_set_push_vlan(last_ste, action, + attr->vlans.headers[i]); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; } @@ -579,6 +610,18 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, attr->reformat.size); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; + } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) { + if (action_sz < DR_STE_ACTION_SINGLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } + dr_ste_v1_set_remove_hdr(last_ste, action, + attr->reformat.param_0, + attr->reformat.param_1, + attr->reformat.size); + action_sz -= DR_STE_ACTION_SINGLE_SZ; + action += DR_STE_ACTION_SINGLE_SZ; } dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi); @@ -635,7 +678,7 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, allow_ctr = false; } - dr_ste_v1_set_rx_pop_vlan(last_ste, action, attr->vlans.count); + dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; } @@ -656,6 +699,26 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, action += DR_STE_ACTION_DOUBLE_SZ; } + if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) { + int i; + + for (i = 0; i < attr->vlans.count; i++) { + if (action_sz < DR_STE_ACTION_DOUBLE_SZ || + !allow_modify_hdr) { + dr_ste_v1_arr_init_next_match(&last_ste, + added_stes, + attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, + last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } + dr_ste_v1_set_push_vlan(last_ste, action, + attr->vlans.headers[i]); + action_sz -= DR_STE_ACTION_DOUBLE_SZ; + action += DR_STE_ACTION_DOUBLE_SZ; + } + } + if (action_type_set[DR_ACTION_TYP_CTR]) { /* Counter action set after decap and before insert_hdr * to exclude decapsulated / encapsulated headers, respectively. 
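Every branch of dr_ste_v1_set_actions_tx()/..._rx() above repeats the same budget pattern, so a compact standalone sketch may help; the sizes and the action sequence are illustrative, not the hardware encoding. Each STE carries a fixed action budget: when the next action does not fit, a fresh match STE is chained and the budget resets to the larger triple size.

#include <stdio.h>

#define ACTION_SINGLE_SZ 1
#define ACTION_DOUBLE_SZ 2
#define ACTION_TRIPLE_SZ 3

struct ste_builder {
	int budget;	/* remaining action slots in the current STE */
	int stes;	/* STEs chained so far */
};

static void emit_action(struct ste_builder *b, int size)
{
	if (b->budget < size) {		/* spill into a new chained STE */
		b->stes++;
		b->budget = ACTION_TRIPLE_SZ;
	}
	b->budget -= size;
}

int main(void)
{
	struct ste_builder b = { .budget = ACTION_DOUBLE_SZ, .stes = 1 };

	emit_action(&b, ACTION_SINGLE_SZ);	/* e.g. pop vlan */
	emit_action(&b, ACTION_DOUBLE_SZ);	/* e.g. modify header: spills */
	emit_action(&b, ACTION_DOUBLE_SZ);	/* e.g. push vlan: spills again */

	printf("STEs used: %d\n", b.stes);	/* 3 */
	return 0;
}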
@@ -714,6 +777,20 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; allow_modify_hdr = false; + } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) { + if (action_sz < DR_STE_ACTION_SINGLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + allow_modify_hdr = true; + allow_ctr = true; + } + dr_ste_v1_set_remove_hdr(last_ste, action, + attr->reformat.param_0, + attr->reformat.param_1, + attr->reformat.size); + action_sz -= DR_STE_ACTION_SINGLE_SZ; + action += DR_STE_ACTION_SINGLE_SZ; } dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi); @@ -1844,7 +1921,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { struct mlx5dr_match_misc3 *misc3 = &value->misc3; @@ -1868,7 +1945,7 @@ static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *s static int dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0)) DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); @@ -1895,7 +1972,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, static int dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0)) DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); @@ -1960,7 +2037,9 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = { .set_byte_mask = &dr_ste_v1_set_byte_mask, .get_byte_mask = &dr_ste_v1_get_byte_mask, /* Actions */ - .actions_caps = DR_STE_CTX_ACTION_CAP_RX_ENCAP, + .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP | + DR_STE_CTX_ACTION_CAP_RX_PUSH | + DR_STE_CTX_ACTION_CAP_RX_ENCAP, .set_actions_rx = &dr_ste_v1_set_actions_rx, .set_actions_tx = &dr_ste_v1_set_actions_tx, .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index f5e93fa87aff..b20e8aabb861 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -83,15 +83,14 @@ enum { DR_STE_SIZE_CTRL = 32, DR_STE_SIZE_TAG = 16, DR_STE_SIZE_MASK = 16, -}; - -enum { DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK, }; enum mlx5dr_ste_ctx_action_cap { DR_STE_CTX_ACTION_CAP_NONE = 0, - DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 0, + DR_STE_CTX_ACTION_CAP_TX_POP = 1 << 0, + DR_STE_CTX_ACTION_CAP_RX_PUSH = 1 << 1, + DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2, }; enum { @@ -124,6 +123,7 @@ enum mlx5dr_action_type { DR_ACTION_TYP_POP_VLAN, DR_ACTION_TYP_PUSH_VLAN, DR_ACTION_TYP_INSERT_HDR, + DR_ACTION_TYP_REMOVE_HDR, DR_ACTION_TYP_SAMPLER, DR_ACTION_TYP_MAX, }; @@ -140,6 +140,7 @@ struct mlx5dr_icm_buddy_mem; struct mlx5dr_ste_htbl; struct mlx5dr_match_param; struct mlx5dr_cmd_caps; +struct mlx5dr_rule_rx_tx; struct mlx5dr_matcher_rx_tx; struct mlx5dr_ste_ctx; @@ -151,14 +152,14 @@ struct mlx5dr_ste { /* attached to the miss_list head at each htbl entry */ struct list_head miss_list_node; - /* 
each rule member that uses this ste attached here */ - struct list_head rule_list; - /* this ste is member of htbl */ struct mlx5dr_ste_htbl *htbl; struct mlx5dr_ste_htbl *next_htbl; + /* The rule this STE belongs to */ + struct mlx5dr_rule_rx_tx *rule_rx_tx; + /* this ste is part of a rule, located in ste's chain */ u8 ste_chain_location; }; @@ -171,8 +172,6 @@ struct mlx5dr_ste_htbl_ctrl { /* total number of collisions entries attached to this table */ unsigned int num_of_collisions; - unsigned int increase_threshold; - u8 may_grow:1; }; struct mlx5dr_ste_htbl { @@ -804,10 +803,15 @@ struct mlx5dr_cmd_caps { u8 isolate_vl_tc:1; }; +enum mlx5dr_domain_nic_type { + DR_DOMAIN_NIC_TYPE_RX, + DR_DOMAIN_NIC_TYPE_TX, +}; + struct mlx5dr_domain_rx_tx { u64 drop_icm_addr; u64 default_icm_addr; - enum mlx5dr_ste_entry_type ste_type; + enum mlx5dr_domain_nic_type type; struct mutex mutex; /* protect rx/tx domain */ }; @@ -885,14 +889,6 @@ struct mlx5dr_matcher { struct mlx5dv_flow_matcher *dv_matcher; }; -struct mlx5dr_rule_member { - struct mlx5dr_ste *ste; - /* attached to mlx5dr_rule via this */ - struct list_head list; - /* attached to mlx5dr_ste via this */ - struct list_head use_ste_list; -}; - struct mlx5dr_ste_action_modify_field { u16 hw_field; u8 start; @@ -993,8 +989,8 @@ struct mlx5dr_htbl_connect_info { }; struct mlx5dr_rule_rx_tx { - struct list_head rule_members_list; struct mlx5dr_matcher_rx_tx *nic_matcher; + struct mlx5dr_ste *last_rule_ste; }; struct mlx5dr_rule { @@ -1005,8 +1001,12 @@ struct mlx5dr_rule { u32 flow_source; }; -void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste, - struct mlx5dr_ste *ste); +void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule, + struct mlx5dr_ste *ste, + bool force); +int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr, + struct mlx5dr_ste *curr_ste, + int *num_of_stes); struct mlx5dr_icm_chunk { struct mlx5dr_icm_buddy_mem *buddy_mem; @@ -1083,6 +1083,25 @@ mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size, return entry_size * num_of_entries; } +static inline int +mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl) +{ + int num_of_entries = + mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size); + + /* Threshold is 50%, one is added to table of size 1 */ + return (num_of_entries + 1) / 2; +} + +static inline bool +mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl) +{ + if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask) + return false; + + return true; +} + static inline struct mlx5dr_cmd_vport_cap * mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport) { @@ -1216,7 +1235,7 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn, bool update_hw_ste); void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx, u16 gvmi, - struct mlx5dr_domain_rx_tx *nic_dmn, + enum mlx5dr_domain_nic_type nic_type, struct mlx5dr_ste_htbl *htbl, u8 *formatted_ste, struct mlx5dr_htbl_connect_info *connect_info); @@ -1282,6 +1301,7 @@ struct mlx5dr_send_ring { u8 sync_buff[MIN_READ_SYNC]; struct mlx5dr_mr *sync_mr; spinlock_t lock; /* Protect the data path of the send ring */ + bool err_state; /* send_ring is not usable in err state */ }; int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn); @@ -1333,6 +1353,7 @@ struct mlx5dr_cmd_fte_info { u32 *val; struct mlx5_flow_act action; struct mlx5dr_cmd_flow_destination_hw_info *dest_arr; + bool ignore_flow_level; }; int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, @@ -1362,7 +1383,8 @@ int 
mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, int num_dest, bool reformat_req, u32 *tbl_id, - u32 *group_id); + u32 *group_id, + bool ignore_flow_level); void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id, u32 group_id); #endif /* _DR_TYPES_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index d5926dd7e972..7e58f4e594b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -133,6 +133,9 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_table *next_ft) { + if (mlx5_dr_is_fw_table(ft->flags)) + return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft); + return set_miss_action(ns, ft, next_ft); } @@ -487,9 +490,13 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = term_actions->dest; } else if (num_term_actions > 1) { + bool ignore_flow_level = + !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL); + tmp_action = mlx5dr_action_create_mult_dest_tbl(domain, term_actions, - num_term_actions); + num_term_actions, + ignore_flow_level); if (!tmp_action) { err = -EOPNOTSUPP; goto free_actions; @@ -557,6 +564,9 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns case MLX5_REFORMAT_TYPE_INSERT_HDR: dr_reformat = DR_ACTION_REFORMAT_TYP_INSERT_HDR; break; + case MLX5_REFORMAT_TYPE_REMOVE_HDR: + dr_reformat = DR_ACTION_REFORMAT_TYP_REMOVE_HDR; + break; default: mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n", params->type); @@ -615,15 +625,6 @@ static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *n mlx5dr_action_destroy(modify_hdr->action.dr_action); } -static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *group, - int modify_mask, - struct fs_fte *fte) -{ - return -EOPNOTSUPP; -} - static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct fs_fte *fte) @@ -648,6 +649,36 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns, return 0; } +static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *group, + int modify_mask, + struct fs_fte *fte) +{ + struct fs_fte fte_tmp = {}; + int ret; + + if (mlx5_dr_is_fw_table(ft->flags)) + return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte); + + /* Backup current dr rule details */ + fte_tmp.fs_dr_rule = fte->fs_dr_rule; + memset(&fte->fs_dr_rule, 0, sizeof(struct mlx5_fs_dr_rule)); + + /* First add the new updated rule, then delete the old rule */ + ret = mlx5_cmd_dr_create_fte(ns, ft, group, fte); + if (ret) + goto restore_fte; + + ret = mlx5_cmd_dr_delete_fte(ns, ft, &fte_tmp); + WARN_ONCE(ret, "dr update fte duplicate rule deletion failed\n"); + return ret; + +restore_fte: + fte->fs_dr_rule = fte_tmp.fs_dr_rule; + return ret; +} + static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_root_namespace *peer_ns) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h index 9643ee647f57..d2a937f69784 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h @@ -8,12 +8,6 @@ enum { MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f, }; -enum mlx5dr_ste_entry_type { - MLX5DR_STE_TYPE_TX = 1, - MLX5DR_STE_TYPE_RX = 2, - MLX5DR_STE_TYPE_MODIFY_PKT = 6, -}; - struct mlx5_ifc_ste_general_bits { u8 entry_type[0x4]; u8 reserved_at_4[0x4]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index bbfe101d4e57..c5a8b1601999 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -27,6 +27,7 @@ enum mlx5dr_action_reformat_type { DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2, DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3, DR_ACTION_REFORMAT_TYP_INSERT_HDR, + DR_ACTION_REFORMAT_TYP_REMOVE_HDR, }; struct mlx5dr_match_parameters { @@ -94,7 +95,8 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain, struct mlx5dr_action * mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, struct mlx5dr_action_dest *dests, - u32 num_of_dests); + u32 num_of_dests, + bool ignore_flow_level); struct mlx5dr_action *mlx5dr_action_create_drop(void); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index a0a059e0154f..3e85b17f5857 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -199,7 +199,7 @@ static int mlxbf_gige_stop(struct net_device *netdev) return 0; } -static int mlxbf_gige_do_ioctl(struct net_device *netdev, +static int mlxbf_gige_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { if (!(netif_running(netdev))) @@ -253,7 +253,7 @@ static const struct net_device_ops mlxbf_gige_netdev_ops = { .ndo_start_xmit = mlxbf_gige_start_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = mlxbf_gige_do_ioctl, + .ndo_eth_ioctl = mlxbf_gige_eth_ioctl, .ndo_set_rx_mode = mlxbf_gige_set_rx_mode, .ndo_get_stats64 = mlxbf_gige_get_stats64, }; @@ -269,9 +269,6 @@ static int mlxbf_gige_probe(struct platform_device *pdev) { struct phy_device *phydev; struct net_device *netdev; - struct resource *mac_res; - struct resource *llu_res; - struct resource *plu_res; struct mlxbf_gige *priv; void __iomem *llu_base; void __iomem *plu_base; @@ -280,27 +277,15 @@ static int mlxbf_gige_probe(struct platform_device *pdev) int addr; int err; - mac_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_MAC); - if (!mac_res) - return -ENXIO; - - base = devm_ioremap_resource(&pdev->dev, mac_res); + base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC); if (IS_ERR(base)) return PTR_ERR(base); - llu_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_LLU); - if (!llu_res) - return -ENXIO; - - llu_base = devm_ioremap_resource(&pdev->dev, llu_res); + llu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_LLU); if (IS_ERR(llu_base)) return PTR_ERR(llu_base); - plu_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_PLU); - if (!plu_res) - return -ENXIO; - - plu_base = devm_ioremap_resource(&pdev->dev, plu_res); + plu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_PLU); if (IS_ERR(plu_base)) return PTR_ERR(plu_base); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c index e32dd34fdcc0..7905179a9575 100644 --- 
a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c @@ -145,14 +145,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add, int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv) { struct device *dev = &pdev->dev; - struct resource *res; int ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_MDIO9); - if (!res) - return -ENODEV; - - priv->mdio_io = devm_ioremap_resource(dev, res); + priv->mdio_io = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MDIO9); if (IS_ERR(priv->mdio_io)) return PTR_ERR(priv->mdio_io); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 12871c8dc7c1..d1ae248e125c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -58,10 +58,10 @@ config MLXSW_SPECTRUM depends on NET_IPGRE || NET_IPGRE=n depends on IPV6_GRE || IPV6_GRE=n depends on VXLAN || VXLAN=n + depends on PTP_1588_CLOCK_OPTIONAL select GENERIC_ALLOCATOR select PARMAN select OBJAGG - imply PTP_1588_CLOCK select NET_PTP_CLASSIFY if PTP_1588_CLOCK default m help diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index e775f08fb464..f080fab3de2b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1927,7 +1927,8 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (!reload) { alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size; - devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size); + devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size, + mlxsw_bus_info->dev); if (!devlink) { err = -ENOMEM; goto err_devlink_alloc; @@ -1974,7 +1975,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, goto err_emad_init; if (!reload) { - err = devlink_register(devlink, mlxsw_bus_info->dev); + err = devlink_register(devlink); if (err) goto err_devlink_register; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 88699e678544..250c5a24264d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1207,7 +1207,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, .ndo_set_features = mlxsw_sp_set_features, .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port, - .ndo_do_ioctl = mlxsw_sp_port_ioctl, + .ndo_eth_ioctl = mlxsw_sp_port_ioctl, }; static int @@ -2717,6 +2717,22 @@ mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr); +#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96 +#define MLXSW_SP_INCREASED_PARSING_DEPTH 128 +#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789 + +static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; + mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT; + mutex_init(&mlxsw_sp->parsing.lock); +} + +static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) +{ + mutex_destroy(&mlxsw_sp->parsing.lock); +} + static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info, struct netlink_ext_ack *extack) @@ -2727,6 +2743,7 @@ static int mlxsw_sp_init(struct mlxsw_core 
*mlxsw_core, mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; + mlxsw_sp_parsing_init(mlxsw_sp); mlxsw_core_emad_string_tlv_enable(mlxsw_core); err = mlxsw_sp_base_mac_get(mlxsw_sp); @@ -2926,6 +2943,7 @@ err_policers_init: mlxsw_sp_fids_fini(mlxsw_sp); err_fids_init: mlxsw_sp_kvdl_fini(mlxsw_sp); + mlxsw_sp_parsing_fini(mlxsw_sp); return err; } @@ -3046,6 +3064,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_policers_fini(mlxsw_sp); mlxsw_sp_fids_fini(mlxsw_sp); mlxsw_sp_kvdl_fini(mlxsw_sp); + mlxsw_sp_parsing_fini(mlxsw_sp); } /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated @@ -3611,6 +3630,69 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) dev_put(mlxsw_sp_port->dev); } +int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp) +{ + char mprs_pl[MLXSW_REG_MPRS_LEN]; + int err = 0; + + mutex_lock(&mlxsw_sp->parsing.lock); + + if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref)) + goto out_unlock; + + mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH, + mlxsw_sp->parsing.vxlan_udp_dport); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); + if (err) + goto out_unlock; + + mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH; + refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1); + +out_unlock: + mutex_unlock(&mlxsw_sp->parsing.lock); + return err; +} + +void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp) +{ + char mprs_pl[MLXSW_REG_MPRS_LEN]; + + mutex_lock(&mlxsw_sp->parsing.lock); + + if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref)) + goto out_unlock; + + mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH, + mlxsw_sp->parsing.vxlan_udp_dport); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); + mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; + +out_unlock: + mutex_unlock(&mlxsw_sp->parsing.lock); +} + +int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp, + __be16 udp_dport) +{ + char mprs_pl[MLXSW_REG_MPRS_LEN]; + int err; + + mutex_lock(&mlxsw_sp->parsing.lock); + + mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth, + be16_to_cpu(udp_dport)); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); + if (err) + goto out_unlock; + + mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport); + +out_unlock: + mutex_unlock(&mlxsw_sp->parsing.lock); + return err; +} + static void mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *lag_dev) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index f99db88ee884..3a43cba6d23c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -148,6 +148,13 @@ struct mlxsw_sp_port_mapping { u8 lane; }; +struct mlxsw_sp_parsing { + refcount_t parsing_depth_ref; + u16 parsing_depth; + u16 vxlan_udp_dport; + struct mutex lock; /* Protects parsing configuration */ +}; + struct mlxsw_sp { struct mlxsw_sp_port **ports; struct mlxsw_core *core; @@ -173,6 +180,7 @@ struct mlxsw_sp { struct mlxsw_sp_counter_pool *counter_pool; struct mlxsw_sp_span *span; struct mlxsw_sp_trap *trap; + struct mlxsw_sp_parsing parsing; const struct mlxsw_sp_switchdev_ops *switchdev_ops; const struct mlxsw_sp_kvdl_ops *kvdl_ops; const struct mlxsw_afa_ops *afa_ops; @@ -652,6 +660,10 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev); struct mlxsw_sp_port 
*mlxsw_sp_port_lower_dev_hold(struct net_device *dev); void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev); +int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp, + __be16 udp_dport); /* spectrum_dcb.c */ #ifdef CONFIG_MLXSW_SPECTRUM_DCB diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h index d8104fc6c900..98d1fdc25eac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h @@ -29,7 +29,6 @@ struct mlxsw_sp_nve { unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX]; u32 tunnel_index; u16 ul_rif_index; /* Reserved for Spectrum */ - unsigned int inc_parsing_depth_refs; }; struct mlxsw_sp_nve_ops { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index b84bb4b65098..d018d2da5949 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -10,14 +10,6 @@ #include "spectrum.h" #include "spectrum_nve.h" -/* Eth (18B) | IPv6 (40B) | UDP (8B) | VxLAN (8B) | Eth (14B) | IPv6 (40B) - * - * In the worst case - where we have a VLAN tag on the outer Ethernet - * header and IPv6 in overlay and underlay - we need to parse 128 bytes - */ -#define MLXSW_SP_NVE_VXLAN_PARSING_DEPTH 128 -#define MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH 96 - #define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \ VXLAN_F_LEARN) @@ -115,66 +107,6 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, config->udp_dport = cfg->dst_port; } -static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, - unsigned int parsing_depth, - __be16 udp_dport) -{ - char mprs_pl[MLXSW_REG_MPRS_LEN]; - - mlxsw_reg_mprs_pack(mprs_pl, parsing_depth, be16_to_cpu(udp_dport)); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); -} - -static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, - __be16 udp_dport) -{ - int parsing_depth = mlxsw_sp->nve->inc_parsing_depth_refs ? 
- MLXSW_SP_NVE_VXLAN_PARSING_DEPTH : - MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH; - - return __mlxsw_sp_nve_parsing_set(mlxsw_sp, parsing_depth, udp_dport); -} - -static int -__mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp, - __be16 udp_dport) -{ - int err; - - mlxsw_sp->nve->inc_parsing_depth_refs++; - - err = mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport); - if (err) - goto err_nve_parsing_set; - return 0; - -err_nve_parsing_set: - mlxsw_sp->nve->inc_parsing_depth_refs--; - return err; -} - -static void -__mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp, - __be16 udp_dport) -{ - mlxsw_sp->nve->inc_parsing_depth_refs--; - mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport); -} - -int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp) -{ - __be16 udp_dport = mlxsw_sp->nve->config.udp_dport; - - return __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, udp_dport); -} - -void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp) -{ - __be16 udp_dport = mlxsw_sp->nve->config.udp_dport; - - __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, udp_dport); -} - static void mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl, const struct mlxsw_sp_nve_config *config) @@ -238,10 +170,14 @@ static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve, struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; int err; - err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport); + err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport); if (err) return err; + err = mlxsw_sp_parsing_depth_inc(mlxsw_sp); + if (err) + goto err_parsing_depth_inc; + err = mlxsw_sp1_nve_vxlan_config_set(mlxsw_sp, config); if (err) goto err_config_set; @@ -263,7 +199,9 @@ err_promote_decap: err_rtdp_set: mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); err_config_set: - __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0); + mlxsw_sp_parsing_depth_dec(mlxsw_sp); +err_parsing_depth_inc: + mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0); return err; } @@ -275,7 +213,8 @@ static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve) mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, config->ul_proto, &config->ul_sip); mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); - __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0); + mlxsw_sp_parsing_depth_dec(mlxsw_sp); + mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0); } static int @@ -412,10 +351,14 @@ static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve, struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; int err; - err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport); + err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport); if (err) return err; + err = mlxsw_sp_parsing_depth_inc(mlxsw_sp); + if (err) + goto err_parsing_depth_inc; + err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config); if (err) goto err_config_set; @@ -438,7 +381,9 @@ err_promote_decap: err_rtdp_set: mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); err_config_set: - __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0); + mlxsw_sp_parsing_depth_dec(mlxsw_sp); +err_parsing_depth_inc: + mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0); return err; } @@ -450,7 +395,8 @@ static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve) mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, config->ul_proto, &config->ul_sip); mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); - __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0); + mlxsw_sp_parsing_depth_dec(mlxsw_sp); + mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0); } const struct mlxsw_sp_nve_ops 
mlxsw_sp2_nve_vxlan_ops = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index bfef65d1587c..1a180384e7e8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c @@ -975,14 +975,14 @@ static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port, } if ((ing_types || egr_types) && !(orig_ing_types || orig_egr_types)) { - err = mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp); + err = mlxsw_sp_parsing_depth_inc(mlxsw_sp); if (err) { netdev_err(mlxsw_sp_port->dev, "Failed to increase parsing depth"); return err; } } if (!(ing_types || egr_types) && (orig_ing_types || orig_egr_types)) - mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp); + mlxsw_sp_parsing_depth_dec(mlxsw_sp); return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp, ing_types, egr_types); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index f69cbb3852d5..19bb3ca0515e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -9484,6 +9484,7 @@ struct mlxsw_sp_mp_hash_config { DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT); DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT); DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT); + bool inc_parsing_depth; }; #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \ @@ -9654,6 +9655,7 @@ static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL); /* Inner */ mlxsw_sp_mp_hash_inner_l3(config); + config->inc_parsing_depth = true; break; case 3: /* Outer */ @@ -9678,22 +9680,53 @@ static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT); /* Inner */ mlxsw_sp_mp_hash_inner_custom(config, hash_fields); + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK) + config->inc_parsing_depth = true; break; } } +static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp, + bool old_inc_parsing_depth, + bool new_inc_parsing_depth) +{ + int err; + + if (!old_inc_parsing_depth && new_inc_parsing_depth) { + err = mlxsw_sp_parsing_depth_inc(mlxsw_sp); + if (err) + return err; + mlxsw_sp->router->inc_parsing_depth = true; + } else if (old_inc_parsing_depth && !new_inc_parsing_depth) { + mlxsw_sp_parsing_depth_dec(mlxsw_sp); + mlxsw_sp->router->inc_parsing_depth = false; + } + + return 0; +} + static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) { + bool old_inc_parsing_depth, new_inc_parsing_depth; struct mlxsw_sp_mp_hash_config config = {}; char recr2_pl[MLXSW_REG_RECR2_LEN]; unsigned long bit; u32 seed; + int err; seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0); mlxsw_reg_recr2_pack(recr2_pl, seed); mlxsw_sp_mp4_hash_init(mlxsw_sp, &config); mlxsw_sp_mp6_hash_init(mlxsw_sp, &config); + old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth; + new_inc_parsing_depth = config.inc_parsing_depth; + err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, + old_inc_parsing_depth, + new_inc_parsing_depth); + if (err) + return err; + for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT) mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1); for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT) @@ -9703,7 +9736,16 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) for_each_set_bit(bit, config.inner_fields, 
__MLXSW_REG_RECR2_INNER_FIELD_CNT) mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl); + if (err) + goto err_reg_write; + + return 0; + +err_reg_write: + mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth, + old_inc_parsing_depth); + return err; } #else static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index c5d7007f9173..25d3eae63501 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -81,6 +81,7 @@ struct mlxsw_sp_router { size_t adj_grp_size_ranges_count; struct delayed_work nh_grp_activity_dw; struct list_head nh_res_grp_list; + bool inc_parsing_depth; }; struct mlxsw_sp_fib_entry_priv { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 8f90cd323d5f..22fede5cb32c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -335,14 +335,16 @@ mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge, static struct mlxsw_sp_bridge_port * mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device, - struct net_device *brport_dev) + struct net_device *brport_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp_bridge_port *bridge_port; struct mlxsw_sp_port *mlxsw_sp_port; + int err; bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL); if (!bridge_port) - return NULL; + return ERR_PTR(-ENOMEM); mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev); bridge_port->lagged = mlxsw_sp_port->lagged; @@ -359,12 +361,23 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device, list_add(&bridge_port->list, &bridge_device->ports_list); bridge_port->ref_count = 1; + err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev, + NULL, NULL, NULL, false, extack); + if (err) + goto err_switchdev_offload; + return bridge_port; + +err_switchdev_offload: + list_del(&bridge_port->list); + kfree(bridge_port); + return ERR_PTR(err); } static void mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port) { + switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL); list_del(&bridge_port->list); WARN_ON(!list_empty(&bridge_port->vlans_list)); kfree(bridge_port); @@ -390,9 +403,10 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge, if (IS_ERR(bridge_device)) return ERR_CAST(bridge_device); - bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev); - if (!bridge_port) { - err = -ENOMEM; + bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev, + extack); + if (IS_ERR(bridge_port)) { + err = PTR_ERR(bridge_port); goto err_bridge_port_create; } @@ -1569,7 +1583,6 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, { long *flood_bitmap; int num_of_ports; - int alloc_size; u16 mid_idx; int err; @@ -1579,18 +1592,17 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, return false; num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core); - alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports); - flood_bitmap = kzalloc(alloc_size, GFP_KERNEL); + flood_bitmap = bitmap_alloc(num_of_ports, GFP_KERNEL); if (!flood_bitmap) return false; - bitmap_copy(flood_bitmap, 
mid->ports_in_mid, num_of_ports); + bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports); mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp); mid->mid = mid_idx; err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap, bridge_device->mrouter); - kfree(flood_bitmap); + bitmap_free(flood_bitmap); if (err) return false; diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index 831518466de2..3f69bb59ba49 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -689,7 +689,7 @@ static int ks8851_net_ioctl(struct net_device *dev, struct ifreq *req, int cmd) static const struct net_device_ops ks8851_netdev_ops = { .ndo_open = ks8851_net_open, .ndo_stop = ks8851_net_stop, - .ndo_do_ioctl = ks8851_net_ioctl, + .ndo_eth_ioctl = ks8851_net_ioctl, .ndo_start_xmit = ks8851_start_xmit, .ndo_set_mac_address = ks8851_set_mac_address, .ndo_set_rx_mode = ks8851_set_rx_mode, diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 7945eb5e2fe8..a0ee155f9f51 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -6738,7 +6738,7 @@ static const struct net_device_ops netdev_ops = { .ndo_set_features = netdev_set_features, .ndo_set_mac_address = netdev_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_set_rx_mode = netdev_set_rx_mode, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = netdev_netpoll, diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index d54aa164c4e9..735eea1dacf1 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -45,6 +45,7 @@ config ENCX24J600 config LAN743X tristate "LAN743x support" depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL select PHYLIB select CRC16 select CRC32 diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index dae10328c6cf..9e8561cdc32a 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -2655,7 +2655,7 @@ static const struct net_device_ops lan743x_netdev_ops = { .ndo_open = lan743x_netdev_open, .ndo_stop = lan743x_netdev_close, .ndo_start_xmit = lan743x_netdev_xmit_frame, - .ndo_do_ioctl = lan743x_netdev_ioctl, + .ndo_eth_ioctl = lan743x_netdev_ioctl, .ndo_set_rx_mode = lan743x_netdev_set_multicast, .ndo_change_mtu = lan743x_netdev_change_mtu, .ndo_get_stats64 = lan743x_netdev_get_stats64, diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile index faa8f07a6b75..c271e86ee292 100644 --- a/drivers/net/ethernet/microchip/sparx5/Makefile +++ b/drivers/net/ethernet/microchip/sparx5/Makefile @@ -7,4 +7,4 @@ obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o sparx5-switch-objs := sparx5_main.o sparx5_packet.o \ sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \ - sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o + sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c new file mode 100644 index 000000000000..7436f62fa152 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c @@ -0,0 +1,593 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* 
Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + * + * The Sparx5 Chip Register Model can be browsed at this location: + * https://github.com/microchip-ung/sparx-5_reginfo + */ + +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/ip.h> +#include <linux/dma-mapping.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_port.h" + +#define FDMA_XTR_CHANNEL 6 +#define FDMA_INJ_CHANNEL 0 + +#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_INFO_TOKEN BIT(17) +#define FDMA_DCB_INFO_INTR BIT(18) +#define FDMA_DCB_INFO_SW(x) (((x) << 24) & GENMASK(31, 24)) + +#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_STATUS_SOF BIT(16) +#define FDMA_DCB_STATUS_EOF BIT(17) +#define FDMA_DCB_STATUS_INTR BIT(18) +#define FDMA_DCB_STATUS_DONE BIT(19) +#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) +#define FDMA_DCB_INVALID_DATA 0x1 + +#define FDMA_XTR_BUFFER_SIZE 2048 +#define FDMA_WEIGHT 4 + +/* Frame DMA DCB format + * + * +---------------------------+ + * | Next Ptr | + * +---------------------------+ + * | Reserved | Info | + * +---------------------------+ + * | Data0 Ptr | + * +---------------------------+ + * | Reserved | Status0 | + * +---------------------------+ + * | Data1 Ptr | + * +---------------------------+ + * | Reserved | Status1 | + * +---------------------------+ + * | Data2 Ptr | + * +---------------------------+ + * | Reserved | Status2 | + * |-------------|-------------| + * | | + * | | + * | | + * | | + * | | + * |---------------------------| + * | Data14 Ptr | + * +-------------|-------------+ + * | Reserved | Status14 | + * +-------------|-------------+ + */ + +/* For each hardware DB there is an entry in this list and when the HW DB + * entry is used, this SW DB entry is moved to the back of the list + */ +struct sparx5_db { + struct list_head list; + void *cpu_addr; +}; + +static void sparx5_fdma_rx_add_dcb(struct sparx5_rx *rx, + struct sparx5_rx_dcb_hw *dcb, + u64 nextptr) +{ + int idx = 0; + + /* Reset the status of the DB */ + for (idx = 0; idx < FDMA_RX_DCB_MAX_DBS; ++idx) { + struct sparx5_db_hw *db = &dcb->db[idx]; + + db->status = FDMA_DCB_STATUS_INTR; + } + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE); + rx->last_entry->nextptr = nextptr; + rx->last_entry = dcb; +} + +static void sparx5_fdma_tx_add_dcb(struct sparx5_tx *tx, + struct sparx5_tx_dcb_hw *dcb, + u64 nextptr) +{ + int idx = 0; + + /* Reset the status of the DB */ + for (idx = 0; idx < FDMA_TX_DCB_MAX_DBS; ++idx) { + struct sparx5_db_hw *db = &dcb->db[idx]; + + db->status = FDMA_DCB_STATUS_DONE; + } + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE); +} + +static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + /* Write the buffer address in the LLP and LLP1 regs */ + spx5_wr(((u64)rx->dma) & GENMASK(31, 0), sparx5, + FDMA_DCB_LLP(rx->channel_id)); + spx5_wr(((u64)rx->dma) >> 32, sparx5, FDMA_DCB_LLP1(rx->channel_id)); + + /* Set the number of RX DBs to be used, and DB end-of-frame interrupt */ + spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) | + FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | + FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE), + sparx5, FDMA_CH_CFG(rx->channel_id)); + + /* Set the RX Watermark to max */ + spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), 
FDMA_XTR_CFG_XTR_FIFO_WM, + sparx5, + FDMA_XTR_CFG); + + /* Start RX fdma */ + spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), FDMA_PORT_CTRL_XTR_STOP, + sparx5, FDMA_PORT_CTRL(0)); + + /* Enable RX channel DB interrupt */ + spx5_rmw(BIT(rx->channel_id), + BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + sparx5, FDMA_INTR_DB_ENA); + + /* Activate the RX channel */ + spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_ACTIVATE); +} + +static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + /* Deactivate the RX channel */ + spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE, + sparx5, FDMA_CH_ACTIVATE); + + /* Disable RX channel DB interrupt */ + spx5_rmw(0, BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + sparx5, FDMA_INTR_DB_ENA); + + /* Stop RX fdma */ + spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(1), FDMA_PORT_CTRL_XTR_STOP, + sparx5, FDMA_PORT_CTRL(0)); +} + +static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx) +{ + /* Write the buffer address in the LLP and LLP1 regs */ + spx5_wr(((u64)tx->dma) & GENMASK(31, 0), sparx5, + FDMA_DCB_LLP(tx->channel_id)); + spx5_wr(((u64)tx->dma) >> 32, sparx5, FDMA_DCB_LLP1(tx->channel_id)); + + /* Set the number of TX DBs to be used, and DB end-of-frame interrupt */ + spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) | + FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | + FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE), + sparx5, FDMA_CH_CFG(tx->channel_id)); + + /* Start TX fdma */ + spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP, + sparx5, FDMA_PORT_CTRL(0)); + + /* Activate the channel */ + spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_ACTIVATE); +} + +static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx) +{ + /* Disable the channel */ + spx5_rmw(0, BIT(tx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE, + sparx5, FDMA_CH_ACTIVATE); +} + +static void sparx5_fdma_rx_reload(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + /* Reload the RX channel */ + spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_RELOAD); +} + +static void sparx5_fdma_tx_reload(struct sparx5 *sparx5, struct sparx5_tx *tx) +{ + /* Reload the TX channel */ + spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_RELOAD); +} + +static struct sk_buff *sparx5_fdma_rx_alloc_skb(struct sparx5_rx *rx) +{ + return __netdev_alloc_skb(rx->ndev, FDMA_XTR_BUFFER_SIZE, + GFP_ATOMIC); +} + +static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + struct sparx5_db_hw *db_hw; + unsigned int packet_size; + struct sparx5_port *port; + struct sk_buff *new_skb; + struct frame_info fi; + struct sk_buff *skb; + dma_addr_t dma_addr; + + /* Check if the DCB is done */ + db_hw = &rx->dcb_entries[rx->dcb_index].db[rx->db_index]; + if (unlikely(!(db_hw->status & FDMA_DCB_STATUS_DONE))) + return false; + skb = rx->skb[rx->dcb_index][rx->db_index]; + /* Replace the DB entry with a new SKB */ + new_skb = sparx5_fdma_rx_alloc_skb(rx); + if (unlikely(!new_skb)) + return false; + /* Map the new skb data and set the new skb */ + dma_addr = virt_to_phys(new_skb->data); + rx->skb[rx->dcb_index][rx->db_index] = new_skb; + db_hw->dataptr = dma_addr; + packet_size = FDMA_DCB_STATUS_BLOCKL(db_hw->status); + skb_put(skb, packet_size); + /* Now do the normal processing of the skb */ + sparx5_ifh_parse((u32 *)skb->data, &fi); + /* Map to port netdev */ + port = fi.src_port < SPX5_PORTS ? 
sparx5->ports[fi.src_port] : NULL; + if (!port || !port->ndev) { + dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port); + sparx5_xtr_flush(sparx5, XTR_QUEUE); + return false; + } + skb->dev = port->ndev; + skb_pull(skb, IFH_LEN * sizeof(u32)); + if (likely(!(skb->dev->features & NETIF_F_RXFCS))) + skb_trim(skb, skb->len - ETH_FCS_LEN); + skb->protocol = eth_type_trans(skb, skb->dev); + /* Everything we see on an interface that is in the HW bridge + * has already been forwarded + */ + if (test_bit(port->portno, sparx5->bridge_mask)) + skb->offload_fwd_mark = 1; + skb->dev->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + rx->packets++; + netif_receive_skb(skb); + return true; +} + +static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight) +{ + struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi); + struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx); + int counter = 0; + + while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) { + struct sparx5_rx_dcb_hw *old_dcb; + + rx->db_index++; + counter++; + /* Check if the DCB can be reused */ + if (rx->db_index != FDMA_RX_DCB_MAX_DBS) + continue; + /* As the DCB can be reused, just advance the dcb_index + * pointer and set the nextptr in the DCB + */ + rx->db_index = 0; + old_dcb = &rx->dcb_entries[rx->dcb_index]; + rx->dcb_index++; + rx->dcb_index &= FDMA_DCB_MAX - 1; + sparx5_fdma_rx_add_dcb(rx, old_dcb, + rx->dma + + ((unsigned long)old_dcb - + (unsigned long)rx->dcb_entries)); + } + if (counter < weight) { + napi_complete_done(&rx->napi, counter); + spx5_rmw(BIT(rx->channel_id), + BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + sparx5, FDMA_INTR_DB_ENA); + } + if (counter) + sparx5_fdma_rx_reload(sparx5, rx); + return counter; +} + +static struct sparx5_tx_dcb_hw *sparx5_fdma_next_dcb(struct sparx5_tx *tx, + struct sparx5_tx_dcb_hw *dcb) +{ + struct sparx5_tx_dcb_hw *next_dcb; + + next_dcb = dcb; + next_dcb++; + /* Handle wrap-around */ + if ((unsigned long)next_dcb >= + ((unsigned long)tx->first_entry + FDMA_DCB_MAX * sizeof(*dcb))) + next_dcb = tx->first_entry; + return next_dcb; +} + +int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb) +{ + struct sparx5_tx_dcb_hw *next_dcb_hw; + struct sparx5_tx *tx = &sparx5->tx; + static bool first_time = true; + struct sparx5_db_hw *db_hw; + struct sparx5_db *db; + + next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry); + db_hw = &next_dcb_hw->db[0]; + if (!(db_hw->status & FDMA_DCB_STATUS_DONE)) + tx->dropped++; + db = list_first_entry(&tx->db_list, struct sparx5_db, list); + list_move_tail(&db->list, &tx->db_list); + next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA; + tx->curr_entry->nextptr = tx->dma + + ((unsigned long)next_dcb_hw - + (unsigned long)tx->first_entry); + tx->curr_entry = next_dcb_hw; + memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE); + memcpy(db->cpu_addr, ifh, IFH_LEN * 4); + memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len); + db_hw->status = FDMA_DCB_STATUS_SOF | + FDMA_DCB_STATUS_EOF | + FDMA_DCB_STATUS_BLOCKO(0) | + FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4); + if (first_time) { + sparx5_fdma_tx_activate(sparx5, tx); + first_time = false; + } else { + sparx5_fdma_tx_reload(sparx5, tx); + } + return NETDEV_TX_OK; +} + +static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5) +{ + struct sparx5_rx *rx = &sparx5->rx; + struct sparx5_rx_dcb_hw *dcb; + int idx, jdx; + int size; + + size = sizeof(struct sparx5_rx_dcb_hw) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + 
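/* The DCB chain is allocated as one physically contiguous block (the + * size is rounded up to whole pages); the hardware walks it through the + * nextptr links, starting from the address written to FDMA_DCB_LLP in + * sparx5_fdma_rx_activate(). + */ + 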
rx->dcb_entries = devm_kzalloc(sparx5->dev, size, GFP_KERNEL); + if (!rx->dcb_entries) + return -ENOMEM; + rx->dma = virt_to_phys(rx->dcb_entries); + rx->last_entry = rx->dcb_entries; + rx->db_index = 0; + rx->dcb_index = 0; + /* Now for each dcb allocate the db */ + for (idx = 0; idx < FDMA_DCB_MAX; ++idx) { + dcb = &rx->dcb_entries[idx]; + dcb->info = 0; + /* For each db allocate an skb and map skb data pointer to the DB + * dataptr. In this way when the frame is received the skb->data + * will contain the frame, so no memcpy is needed + */ + for (jdx = 0; jdx < FDMA_RX_DCB_MAX_DBS; ++jdx) { + struct sparx5_db_hw *db_hw = &dcb->db[jdx]; + dma_addr_t dma_addr; + struct sk_buff *skb; + + skb = sparx5_fdma_rx_alloc_skb(rx); + if (!skb) + return -ENOMEM; + + dma_addr = virt_to_phys(skb->data); + db_hw->dataptr = dma_addr; + db_hw->status = 0; + rx->skb[idx][jdx] = skb; + } + sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx); + } + netif_napi_add(rx->ndev, &rx->napi, sparx5_fdma_napi_callback, FDMA_WEIGHT); + napi_enable(&rx->napi); + sparx5_fdma_rx_activate(sparx5, rx); + return 0; +} + +static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5) +{ + struct sparx5_tx *tx = &sparx5->tx; + struct sparx5_tx_dcb_hw *dcb; + int idx, jdx; + int size; + + size = sizeof(struct sparx5_tx_dcb_hw) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + tx->curr_entry = devm_kzalloc(sparx5->dev, size, GFP_KERNEL); + if (!tx->curr_entry) + return -ENOMEM; + tx->dma = virt_to_phys(tx->curr_entry); + tx->first_entry = tx->curr_entry; + INIT_LIST_HEAD(&tx->db_list); + /* Now for each dcb allocate the db */ + for (idx = 0; idx < FDMA_DCB_MAX; ++idx) { + dcb = &tx->curr_entry[idx]; + dcb->info = 0; + /* TX databuffers must be 16byte aligned */ + for (jdx = 0; jdx < FDMA_TX_DCB_MAX_DBS; ++jdx) { + struct sparx5_db_hw *db_hw = &dcb->db[jdx]; + struct sparx5_db *db; + dma_addr_t phys; + void *cpu_addr; + + cpu_addr = devm_kzalloc(sparx5->dev, + FDMA_XTR_BUFFER_SIZE, + GFP_KERNEL); + if (!cpu_addr) + return -ENOMEM; + phys = virt_to_phys(cpu_addr); + db_hw->dataptr = phys; + db_hw->status = 0; + db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL); + db->cpu_addr = cpu_addr; + list_add_tail(&db->list, &tx->db_list); + } + sparx5_fdma_tx_add_dcb(tx, dcb, tx->dma + sizeof(*dcb) * idx); + /* Let the curr_entry to point to the last allocated entry */ + if (idx == FDMA_DCB_MAX - 1) + tx->curr_entry = dcb; + } + return 0; +} + +static void sparx5_fdma_rx_init(struct sparx5 *sparx5, + struct sparx5_rx *rx, int channel) +{ + int idx; + + rx->channel_id = channel; + /* Fetch a netdev for SKB and NAPI use, any will do */ + for (idx = 0; idx < SPX5_PORTS; ++idx) { + struct sparx5_port *port = sparx5->ports[idx]; + + if (port && port->ndev) { + rx->ndev = port->ndev; + break; + } + } +} + +static void sparx5_fdma_tx_init(struct sparx5 *sparx5, + struct sparx5_tx *tx, int channel) +{ + tx->channel_id = channel; +} + +irqreturn_t sparx5_fdma_handler(int irq, void *args) +{ + struct sparx5 *sparx5 = args; + u32 db = 0, err = 0; + + db = spx5_rd(sparx5, FDMA_INTR_DB); + err = spx5_rd(sparx5, FDMA_INTR_ERR); + /* Clear interrupt */ + if (db) { + spx5_wr(0, sparx5, FDMA_INTR_DB_ENA); + spx5_wr(db, sparx5, FDMA_INTR_DB); + napi_schedule(&sparx5->rx.napi); + } + if (err) { + u32 err_type = spx5_rd(sparx5, FDMA_ERRORS); + + dev_err_ratelimited(sparx5->dev, + "ERR: int: %#x, type: %#x\n", + err, err_type); + spx5_wr(err, sparx5, FDMA_INTR_ERR); + spx5_wr(err_type, sparx5, FDMA_ERRORS); + } + return IRQ_HANDLED; +} + +static void 
sparx5_fdma_injection_mode(struct sparx5 *sparx5) +{ + const int byte_swap = 1; + int portno; + int urgency; + + /* Change mode to fdma extraction and injection */ + spx5_wr(QS_XTR_GRP_CFG_MODE_SET(2) | + QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) | + QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap), + sparx5, QS_XTR_GRP_CFG(XTR_QUEUE)); + spx5_wr(QS_INJ_GRP_CFG_MODE_SET(2) | + QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap), + sparx5, QS_INJ_GRP_CFG(INJ_QUEUE)); + + /* CPU ports capture setup */ + for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) { + /* ASM CPU port: No preamble, IFH, enable padding */ + spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) | + ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) | + ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */ + sparx5, ASM_PORT_CFG(portno)); + + /* Reset WM cnt to unclog queued frames */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(portno)); + + /* Set Disassembler Stop Watermark level */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(100), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(portno)); + + /* Enable port in queue system */ + urgency = sparx5_port_fwd_urg(sparx5, SPEED_2500); + spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) | + QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency), + QFWD_SWITCH_PORT_MODE_PORT_ENA | + QFWD_SWITCH_PORT_MODE_FWD_URGENCY, + sparx5, + QFWD_SWITCH_PORT_MODE(portno)); + + /* Disable Disassembler buffer underrun watchdog + * to avoid truncated packets in XTR + */ + spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(1), + DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, + sparx5, + DSM_BUF_CFG(portno)); + + /* Disabling frame aging */ + spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(1), + HSCH_PORT_MODE_AGE_DIS, + sparx5, + HSCH_PORT_MODE(portno)); + } +} + +int sparx5_fdma_start(struct sparx5 *sparx5) +{ + int err; + + /* Reset FDMA state */ + spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL); + spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL); + + /* Force ACP caching but disable read/write allocation */ + spx5_rmw(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(1) | + CPU_PROC_CTRL_ACP_AWCACHE_SET(0) | + CPU_PROC_CTRL_ACP_ARCACHE_SET(0), + CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA | + CPU_PROC_CTRL_ACP_AWCACHE | + CPU_PROC_CTRL_ACP_ARCACHE, + sparx5, CPU_PROC_CTRL); + + sparx5_fdma_injection_mode(sparx5); + sparx5_fdma_rx_init(sparx5, &sparx5->rx, FDMA_XTR_CHANNEL); + sparx5_fdma_tx_init(sparx5, &sparx5->tx, FDMA_INJ_CHANNEL); + err = sparx5_fdma_rx_alloc(sparx5); + if (err) { + dev_err(sparx5->dev, "Could not allocate RX buffers: %d\n", err); + return err; + } + err = sparx5_fdma_tx_alloc(sparx5); + if (err) { + dev_err(sparx5->dev, "Could not allocate TX buffers: %d\n", err); + return err; + } + return err; +} + +static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5) +{ + return spx5_rd(sparx5, FDMA_PORT_CTRL(0)); +} + +int sparx5_fdma_stop(struct sparx5 *sparx5) +{ + u32 val; + + napi_disable(&sparx5->rx.napi); + /* Stop the fdma and channel interrupts */ + sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx); + sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx); + /* Wait for the RX channel to stop */ + read_poll_timeout(sparx5_fdma_port_ctrl, val, + FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0, + 500, 10000, 0, sparx5); + return 0; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index f666133a15de..cbece6e9bff2 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ 
b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -640,8 +640,23 @@ static int sparx5_start(struct sparx5 *sparx5) sparx5_board_init(sparx5); err = sparx5_register_notifier_blocks(sparx5); - /* Start register based INJ/XTR */ + /* Start Frame DMA with fallback to register based INJ/XTR */ err = -ENXIO; + if (sparx5->fdma_irq >= 0) { + if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0) + err = devm_request_threaded_irq(sparx5->dev, + sparx5->fdma_irq, + NULL, + sparx5_fdma_handler, + IRQF_ONESHOT, + "sparx5-fdma", sparx5); + if (!err) + err = sparx5_fdma_start(sparx5); + if (err) + sparx5->fdma_irq = -ENXIO; + } else { + sparx5->fdma_irq = -ENXIO; + } if (err && sparx5->xtr_irq >= 0) { err = devm_request_irq(sparx5->dev, sparx5->xtr_irq, sparx5_xtr_handler, IRQF_SHARED, @@ -766,6 +781,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev) sparx5->base_mac[5] = 0; } + sparx5->fdma_irq = platform_get_irq_byname(sparx5->pdev, "fdma"); sparx5->xtr_irq = platform_get_irq_byname(sparx5->pdev, "xtr"); /* Read chip ID to check CPU interface */ @@ -824,6 +840,11 @@ static int mchp_sparx5_remove(struct platform_device *pdev) disable_irq(sparx5->xtr_irq); sparx5->xtr_irq = -ENXIO; } + if (sparx5->fdma_irq) { + disable_irq(sparx5->fdma_irq); + sparx5->fdma_irq = -ENXIO; + } + sparx5_fdma_stop(sparx5); sparx5_cleanup_ports(sparx5); /* Unregister netdevs */ sparx5_unregister_notifier_blocks(sparx5); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index 4d5f44c3a421..a1acc9b461f2 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -73,8 +73,61 @@ enum sparx5_vlan_port_type { #define XTR_QUEUE 0 #define INJ_QUEUE 0 +#define FDMA_DCB_MAX 64 +#define FDMA_RX_DCB_MAX_DBS 15 +#define FDMA_TX_DCB_MAX_DBS 1 + struct sparx5; +struct sparx5_db_hw { + u64 dataptr; + u64 status; +}; + +struct sparx5_rx_dcb_hw { + u64 nextptr; + u64 info; + struct sparx5_db_hw db[FDMA_RX_DCB_MAX_DBS]; +}; + +struct sparx5_tx_dcb_hw { + u64 nextptr; + u64 info; + struct sparx5_db_hw db[FDMA_TX_DCB_MAX_DBS]; +}; + +/* Frame DMA receive state: + * For each DB, there is a SKB, and the skb data pointer is mapped in + * the DB. Once a frame is received the skb is given to the upper layers + * and a new skb is added to the dcb. + * When the db_index reached FDMA_RX_DCB_MAX_DBS the DB is reused. + */ +struct sparx5_rx { + struct sparx5_rx_dcb_hw *dcb_entries; + struct sparx5_rx_dcb_hw *last_entry; + struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; + int db_index; + int dcb_index; + dma_addr_t dma; + struct napi_struct napi; + u32 channel_id; + struct net_device *ndev; + u64 packets; +}; + +/* Frame DMA transmit state: + * DCBs are chained using the DCBs nextptr field. 
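+ * curr_entry tracks the DCB most recently queued to the hardware, and + * db_list recycles the CPU-side data buffers in FIFO order (see + * sparx5_fdma_xmit()).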
+ */ +struct sparx5_tx { + struct sparx5_tx_dcb_hw *curr_entry; + struct sparx5_tx_dcb_hw *first_entry; + struct list_head db_list; + dma_addr_t dma; + u32 channel_id; + u64 packets; + u64 dropped; +}; + struct sparx5_port_config { phy_interface_t portmode; u32 bandwidth; @@ -167,6 +220,10 @@ struct sparx5 { bool sd_sgpio_remapping; /* Register based inj/xtr */ int xtr_irq; + /* Frame DMA */ + int fdma_irq; + struct sparx5_rx rx; + struct sparx5_tx tx; }; /* sparx5_switchdev.c */ @@ -174,11 +231,23 @@ int sparx5_register_notifier_blocks(struct sparx5 *sparx5); void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5); /* sparx5_packet.c */ +struct frame_info { + int src_port; +}; + +void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp); +void sparx5_ifh_parse(u32 *ifh, struct frame_info *info); irqreturn_t sparx5_xtr_handler(int irq, void *_priv); int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev); int sparx5_manual_injection_mode(struct sparx5 *sparx5); void sparx5_port_inj_timer_setup(struct sparx5_port *port); +/* sparx5_fdma.c */ +int sparx5_fdma_start(struct sparx5 *sparx5); +int sparx5_fdma_stop(struct sparx5 *sparx5); +int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb); +irqreturn_t sparx5_fdma_handler(int irq, void *args); + /* sparx5_mactable.c */ void sparx5_mact_pull_work(struct work_struct *work); int sparx5_mact_learn(struct sparx5 *sparx5, int port, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index 09ca7a3bafdc..dc7e5ea6ec15 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -20,11 +20,7 @@ #define INJ_TIMEOUT_NS 50000 -struct frame_info { - int src_port; -}; - -static void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp) +void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp) { /* Start flush */ spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH); @@ -36,7 +32,7 @@ static void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp) spx5_wr(0, sparx5, QS_XTR_FLUSH); } -static void sparx5_ifh_parse(u32 *ifh, struct frame_info *info) +void sparx5_ifh_parse(u32 *ifh, struct frame_info *info) { u8 *xtr_hdr = (u8 *)ifh; @@ -224,7 +220,10 @@ int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) struct sparx5 *sparx5 = port->sparx5; int ret; - ret = sparx5_inject(sparx5, port->ifh, skb, dev); + if (sparx5->fdma_irq > 0) + ret = sparx5_fdma_xmit(sparx5, port->ifh, skb); + else + ret = sparx5_inject(sparx5, port->ifh, skb, dev); if (ret == NETDEV_TX_OK) { stats->tx_bytes += skb->len; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c index d2e3250928bf..189a6a0a2e08 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c @@ -596,7 +596,7 @@ static int sparx5_port_max_tags_set(struct sparx5 *sparx5, return 0; } -static int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed) +int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed) { u32 clk_period_ps = 1600; /* 625Mhz for now */ u32 urg = 672000; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h index fd05ab6436d1..2f8043eac71b 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h @@ -89,5 +89,6 @@ int sparx5_get_port_status(struct sparx5 
*sparx5, struct sparx5_port_status *status); void sparx5_port_enable(struct sparx5_port *port, bool enable); +int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed); #endif /* __SPARX5_PORT_H__ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c index a72e3b3b596e..649ca609884a 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c @@ -93,9 +93,12 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx, } static int sparx5_port_bridge_join(struct sparx5_port *port, - struct net_device *bridge) + struct net_device *bridge, + struct netlink_ext_ack *extack) { struct sparx5 *sparx5 = port->sparx5; + struct net_device *ndev = port->ndev; + int err; if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS)) /* First bridged port */ @@ -109,12 +112,21 @@ static int sparx5_port_bridge_join(struct sparx5_port *port, set_bit(port->portno, sparx5->bridge_mask); + err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL, + false, extack); + if (err) + goto err_switchdev_offload; + /* Port enters in bridge mode therefor don't need to copy to CPU * frames for multicast in case the bridge is not requesting them */ - __dev_mc_unsync(port->ndev, sparx5_mc_unsync); + __dev_mc_unsync(ndev, sparx5_mc_unsync); return 0; + +err_switchdev_offload: + clear_bit(port->portno, sparx5->bridge_mask); + return err; } static void sparx5_port_bridge_leave(struct sparx5_port *port, @@ -122,6 +134,8 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port, { struct sparx5 *sparx5 = port->sparx5; + switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL); + clear_bit(port->portno, sparx5->bridge_mask); if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS)) sparx5->hw_bridge_dev = NULL; @@ -139,11 +153,15 @@ static int sparx5_port_changeupper(struct net_device *dev, struct netdev_notifier_changeupper_info *info) { struct sparx5_port *port = netdev_priv(dev); + struct netlink_ext_ack *extack; int err = 0; + extack = netdev_notifier_info_to_extack(&info->info); + if (netif_is_bridge_master(info->upper_dev)) { if (info->linking) - err = sparx5_port_bridge_join(port, info->upper_dev); + err = sparx5_port_bridge_join(port, info->upper_dev, + extack); else sparx5_port_bridge_leave(port, info->upper_dev); diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h index 33e53d32e891..41ecd156e95f 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma.h +++ b/drivers/net/ethernet/microsoft/mana/gdma.h @@ -239,10 +239,8 @@ struct gdma_event { struct gdma_queue; -#define CQE_POLLING_BUFFER 512 struct mana_eq { struct gdma_queue *eq; - struct gdma_comp cqe_poll[CQE_POLLING_BUFFER]; }; typedef void gdma_eq_callback(void *context, struct gdma_queue *q, @@ -291,11 +289,6 @@ struct gdma_queue { unsigned int msix_index; u32 log2_throttle_limit; - - /* NAPI data */ - struct napi_struct napi; - int work_done; - int budget; } eq; struct { @@ -319,9 +312,6 @@ struct gdma_queue_spec { void *context; unsigned long log2_throttle_limit; - - /* Only used by the MANA device. 
*/ - struct net_device *ndev; } eq; struct { @@ -406,7 +396,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue); int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe); -void mana_gd_arm_cq(struct gdma_queue *cq); +void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit); struct gdma_wqe { u32 reserved :24; @@ -496,16 +486,28 @@ enum { GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1, }; +#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0) + +#define GDMA_DRV_CAP_FLAGS1 GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT + +#define GDMA_DRV_CAP_FLAGS2 0 + +#define GDMA_DRV_CAP_FLAGS3 0 + +#define GDMA_DRV_CAP_FLAGS4 0 + struct gdma_verify_ver_req { struct gdma_req_hdr hdr; /* Mandatory fields required for protocol establishment */ u64 protocol_ver_min; u64 protocol_ver_max; - u64 drv_cap_flags1; - u64 drv_cap_flags2; - u64 drv_cap_flags3; - u64 drv_cap_flags4; + + /* Gdma Driver Capability Flags */ + u64 gd_drv_cap_flags1; + u64 gd_drv_cap_flags2; + u64 gd_drv_cap_flags3; + u64 gd_drv_cap_flags4; /* Advisory fields */ u64 drv_ver; diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 2f87bf90f8ec..cee75b561f59 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -67,6 +67,10 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev) if (gc->max_num_queues > resp.max_rq) gc->max_num_queues = resp.max_rq; + /* The Hardware Channel (HWC) used 1 MSI-X */ + if (gc->max_num_queues > gc->num_msix_usable - 1) + gc->max_num_queues = gc->num_msix_usable - 1; + return 0; } @@ -267,7 +271,7 @@ void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue) queue->id, queue->head * GDMA_WQE_BU_SIZE, 1); } -void mana_gd_arm_cq(struct gdma_queue *cq) +void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit) { struct gdma_context *gc = cq->gdma_dev->gdma_context; @@ -276,7 +280,7 @@ void mana_gd_arm_cq(struct gdma_queue *cq) u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS); mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id, - head, SET_ARM_BIT); + head, arm_bit); } static void mana_gd_process_eqe(struct gdma_queue *eq) @@ -339,7 +343,6 @@ static void mana_gd_process_eq_events(void *arg) struct gdma_queue *eq = arg; struct gdma_context *gc; struct gdma_eqe *eqe; - unsigned int arm_bit; u32 head, num_eqe; int i; @@ -370,92 +373,54 @@ static void mana_gd_process_eq_events(void *arg) eq->head++; } - /* Always rearm the EQ for HWC. For MANA, rearm it when NAPI is done. 
*/ - if (mana_gd_is_hwc(eq->gdma_dev)) { - arm_bit = SET_ARM_BIT; - } else if (eq->eq.work_done < eq->eq.budget && - napi_complete_done(&eq->eq.napi, eq->eq.work_done)) { - arm_bit = SET_ARM_BIT; - } else { - arm_bit = 0; - } - head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS); mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id, - head, arm_bit); -} - -static int mana_poll(struct napi_struct *napi, int budget) -{ - struct gdma_queue *eq = container_of(napi, struct gdma_queue, eq.napi); - - eq->eq.work_done = 0; - eq->eq.budget = budget; - - mana_gd_process_eq_events(eq); - - return min(eq->eq.work_done, budget); -} - -static void mana_gd_schedule_napi(void *arg) -{ - struct gdma_queue *eq = arg; - struct napi_struct *napi; - - napi = &eq->eq.napi; - napi_schedule_irqoff(napi); + head, SET_ARM_BIT); } static int mana_gd_register_irq(struct gdma_queue *queue, const struct gdma_queue_spec *spec) { struct gdma_dev *gd = queue->gdma_dev; - bool is_mana = mana_gd_is_mana(gd); struct gdma_irq_context *gic; struct gdma_context *gc; struct gdma_resource *r; unsigned int msi_index; unsigned long flags; - int err; + struct device *dev; + int err = 0; gc = gd->gdma_context; r = &gc->msix_resource; + dev = gc->dev; spin_lock_irqsave(&r->lock, flags); msi_index = find_first_zero_bit(r->map, r->size); - if (msi_index >= r->size) { + if (msi_index >= r->size || msi_index >= gc->num_msix_usable) { err = -ENOSPC; } else { bitmap_set(r->map, msi_index, 1); queue->eq.msix_index = msi_index; - err = 0; } spin_unlock_irqrestore(&r->lock, flags); - if (err) - return err; + if (err) { + dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u", + err, msi_index, r->size, gc->num_msix_usable); - WARN_ON(msi_index >= gc->num_msix_usable); + return err; + } gic = &gc->irq_contexts[msi_index]; - if (is_mana) { - netif_napi_add(spec->eq.ndev, &queue->eq.napi, mana_poll, - NAPI_POLL_WEIGHT); - napi_enable(&queue->eq.napi); - } - WARN_ON(gic->handler || gic->arg); gic->arg = queue; - if (is_mana) - gic->handler = mana_gd_schedule_napi; - else - gic->handler = mana_gd_process_eq_events; + gic->handler = mana_gd_process_eq_events; return 0; } @@ -549,11 +514,6 @@ static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets, mana_gd_deregiser_irq(queue); - if (mana_gd_is_mana(queue->gdma_dev)) { - napi_disable(&queue->eq.napi); - netif_napi_del(&queue->eq.napi); - } - if (queue->eq.disable_needed) mana_gd_disable_queue(queue); } @@ -883,6 +843,11 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev) req.protocol_ver_min = GDMA_PROTOCOL_FIRST; req.protocol_ver_max = GDMA_PROTOCOL_LAST; + req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1; + req.gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2; + req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3; + req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4; + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n", @@ -1128,7 +1093,7 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp) new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK; /* Return -1 if overflow detected. 
*/ - if (owner_bits != new_bits) + if (WARN_ON_ONCE(owner_bits != new_bits)) return -1; comp->wq_num = cqe->cqe_info.wq_num; @@ -1201,10 +1166,8 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) if (max_queues_per_port > MANA_MAX_NUM_QUEUES) max_queues_per_port = MANA_MAX_NUM_QUEUES; - max_irqs = max_queues_per_port * MAX_PORTS_IN_MANA_DEV; - /* Need 1 interrupt for the Hardware communication Channel (HWC) */ - max_irqs++; + max_irqs = max_queues_per_port + 1; nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX); if (nvec < 0) @@ -1291,6 +1254,9 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int bar = 0; int err; + /* Each port has 2 CQs, each CQ has at most 1 EQE at a time */ + BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE); + err = pci_enable_device(pdev); if (err) return -ENXIO; diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index 1a923fd99990..c1310ea1c216 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -304,7 +304,7 @@ static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self) &comp_data); } - mana_gd_arm_cq(q_self); + mana_gd_ring_cq(q_self, SET_ARM_BIT); } static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq) diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h index a2c3f826f022..fc98a5ba5ed0 100644 --- a/drivers/net/ethernet/microsoft/mana/mana.h +++ b/drivers/net/ethernet/microsoft/mana/mana.h @@ -46,7 +46,7 @@ enum TRI_STATE { #define EQ_SIZE (8 * PAGE_SIZE) #define LOG2_EQ_THROTTLE 3 -#define MAX_PORTS_IN_MANA_DEV 16 +#define MAX_PORTS_IN_MANA_DEV 256 struct mana_stats { u64 packets; @@ -225,6 +225,8 @@ struct mana_tx_comp_oob { struct mana_rxq; +#define CQE_POLLING_BUFFER 512 + struct mana_cq { struct gdma_queue *gdma_cq; @@ -244,8 +246,13 @@ struct mana_cq { */ struct mana_txq *txq; - /* Pointer to a buffer which the CQ handler can copy the CQE's into. */ - struct gdma_comp *gdma_comp_buf; + /* Buffer which the CQ handler can copy the CQE's into. 
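+ * Owning the buffer here, instead of in the EQ as before, lets each CQ + * be drained from its own per-queue NAPI poll context.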
*/ + struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER]; + + /* NAPI data */ + struct napi_struct napi; + int work_done; + int budget; }; #define GDMA_MAX_RQE_SGES 15 @@ -315,6 +322,8 @@ struct mana_context { u16 num_ports; + struct mana_eq *eqs; + struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; }; @@ -324,8 +333,6 @@ struct mana_port_context { u8 mac_addr[ETH_ALEN]; - struct mana_eq *eqs; - enum TRI_STATE rss_state; mana_handle_t default_rxobj; @@ -395,11 +402,11 @@ enum mana_command_code { struct mana_query_device_cfg_req { struct gdma_req_hdr hdr; - /* Driver Capability flags */ - u64 drv_cap_flags1; - u64 drv_cap_flags2; - u64 drv_cap_flags3; - u64 drv_cap_flags4; + /* MANA Nic Driver Capability flags */ + u64 mn_drv_cap_flags1; + u64 mn_drv_cap_flags2; + u64 mn_drv_cap_flags3; + u64 mn_drv_cap_flags4; u32 proto_major_ver; u32 proto_minor_ver; @@ -516,7 +523,7 @@ struct mana_cfg_rx_steer_resp { struct gdma_resp_hdr hdr; }; /* HW DATA */ -#define MANA_MAX_NUM_QUEUES 16 +#define MANA_MAX_NUM_QUEUES 64 #define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1) diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index fff78900fc8a..1b21030308e5 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -696,66 +696,56 @@ static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, resp.hdr.status); } -static void mana_init_cqe_poll_buf(struct gdma_comp *cqe_poll_buf) -{ - int i; - - for (i = 0; i < CQE_POLLING_BUFFER; i++) - memset(&cqe_poll_buf[i], 0, sizeof(struct gdma_comp)); -} - -static void mana_destroy_eq(struct gdma_context *gc, - struct mana_port_context *apc) +static void mana_destroy_eq(struct mana_context *ac) { + struct gdma_context *gc = ac->gdma_dev->gdma_context; struct gdma_queue *eq; int i; - if (!apc->eqs) + if (!ac->eqs) return; - for (i = 0; i < apc->num_queues; i++) { - eq = apc->eqs[i].eq; + for (i = 0; i < gc->max_num_queues; i++) { + eq = ac->eqs[i].eq; if (!eq) continue; mana_gd_destroy_queue(gc, eq); } - kfree(apc->eqs); - apc->eqs = NULL; + kfree(ac->eqs); + ac->eqs = NULL; } -static int mana_create_eq(struct mana_port_context *apc) +static int mana_create_eq(struct mana_context *ac) { - struct gdma_dev *gd = apc->ac->gdma_dev; + struct gdma_dev *gd = ac->gdma_dev; + struct gdma_context *gc = gd->gdma_context; struct gdma_queue_spec spec = {}; int err; int i; - apc->eqs = kcalloc(apc->num_queues, sizeof(struct mana_eq), - GFP_KERNEL); - if (!apc->eqs) + ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq), + GFP_KERNEL); + if (!ac->eqs) return -ENOMEM; spec.type = GDMA_EQ; spec.monitor_avl_buf = false; spec.queue_size = EQ_SIZE; spec.eq.callback = NULL; - spec.eq.context = apc->eqs; + spec.eq.context = ac->eqs; spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; - spec.eq.ndev = apc->ndev; - - for (i = 0; i < apc->num_queues; i++) { - mana_init_cqe_poll_buf(apc->eqs[i].cqe_poll); - err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq); + for (i = 0; i < gc->max_num_queues; i++) { + err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); if (err) goto out; } return 0; out: - mana_destroy_eq(gd->gdma_context, apc); + mana_destroy_eq(ac); return err; } @@ -790,7 +780,6 @@ static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) static void mana_poll_tx_cq(struct mana_cq *cq) { - struct gdma_queue *gdma_eq = cq->gdma_cq->cq.parent; struct gdma_comp *completions = cq->gdma_comp_buf; struct gdma_posted_wqe_info *wqe_info; 
unsigned int pkt_transmitted = 0; @@ -812,6 +801,9 @@ static void mana_poll_tx_cq(struct mana_cq *cq) comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, CQE_POLLING_BUFFER); + if (comp_read < 1) + return; + for (i = 0; i < comp_read; i++) { struct mana_tx_comp_oob *cqe_oob; @@ -861,7 +853,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq) mana_unmap_skb(skb, apc); - napi_consume_skb(skb, gdma_eq->eq.budget); + napi_consume_skb(skb, cq->budget); pkt_transmitted++; } @@ -890,6 +882,8 @@ static void mana_poll_tx_cq(struct mana_cq *cq) if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) WARN_ON_ONCE(1); + + cq->work_done = pkt_transmitted; } static void mana_post_pkt_rxq(struct mana_rxq *rxq) @@ -918,17 +912,13 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe, struct mana_stats *rx_stats = &rxq->stats; struct net_device *ndev = rxq->ndev; uint pkt_len = cqe->ppi[0].pkt_len; - struct mana_port_context *apc; u16 rxq_idx = rxq->rxq_idx; struct napi_struct *napi; - struct gdma_queue *eq; struct sk_buff *skb; u32 hash_value; - apc = netdev_priv(ndev); - eq = apc->eqs[rxq_idx].eq; - eq->eq.work_done++; - napi = &eq->eq.napi; + rxq->rx_cq.work_done++; + napi = &rxq->rx_cq.napi; if (!buf_va) { ++ndev->stats.rx_dropped; @@ -1081,6 +1071,7 @@ static void mana_poll_rx_cq(struct mana_cq *cq) static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue) { struct mana_cq *cq = context; + u8 arm_bit; WARN_ON_ONCE(cq->gdma_cq != gdma_queue); @@ -1089,7 +1080,33 @@ static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue) else mana_poll_tx_cq(cq); - mana_gd_arm_cq(gdma_queue); + if (cq->work_done < cq->budget && + napi_complete_done(&cq->napi, cq->work_done)) { + arm_bit = SET_ARM_BIT; + } else { + arm_bit = 0; + } + + mana_gd_ring_cq(gdma_queue, arm_bit); +} + +static int mana_poll(struct napi_struct *napi, int budget) +{ + struct mana_cq *cq = container_of(napi, struct mana_cq, napi); + + cq->work_done = 0; + cq->budget = budget; + + mana_cq_handler(cq, cq->gdma_cq); + + return min(cq->work_done, budget); +} + +static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue) +{ + struct mana_cq *cq = context; + + napi_schedule_irqoff(&cq->napi); } static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq) @@ -1114,12 +1131,18 @@ static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq) static void mana_destroy_txq(struct mana_port_context *apc) { + struct napi_struct *napi; int i; if (!apc->tx_qp) return; for (i = 0; i < apc->num_queues; i++) { + napi = &apc->tx_qp[i].tx_cq.napi; + napi_synchronize(napi); + napi_disable(napi); + netif_napi_del(napi); + mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); @@ -1134,7 +1157,8 @@ static void mana_destroy_txq(struct mana_port_context *apc) static int mana_create_txq(struct mana_port_context *apc, struct net_device *net) { - struct gdma_dev *gd = apc->ac->gdma_dev; + struct mana_context *ac = apc->ac; + struct gdma_dev *gd = ac->gdma_dev; struct mana_obj_spec wq_spec; struct mana_obj_spec cq_spec; struct gdma_queue_spec spec; @@ -1186,7 +1210,6 @@ static int mana_create_txq(struct mana_port_context *apc, /* Create SQ's CQ */ cq = &apc->tx_qp[i].tx_cq; - cq->gdma_comp_buf = apc->eqs[i].cqe_poll; cq->type = MANA_CQ_TYPE_TX; cq->txq = txq; @@ -1195,8 +1218,8 @@ static int mana_create_txq(struct mana_port_context *apc, spec.type = GDMA_CQ; spec.monitor_avl_buf = false; spec.queue_size = cq_size; - 
spec.cq.callback = mana_cq_handler; - spec.cq.parent_eq = apc->eqs[i].eq; + spec.cq.callback = mana_schedule_napi; + spec.cq.parent_eq = ac->eqs[i].eq; spec.cq.context = cq; err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); if (err) @@ -1237,7 +1260,10 @@ static int mana_create_txq(struct mana_port_context *apc, gc->cq_table[cq->gdma_id] = cq->gdma_cq; - mana_gd_arm_cq(cq->gdma_cq); + netif_tx_napi_add(net, &cq->napi, mana_poll, NAPI_POLL_WEIGHT); + napi_enable(&cq->napi); + + mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); } return 0; @@ -1246,21 +1272,6 @@ out: return err; } -static void mana_napi_sync_for_rx(struct mana_rxq *rxq) -{ - struct net_device *ndev = rxq->ndev; - struct mana_port_context *apc; - u16 rxq_idx = rxq->rxq_idx; - struct napi_struct *napi; - struct gdma_queue *eq; - - apc = netdev_priv(ndev); - eq = apc->eqs[rxq_idx].eq; - napi = &eq->eq.napi; - - napi_synchronize(napi); -} - static void mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq, bool validate_state) @@ -1268,13 +1279,19 @@ static void mana_destroy_rxq(struct mana_port_context *apc, struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; struct mana_recv_buf_oob *rx_oob; struct device *dev = gc->dev; + struct napi_struct *napi; int i; if (!rxq) return; + napi = &rxq->rx_cq.napi; + if (validate_state) - mana_napi_sync_for_rx(rxq); + napi_synchronize(napi); + + napi_disable(napi); + netif_napi_del(napi); mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); @@ -1418,7 +1435,6 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, /* Create RQ's CQ */ cq = &rxq->rx_cq; - cq->gdma_comp_buf = eq->cqe_poll; cq->type = MANA_CQ_TYPE_RX; cq->rxq = rxq; @@ -1426,7 +1442,7 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, spec.type = GDMA_CQ; spec.monitor_avl_buf = false; spec.queue_size = cq_size; - spec.cq.callback = mana_cq_handler; + spec.cq.callback = mana_schedule_napi; spec.cq.parent_eq = eq->eq; spec.cq.context = cq; err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); @@ -1466,7 +1482,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, gc->cq_table[cq->gdma_id] = cq->gdma_cq; - mana_gd_arm_cq(cq->gdma_cq); + netif_napi_add(ndev, &cq->napi, mana_poll, 1); + napi_enable(&cq->napi); + + mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); out: if (!err) return rxq; @@ -1484,12 +1503,13 @@ out: static int mana_add_rx_queues(struct mana_port_context *apc, struct net_device *ndev) { + struct mana_context *ac = apc->ac; struct mana_rxq *rxq; int err = 0; int i; for (i = 0; i < apc->num_queues; i++) { - rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev); + rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); if (!rxq) { err = -ENOMEM; goto out; @@ -1601,16 +1621,11 @@ reset_apc: int mana_alloc_queues(struct net_device *ndev) { struct mana_port_context *apc = netdev_priv(ndev); - struct gdma_dev *gd = apc->ac->gdma_dev; int err; - err = mana_create_eq(apc); - if (err) - return err; - err = mana_create_vport(apc, ndev); if (err) - goto destroy_eq; + return err; err = netif_set_real_num_tx_queues(ndev, apc->num_queues); if (err) @@ -1636,8 +1651,6 @@ int mana_alloc_queues(struct net_device *ndev) destroy_vport: mana_destroy_vport(apc); -destroy_eq: - mana_destroy_eq(gd->gdma_context, apc); return err; } @@ -1714,8 +1727,6 @@ static int mana_dealloc_queues(struct net_device *ndev) mana_destroy_vport(apc); - mana_destroy_eq(apc->ac->gdma_dev->gdma_context, apc); - return 0; } @@ -1768,7 +1779,7 @@ static int mana_probe_port(struct mana_context 
*ac, int port_idx, apc->ac = ac; apc->ndev = ndev; apc->max_queues = gc->max_num_queues; - apc->num_queues = min_t(uint, gc->max_num_queues, MANA_MAX_NUM_QUEUES); + apc->num_queues = gc->max_num_queues; apc->port_handle = INVALID_MANA_HANDLE; apc->port_idx = port_idx; @@ -1839,6 +1850,10 @@ int mana_probe(struct gdma_dev *gd) ac->num_ports = 1; gd->driver_data = ac; + err = mana_create_eq(ac); + if (err) + goto out; + err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION, &ac->num_ports); if (err) @@ -1888,6 +1903,9 @@ void mana_remove(struct gdma_dev *gd) free_netdev(ndev); } + + mana_destroy_eq(ac); + out: mana_gd_deregister_device(gd); gd->driver_data = NULL; diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index 2d3157e4d081..b6a73d151dec 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -16,7 +16,7 @@ config MSCC_OCELOT_SWITCH_LIB select NET_DEVLINK select REGMAP_MMIO select PACKING - select PHYLIB + select PHYLINK tristate help This is a hardware support library for Ocelot network switches. It is @@ -24,6 +24,7 @@ config MSCC_OCELOT_SWITCH_LIB config MSCC_OCELOT_SWITCH tristate "Ocelot switch driver" + depends on PTP_1588_CLOCK_OPTIONAL depends on BRIDGE || BRIDGE=n depends on NET_SWITCHDEV depends on HAS_IOMEM diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 2948d731a1c1..c581b955efb3 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -222,8 +222,35 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, ANA_PORT_DROP_CFG, port); } +static int ocelot_vlan_member_set(struct ocelot *ocelot, u32 vlan_mask, u16 vid) +{ + int err; + + err = ocelot_vlant_set_mask(ocelot, vid, vlan_mask); + if (err) + return err; + + ocelot->vlan_mask[vid] = vlan_mask; + + return 0; +} + +static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid) +{ + return ocelot_vlan_member_set(ocelot, + ocelot->vlan_mask[vid] | BIT(port), + vid); +} + +static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid) +{ + return ocelot_vlan_member_set(ocelot, + ocelot->vlan_mask[vid] & ~BIT(port), + vid); +} + int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, - bool vlan_aware) + bool vlan_aware, struct netlink_ext_ack *extack) { struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1]; struct ocelot_port *ocelot_port = ocelot->ports[port]; @@ -233,8 +260,8 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, list_for_each_entry(filter, &block->rules, list) { if (filter->ingress_port_mask & BIT(port) && filter->action.vid_replace_ena) { - dev_err(ocelot->dev, - "Cannot change VLAN state with vlan modify rules active\n"); + NL_SET_ERR_MSG_MOD(extack, + "Cannot change VLAN state with vlan modify rules active"); return -EBUSY; } } @@ -259,16 +286,15 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, EXPORT_SYMBOL(ocelot_port_vlan_filtering); int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid, - bool untagged) + bool untagged, struct netlink_ext_ack *extack) { struct ocelot_port *ocelot_port = ocelot->ports[port]; /* Deny changing the native VLAN, but always permit deleting it */ if (untagged && ocelot_port->native_vlan.vid != vid && ocelot_port->native_vlan.valid) { - dev_err(ocelot->dev, - "Port already has a native VLAN: %d\n", - ocelot_port->native_vlan.vid); + NL_SET_ERR_MSG_MOD(extack, + "Port already has a native 
VLAN"); return -EBUSY; } @@ -279,13 +305,11 @@ EXPORT_SYMBOL(ocelot_vlan_prepare); int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, bool untagged) { - int ret; + int err; - /* Make the port a member of the VLAN */ - ocelot->vlan_mask[vid] |= BIT(port); - ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); - if (ret) - return ret; + err = ocelot_vlan_member_add(ocelot, port, vid); + if (err) + return err; /* Default ingress vlan classification */ if (pvid) { @@ -312,13 +336,11 @@ EXPORT_SYMBOL(ocelot_vlan_add); int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - int ret; + int err; - /* Stop the port from being a member of the vlan */ - ocelot->vlan_mask[vid] &= ~BIT(port); - ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); - if (ret) - return ret; + err = ocelot_vlan_member_del(ocelot, port, vid); + if (err) + return err; /* Ingress */ if (ocelot_port->pvid_vlan.vid == vid) { @@ -340,6 +362,7 @@ EXPORT_SYMBOL(ocelot_vlan_del); static void ocelot_vlan_init(struct ocelot *ocelot) { + unsigned long all_ports = GENMASK(ocelot->num_phys_ports - 1, 0); u16 port, vid; /* Clear VLAN table, by default all ports are members of all VLANs */ @@ -348,23 +371,19 @@ static void ocelot_vlan_init(struct ocelot *ocelot) ocelot_vlant_wait_for_completion(ocelot); /* Configure the port VLAN memberships */ - for (vid = 1; vid < VLAN_N_VID; vid++) { - ocelot->vlan_mask[vid] = 0; - ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); - } + for (vid = 1; vid < VLAN_N_VID; vid++) + ocelot_vlan_member_set(ocelot, 0, vid); /* Because VLAN filtering is enabled, we need VID 0 to get untagged * traffic. It is added automatically if 8021q module is loaded, but * we can't rely on it since module may be not loaded. */ - ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0); - ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]); + ocelot_vlan_member_set(ocelot, all_ports, 0); /* Set vlan ingress filter mask to all ports but the CPU port by * default. */ - ocelot_write(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), - ANA_VLANMASK); + ocelot_write(ocelot, all_ports, ANA_VLANMASK); for (port = 0; port < ocelot->num_phys_ports; port++) { ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port); @@ -377,7 +396,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port) return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port); } -int ocelot_port_flush(struct ocelot *ocelot, int port) +static int ocelot_port_flush(struct ocelot *ocelot, int port) { unsigned int pause_ena; int err, val; @@ -429,63 +448,118 @@ int ocelot_port_flush(struct ocelot *ocelot, int port) return err; } -EXPORT_SYMBOL(ocelot_port_flush); -void ocelot_adjust_link(struct ocelot *ocelot, int port, - struct phy_device *phydev) +void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port, + unsigned int link_an_mode, + phy_interface_t interface, + unsigned long quirks) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - int speed, mode = 0; + int err; + + ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA, + DEV_MAC_ENA_CFG); + + ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); + + err = ocelot_port_flush(ocelot, port); + if (err) + dev_err(ocelot->dev, "failed to flush port %d: %d\n", + port, err); + + /* Put the port in reset. 
*/ + if (interface != PHY_INTERFACE_MODE_QSGMII || + !(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP)) + ocelot_port_rmwl(ocelot_port, + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_RX_RST, + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_RX_RST, + DEV_CLOCK_CFG); +} +EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down); + +void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port, + struct phy_device *phydev, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause, + unsigned long quirks) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int mac_speed, mode = 0; + u32 mac_fc_cfg; + + /* The MAC might be integrated in systems where the MAC speed is fixed + * and it's the PCS who is performing the rate adaptation, so we have + * to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG + * (which is also its default value). + */ + if ((quirks & OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION) || + speed == SPEED_1000) { + mac_speed = OCELOT_SPEED_1000; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; + } else if (speed == SPEED_2500) { + mac_speed = OCELOT_SPEED_2500; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; + } else if (speed == SPEED_100) { + mac_speed = OCELOT_SPEED_100; + } else { + mac_speed = OCELOT_SPEED_10; + } - switch (phydev->speed) { + if (duplex == DUPLEX_FULL) + mode |= DEV_MAC_MODE_CFG_FDX_ENA; + + ocelot_port_writel(ocelot_port, mode, DEV_MAC_MODE_CFG); + + /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and + * PORT_RST bits in DEV_CLOCK_CFG. + */ + ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(mac_speed), + DEV_CLOCK_CFG); + + switch (speed) { case SPEED_10: - speed = OCELOT_SPEED_10; + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_10); break; case SPEED_100: - speed = OCELOT_SPEED_100; + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_100); break; case SPEED_1000: - speed = OCELOT_SPEED_1000; - mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; - break; case SPEED_2500: - speed = OCELOT_SPEED_2500; - mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_1000); break; default: - dev_err(ocelot->dev, "Unsupported PHY speed on port %d: %d\n", - port, phydev->speed); + dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n", + port, speed); return; } - phy_print_status(phydev); - - if (!phydev->link) - return; - - /* Only full duplex supported for now */ - ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA | - mode, DEV_MAC_MODE_CFG); - - /* Disable HDX fast control */ - ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS, - DEV_PORT_MISC); + /* Handle RX pause in all cases, with 2500base-X this is used for rate + * adaptation. + */ + mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA; - /* SGMII only for now */ - ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA, - PCS1G_MODE_CFG); - ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG); + if (tx_pause) + mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA | + SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | + SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | + SYS_MAC_FC_CFG_ZERO_PAUSE_ENA; - /* Enable PCS */ - ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG); + /* Flow control. Link speed is only used here to evaluate the time * specification in incoming pause frames. 
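+ * The pause quanta (0xffff) and pause latency (0x7) programmed above + * only take effect when tx_pause is set.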
+ */ + ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port); - /* No aneg on SGMII */ - ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG); + ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); - /* No loopback */ - ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG); + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause); - /* Enable MAC module */ + /* Undo the effects of ocelot_phylink_mac_link_down: + * enable MAC module + */ ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA | DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); @@ -502,39 +576,8 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port, /* Core: Enable port for frame transfer */ ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); - - /* Flow control */ - ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | - SYS_MAC_FC_CFG_RX_FC_ENA | SYS_MAC_FC_CFG_TX_FC_ENA | - SYS_MAC_FC_CFG_ZERO_PAUSE_ENA | - SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | - SYS_MAC_FC_CFG_FC_LINK_SPEED(speed), - SYS_MAC_FC_CFG, port); - ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); -} -EXPORT_SYMBOL(ocelot_adjust_link); - -void ocelot_port_enable(struct ocelot *ocelot, int port, - struct phy_device *phy) -{ - /* Enable receiving frames on the port, and activate auto-learning of - * MAC addresses. - */ - ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO | - ANA_PORT_PORT_CFG_RECV_ENA | - ANA_PORT_PORT_CFG_PORTID_VAL(port), - ANA_PORT_PORT_CFG, port); } -EXPORT_SYMBOL(ocelot_port_enable); - -void ocelot_port_disable(struct ocelot *ocelot, int port) -{ - struct ocelot_port *ocelot_port = ocelot->ports[port]; - - ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG); - ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); -} -EXPORT_SYMBOL(ocelot_port_disable); +EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up); static void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, struct sk_buff *clone) @@ -1957,6 +2000,15 @@ void ocelot_init_port(struct ocelot *ocelot, int port) /* Disable source address learning for standalone mode */ ocelot_port_set_learning(ocelot, port, false); + /* Set the port's initial logical port ID value, enable receiving + * frames on it, and configure the MAC address learning type to + * automatic. 
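This write previously lived in + * ocelot_port_enable(), which this patch removes; doing it once at + * initialization time is equivalent, since the value written never changed.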
+ */ + ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO | + ANA_PORT_PORT_CFG_RECV_ENA | + ANA_PORT_PORT_CFG_PORTID_VAL(port), + ANA_PORT_PORT_CFG, port); + /* Enable vcap lookups */ ocelot_vcap_enable(ocelot, port); } diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index db6b1a4c3926..1952d6a1b98a 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -12,8 +12,7 @@ #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/net_tstamp.h> -#include <linux/phy.h> -#include <linux/phy/phy.h> +#include <linux/phylink.h> #include <linux/platform_device.h> #include <linux/regmap.h> @@ -42,11 +41,9 @@ struct ocelot_port_tc { struct ocelot_port_private { struct ocelot_port port; struct net_device *dev; - struct phy_device *phy; + struct phylink *phylink; + struct phylink_config phylink_config; u8 chip_port; - - struct phy *serdes; - struct ocelot_port_tc tc; }; @@ -107,7 +104,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, - struct phy_device *phy); + struct device_node *portnp); void ocelot_release_port(struct ocelot_port *ocelot_port); int ocelot_devlink_init(struct ocelot *ocelot); void ocelot_devlink_teardown(struct ocelot *ocelot); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index e9d260d84bf3..c0c465a4a981 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -9,10 +9,14 @@ */ #include <linux/if_bridge.h> +#include <linux/of_net.h> +#include <linux/phy/phy.h> #include <net/pkt_cls.h> #include "ocelot.h" #include "ocelot_vcap.h" +#define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP + static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp) { return devlink_priv(dlp->devlink); @@ -160,6 +164,7 @@ int ocelot_port_devlink_init(struct ocelot *ocelot, int port, struct devlink *dl = ocelot->devlink; struct devlink_port_attrs attrs = {}; + memset(dlp, 0, sizeof(*dlp)); memcpy(attrs.switch_id.id, &ocelot->base_mac, id_len); attrs.switch_id.id_len = id_len; attrs.phys.port_number = port; @@ -381,26 +386,6 @@ static int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type, return 0; } -static void ocelot_port_adjust_link(struct net_device *dev) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - ocelot_adjust_link(ocelot, port, dev->phydev); -} - -static int ocelot_vlan_vid_prepare(struct net_device *dev, u16 vid, bool pvid, - bool untagged) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; - - return ocelot_vlan_prepare(ocelot, port, vid, pvid, untagged); -} - static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, bool untagged) { @@ -448,33 +433,8 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) static int ocelot_port_open(struct net_device *dev) { struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; - int err; - if (priv->serdes) { - err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET, - ocelot_port->phy_mode); - if (err) { - netdev_err(dev, 
"Could not set mode of SerDes\n"); - return err; - } - } - - err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link, - ocelot_port->phy_mode); - if (err) { - netdev_err(dev, "Could not attach to PHY\n"); - return err; - } - - dev->phydev = priv->phy; - - phy_attached_info(priv->phy); - phy_start(priv->phy); - - ocelot_port_enable(ocelot, port, priv->phy); + phylink_start(priv->phylink); return 0; } @@ -482,14 +442,8 @@ static int ocelot_port_open(struct net_device *dev) static int ocelot_port_stop(struct net_device *dev) { struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - phy_disconnect(priv->phy); - - dev->phydev = NULL; - ocelot_port_disable(ocelot, port); + phylink_stop(priv->phylink); return 0; } @@ -823,7 +777,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid, .ndo_set_features = ocelot_set_features, .ndo_setup_tc = ocelot_setup_tc, - .ndo_do_ioctl = ocelot_ioctl, + .ndo_eth_ioctl = ocelot_ioctl, .ndo_get_devlink_port = ocelot_get_devlink_port, }; @@ -959,7 +913,8 @@ static int ocelot_port_attr_set(struct net_device *dev, const void *ctx, ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time); break; case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: - ocelot_port_vlan_filtering(ocelot, port, attr->u.vlan_filtering); + ocelot_port_vlan_filtering(ocelot, port, attr->u.vlan_filtering, + extack); break; case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled); @@ -979,14 +934,26 @@ static int ocelot_port_attr_set(struct net_device *dev, const void *ctx, return err; } +static int ocelot_vlan_vid_prepare(struct net_device *dev, u16 vid, bool pvid, + bool untagged, struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + + return ocelot_vlan_prepare(ocelot, port, vid, pvid, untagged, extack); +} + static int ocelot_port_obj_add_vlan(struct net_device *dev, - const struct switchdev_obj_port_vlan *vlan) + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) { bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; int ret; - ret = ocelot_vlan_vid_prepare(dev, vlan->vid, pvid, untagged); + ret = ocelot_vlan_vid_prepare(dev, vlan->vid, pvid, untagged, extack); if (ret) return ret; @@ -1074,7 +1041,8 @@ static int ocelot_port_obj_add(struct net_device *dev, const void *ctx, switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: ret = ocelot_port_obj_add_vlan(dev, - SWITCHDEV_OBJ_PORT_VLAN(obj)); + SWITCHDEV_OBJ_PORT_VLAN(obj), + extack); break; case SWITCHDEV_OBJ_ID_PORT_MDB: ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj)); @@ -1154,45 +1122,27 @@ static int ocelot_switchdev_sync(struct ocelot *ocelot, int port, struct net_device *bridge_dev, struct netlink_ext_ack *extack) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; - struct ocelot_port_private *priv; clock_t ageing_time; u8 stp_state; - int err; - - priv = container_of(ocelot_port, struct ocelot_port_private, port); ocelot_inherit_brport_flags(ocelot, port, brport_dev); stp_state = br_port_get_stp_state(brport_dev); ocelot_bridge_stp_state_set(ocelot, port, stp_state); - err = ocelot_port_vlan_filtering(ocelot, port, - br_vlan_enabled(bridge_dev)); - if (err) - return err; - 
ageing_time = br_get_ageing_time(bridge_dev); ocelot_port_attr_ageing_set(ocelot, port, ageing_time); - err = br_mdb_replay(bridge_dev, brport_dev, priv, true, - &ocelot_switchdev_blocking_nb, extack); - if (err && err != -EOPNOTSUPP) - return err; - - err = br_vlan_replay(bridge_dev, brport_dev, priv, true, - &ocelot_switchdev_blocking_nb, extack); - if (err && err != -EOPNOTSUPP) - return err; - - return 0; + return ocelot_port_vlan_filtering(ocelot, port, + br_vlan_enabled(bridge_dev), + extack); } static int ocelot_switchdev_unsync(struct ocelot *ocelot, int port) { int err; - err = ocelot_port_vlan_filtering(ocelot, port, false); + err = ocelot_port_vlan_filtering(ocelot, port, false, NULL); if (err) return err; @@ -1216,6 +1166,13 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev, ocelot_port_bridge_join(ocelot, port, bridge); + err = switchdev_bridge_port_offload(brport_dev, dev, priv, + &ocelot_netdevice_nb, + &ocelot_switchdev_blocking_nb, + false, extack); + if (err) + goto err_switchdev_offload; + err = ocelot_switchdev_sync(ocelot, port, brport_dev, bridge, extack); if (err) goto err_switchdev_sync; @@ -1223,10 +1180,24 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev, return 0; err_switchdev_sync: + switchdev_bridge_port_unoffload(brport_dev, priv, + &ocelot_netdevice_nb, + &ocelot_switchdev_blocking_nb); +err_switchdev_offload: ocelot_port_bridge_leave(ocelot, port, bridge); return err; } +static void ocelot_netdevice_pre_bridge_leave(struct net_device *dev, + struct net_device *brport_dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + + switchdev_bridge_port_unoffload(brport_dev, priv, + &ocelot_netdevice_nb, + &ocelot_switchdev_blocking_nb); +} + static int ocelot_netdevice_bridge_leave(struct net_device *dev, struct net_device *brport_dev, struct net_device *bridge) @@ -1279,6 +1250,18 @@ err_bridge_join: return err; } +static void ocelot_netdevice_pre_lag_leave(struct net_device *dev, + struct net_device *bond) +{ + struct net_device *bridge_dev; + + bridge_dev = netdev_master_upper_dev_get(bond); + if (!bridge_dev || !netif_is_bridge_master(bridge_dev)) + return; + + ocelot_netdevice_pre_bridge_leave(dev, bond); +} + static int ocelot_netdevice_lag_leave(struct net_device *dev, struct net_device *bond) { @@ -1356,6 +1339,43 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev, } static int +ocelot_netdevice_prechangeupper(struct net_device *dev, + struct net_device *brport_dev, + struct netdev_notifier_changeupper_info *info) +{ + if (netif_is_bridge_master(info->upper_dev) && !info->linking) + ocelot_netdevice_pre_bridge_leave(dev, brport_dev); + + if (netif_is_lag_master(info->upper_dev) && !info->linking) + ocelot_netdevice_pre_lag_leave(dev, info->upper_dev); + + return NOTIFY_DONE; +} + +static int +ocelot_netdevice_lag_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *lower; + struct list_head *iter; + int err = NOTIFY_DONE; + + netdev_for_each_lower_dev(dev, lower, iter) { + struct ocelot_port_private *priv = netdev_priv(lower); + struct ocelot_port *ocelot_port = &priv->port; + + if (ocelot_port->bond != dev) + return NOTIFY_OK; + + err = ocelot_netdevice_prechangeupper(dev, lower, info); + if (err) + return err; + } + + return NOTIFY_DONE; +} + +static int ocelot_netdevice_changelowerstate(struct net_device *dev, struct netdev_lag_lower_state_info *info) { @@ -1382,6 +1402,17 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct 
net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { + case NETDEV_PRECHANGEUPPER: { + struct netdev_notifier_changeupper_info *info = ptr; + + if (ocelot_netdevice_dev_check(dev)) + return ocelot_netdevice_prechangeupper(dev, dev, info); + + if (netif_is_lag_master(dev)) + return ocelot_netdevice_lag_prechangeupper(dev, info); + + break; + } case NETDEV_CHANGEUPPER: { struct netdev_notifier_changeupper_info *info = ptr; @@ -1466,8 +1497,188 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = { .notifier_call = ocelot_switchdev_blocking_event, }; +static void vsc7514_phylink_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ocelot_port_private *priv = netdev_priv(ndev); + struct ocelot_port *ocelot_port = &priv->port; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = {}; + + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != ocelot_port->phy_mode) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + + phylink_set_port_modes(mask); + + phylink_set(mask, Pause); + phylink_set(mask, Autoneg); + phylink_set(mask, Asym_Pause); + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + phylink_set(mask, 1000baseT_Half); + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 2500baseT_Full); + phylink_set(mask, 2500baseX_Full); + + bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static void vsc7514_phylink_mac_config(struct phylink_config *config, + unsigned int link_an_mode, + const struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ocelot_port_private *priv = netdev_priv(ndev); + struct ocelot_port *ocelot_port = &priv->port; + + /* Disable HDX fast control */ + ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS, + DEV_PORT_MISC); + + /* SGMII only for now */ + ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA, + PCS1G_MODE_CFG); + ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG); + + /* Enable PCS */ + ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG); + + /* No aneg on SGMII */ + ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG); + + /* No loopback */ + ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG); +} + +static void vsc7514_phylink_mac_link_down(struct phylink_config *config, + unsigned int link_an_mode, + phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ocelot_port_private *priv = netdev_priv(ndev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface, + OCELOT_MAC_QUIRKS); +} + +static void vsc7514_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ocelot_port_private *priv = netdev_priv(ndev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode, + interface, speed, duplex, + tx_pause, rx_pause, OCELOT_MAC_QUIRKS); +} + +static const 
struct phylink_mac_ops ocelot_phylink_ops = { + .validate = vsc7514_phylink_validate, + .mac_config = vsc7514_phylink_mac_config, + .mac_link_down = vsc7514_phylink_mac_link_down, + .mac_link_up = vsc7514_phylink_mac_link_up, +}; + +static int ocelot_port_phylink_create(struct ocelot *ocelot, int port, + struct device_node *portnp) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_port_private *priv; + struct device *dev = ocelot->dev; + phy_interface_t phy_mode; + struct phylink *phylink; + int err; + + of_get_phy_mode(portnp, &phy_mode); + /* DT bindings of internal PHY ports are broken and don't + * specify a phy-mode + */ + if (phy_mode == PHY_INTERFACE_MODE_NA) + phy_mode = PHY_INTERFACE_MODE_INTERNAL; + + if (phy_mode != PHY_INTERFACE_MODE_SGMII && + phy_mode != PHY_INTERFACE_MODE_QSGMII && + phy_mode != PHY_INTERFACE_MODE_INTERNAL) { + dev_err(dev, "unsupported phy mode %s for port %d\n", + phy_modes(phy_mode), port); + return -EINVAL; + } + + /* Ensure clock signals and speed are set on all QSGMII links */ + if (phy_mode == PHY_INTERFACE_MODE_QSGMII) + ocelot_port_rmwl(ocelot_port, 0, + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_RX_RST, + DEV_CLOCK_CFG); + + ocelot_port->phy_mode = phy_mode; + + if (phy_mode != PHY_INTERFACE_MODE_INTERNAL) { + struct phy *serdes = of_phy_get(portnp, NULL); + + if (IS_ERR(serdes)) { + err = PTR_ERR(serdes); + dev_err_probe(dev, err, + "missing SerDes phys for port %d\n", + port); + return err; + } + + err = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET, phy_mode); + of_phy_put(serdes); + if (err) { + dev_err(dev, "Could not set SerDes mode on port %d: %pe\n", + port, ERR_PTR(err)); + return err; + } + } + + priv = container_of(ocelot_port, struct ocelot_port_private, port); + + priv->phylink_config.dev = &priv->dev->dev; + priv->phylink_config.type = PHYLINK_NETDEV; + + phylink = phylink_create(&priv->phylink_config, + of_fwnode_handle(portnp), + phy_mode, &ocelot_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + dev_err(dev, "Could not create phylink (%pe)\n", phylink); + return err; + } + + priv->phylink = phylink; + + err = phylink_of_phy_connect(phylink, portnp, 0); + if (err) { + dev_err(dev, "Could not connect to PHY: %pe\n", ERR_PTR(err)); + phylink_destroy(phylink); + priv->phylink = NULL; + return err; + } + + return 0; +} + int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, - struct phy_device *phy) + struct device_node *portnp) { struct ocelot_port_private *priv; struct ocelot_port *ocelot_port; @@ -1480,7 +1691,6 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, SET_NETDEV_DEV(dev, ocelot->dev); priv = netdev_priv(dev); priv->dev = dev; - priv->phy = phy; priv->chip_port = port; ocelot_port = &priv->port; ocelot_port->ocelot = ocelot; @@ -1501,15 +1711,23 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, ocelot_init_port(ocelot, port); + err = ocelot_port_phylink_create(ocelot, port, portnp); + if (err) + goto out; + err = register_netdev(dev); if (err) { dev_err(ocelot->dev, "register_netdev failed\n"); - free_netdev(dev); - ocelot->ports[port] = NULL; - return err; + goto out; } return 0; + +out: + ocelot->ports[port] = NULL; + free_netdev(dev); + + return err; } void ocelot_release_port(struct ocelot_port *ocelot_port) @@ -1519,5 +1737,14 @@ void ocelot_release_port(struct ocelot_port *ocelot_port) port); unregister_netdev(priv->dev); + + if (priv->phylink) { + rtnl_lock(); + 
phylink_disconnect_phy(priv->phylink); + rtnl_unlock(); + + phylink_destroy(priv->phylink); + } + free_netdev(priv->dev); } diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 4bd7e9d9ec61..291ae6817c26 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -9,6 +9,7 @@ #include <linux/module.h> #include <linux/of_net.h> #include <linux/netdevice.h> +#include <linux/phylink.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> #include <linux/mfd/syscon.h> @@ -945,13 +946,9 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev, for_each_available_child_of_node(ports, portnp) { struct ocelot_port_private *priv; struct ocelot_port *ocelot_port; - struct device_node *phy_node; struct devlink_port *dlp; - phy_interface_t phy_mode; - struct phy_device *phy; struct regmap *target; struct resource *res; - struct phy *serdes; char res_name[8]; if (of_property_read_u32(portnp, "reg", ®)) @@ -975,77 +972,26 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev, goto out_teardown; } - phy_node = of_parse_phandle(portnp, "phy-handle", 0); - if (!phy_node) - continue; - - phy = of_phy_find_device(phy_node); - of_node_put(phy_node); - if (!phy) - continue; - err = ocelot_port_devlink_init(ocelot, port, DEVLINK_PORT_FLAVOUR_PHYSICAL); if (err) { of_node_put(portnp); goto out_teardown; } - devlink_ports_registered |= BIT(port); - err = ocelot_probe_port(ocelot, port, target, phy); + err = ocelot_probe_port(ocelot, port, target, portnp); if (err) { - of_node_put(portnp); - goto out_teardown; + ocelot_port_devlink_teardown(ocelot, port); + continue; } + devlink_ports_registered |= BIT(port); + ocelot_port = ocelot->ports[port]; priv = container_of(ocelot_port, struct ocelot_port_private, port); dlp = &ocelot->devlink_ports[port]; devlink_port_type_eth_set(dlp, priv->dev); - - of_get_phy_mode(portnp, &phy_mode); - - ocelot_port->phy_mode = phy_mode; - - switch (ocelot_port->phy_mode) { - case PHY_INTERFACE_MODE_NA: - continue; - case PHY_INTERFACE_MODE_SGMII: - break; - case PHY_INTERFACE_MODE_QSGMII: - /* Ensure clock signals and speed is set on all - * QSGMII links - */ - ocelot_port_writel(ocelot_port, - DEV_CLOCK_CFG_LINK_SPEED - (OCELOT_SPEED_1000), - DEV_CLOCK_CFG); - break; - default: - dev_err(ocelot->dev, - "invalid phy mode for port%d, (Q)SGMII only\n", - port); - of_node_put(portnp); - err = -EINVAL; - goto out_teardown; - } - - serdes = devm_of_phy_get(ocelot->dev, portnp, NULL); - if (IS_ERR(serdes)) { - err = PTR_ERR(serdes); - if (err == -EPROBE_DEFER) - dev_dbg(ocelot->dev, "deferring probe\n"); - else - dev_err(ocelot->dev, - "missing SerDes phys for port%d\n", - port); - - of_node_put(portnp); - goto out_teardown; - } - - priv->serdes = serdes; } /* Initialize unused devlink ports at the end */ @@ -1103,7 +1049,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (!np && !pdev->dev.platform_data) return -ENODEV; - devlink = devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot)); + devlink = + devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot), &pdev->dev); if (!devlink) return -ENOMEM; @@ -1187,7 +1134,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (err) goto out_put_ports; - err = devlink_register(devlink, ocelot->dev); + err = devlink_register(devlink); if (err) goto out_ocelot_deinit; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 
fc99ad8e4a38..c1a75b08ced7 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -850,9 +850,9 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) dmatest_page = alloc_page(GFP_KERNEL); if (!dmatest_page) return -ENOMEM; - dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE, - DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) { + dmatest_bus = dma_map_page(&mgp->pdev->dev, dmatest_page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(&mgp->pdev->dev, dmatest_bus))) { __free_page(dmatest_page); return -ENOMEM; } @@ -899,7 +899,8 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) (cmd.data0 & 0xffff); abort: - pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL); + dma_unmap_page(&mgp->pdev->dev, dmatest_bus, PAGE_SIZE, + DMA_BIDIRECTIONAL); put_page(dmatest_page); if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST) @@ -1205,10 +1206,10 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, return; } - bus = pci_map_page(mgp->pdev, page, 0, + bus = dma_map_page(&mgp->pdev->dev, page, 0, MYRI10GE_ALLOC_SIZE, - PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&mgp->pdev->dev, bus))) { __free_pages(page, MYRI10GE_ALLOC_ORDER); if (rx->fill_cnt - rx->cnt < 16) rx->watchdog_needed = 1; @@ -1256,9 +1257,9 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev, /* unmap the recvd page if we're the only or last user of it */ if (bytes >= MYRI10GE_ALLOC_SIZE / 2 || (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) { - pci_unmap_page(pdev, (dma_unmap_addr(info, bus) - & ~(MYRI10GE_ALLOC_SIZE - 1)), - MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE); + dma_unmap_page(&pdev->dev, (dma_unmap_addr(info, bus) + & ~(MYRI10GE_ALLOC_SIZE - 1)), + MYRI10GE_ALLOC_SIZE, DMA_FROM_DEVICE); } } @@ -1398,16 +1399,16 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) ss->stats.tx_packets++; dev_consume_skb_irq(skb); if (len) - pci_unmap_single(pdev, + dma_unmap_single(&pdev->dev, dma_unmap_addr(&tx->info[idx], bus), len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } else { if (len) - pci_unmap_page(pdev, + dma_unmap_page(&pdev->dev, dma_unmap_addr(&tx->info[idx], bus), len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } } @@ -1651,8 +1652,10 @@ myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info)); } -static int -myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) +static int myri10ge_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct myri10ge_priv *mgp = netdev_priv(netdev); @@ -1660,8 +1663,10 @@ myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) return 0; } -static int -myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) +static int myri10ge_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct myri10ge_priv *mgp = netdev_priv(netdev); @@ -2110,16 +2115,16 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss) ss->stats.tx_dropped++; dev_kfree_skb_any(skb); if (len) - pci_unmap_single(mgp->pdev, + 
dma_unmap_single(&mgp->pdev->dev, dma_unmap_addr(&tx->info[idx], bus), len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } else { if (len) - pci_unmap_page(mgp->pdev, + dma_unmap_page(&mgp->pdev->dev, dma_unmap_addr(&tx->info[idx], bus), len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } } kfree(ss->rx_big.info); @@ -2584,15 +2589,15 @@ static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp, len = dma_unmap_len(&tx->info[idx], len); if (len) { if (tx->info[idx].skb != NULL) - pci_unmap_single(mgp->pdev, + dma_unmap_single(&mgp->pdev->dev, dma_unmap_addr(&tx->info[idx], bus), len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); else - pci_unmap_page(mgp->pdev, + dma_unmap_page(&mgp->pdev->dev, dma_unmap_addr(&tx->info[idx], bus), len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); dma_unmap_len_set(&tx->info[idx], len, 0); tx->info[idx].skb = NULL; } @@ -2715,8 +2720,8 @@ again: /* map the skb for DMA */ len = skb_headlen(skb); - bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); - if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) + bus = dma_map_single(&mgp->pdev->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&mgp->pdev->dev, bus))) goto drop; idx = tx->req & tx->mask; @@ -2824,7 +2829,7 @@ again: len = skb_frag_size(frag); bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len, DMA_TO_DEVICE); - if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { + if (unlikely(dma_mapping_error(&mgp->pdev->dev, bus))) { myri10ge_unmap_tx_dma(mgp, tx, idx); goto drop; } @@ -3776,19 +3781,17 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) myri10ge_mask_surprise_down(pdev); pci_set_master(pdev); dac_enabled = 1; - status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (status != 0) { dac_enabled = 0; dev_err(&pdev->dev, - "64-bit pci address mask was refused, " - "trying 32-bit\n"); - status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + "64-bit pci address mask was refused, trying 32-bit\n"); + status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); } if (status != 0) { dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); goto abort_with_enabled; } - (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), &mgp->cmd_bus, GFP_KERNEL); if (!mgp->cmd) { diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index ce3eca5d152b..d74a80f010c5 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -193,8 +193,6 @@ static int jazz_sonic_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); platform_set_drvdata(pdev, dev); - netdev_boot_setup_check(dev); - dev->base_addr = res->start; dev->irq = platform_get_irq(pdev, 0); err = sonic_probe1(dev); diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 84f7dbe9edff..3f982033944b 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -790,7 +790,7 @@ static const struct net_device_ops natsemi_netdev_ops = { .ndo_get_stats = get_stats, .ndo_set_rx_mode = set_rx_mode, .ndo_change_mtu = natsemi_change_mtu, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = ns_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index 
28d9e98db81a..ca4686094701 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -215,7 +215,6 @@ int xtsonic_probe(struct platform_device *pdev) lp->device = &pdev->dev; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); - netdev_boot_setup_check(dev); dev->base_addr = resmem->start; dev->irq = resirq->start; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 0b017d4f5c08..09c0e839cca5 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -7625,7 +7625,7 @@ static const struct net_device_ops s2io_netdev_ops = { .ndo_start_xmit = s2io_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = s2io_ndo_set_multicast, - .ndo_do_ioctl = s2io_ioctl, + .ndo_eth_ioctl = s2io_ioctl, .ndo_set_mac_address = s2io_set_mac_addr, .ndo_change_mtu = s2io_change_mtu, .ndo_set_features = s2io_set_features, diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 7abd13e69471..df4a3f3da83a 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3339,7 +3339,7 @@ static const struct net_device_ops vxge_netdev_ops = { .ndo_start_xmit = vxge_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = vxge_set_multicast, - .ndo_do_ioctl = vxge_ioctl, + .ndo_eth_ioctl = vxge_ioctl, .ndo_set_mac_address = vxge_set_mac_addr, .ndo_change_mtu = vxge_change_mtu, .ndo_fix_features = vxge_fix_features, diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig index b82758d5beed..8844d1ac053a 100644 --- a/drivers/net/ethernet/netronome/Kconfig +++ b/drivers/net/ethernet/netronome/Kconfig @@ -23,6 +23,7 @@ config NFP depends on TLS && TLS_DEVICE || TLS_DEVICE=n select NET_DEVLINK select CRC32 + select DIMLIB help This driver supports the Netronome(R) NFP4000/NFP6000 based cards working as a advanced Ethernet NIC. It works with both diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 1cbe2c9f3959..2a432de11858 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -262,10 +262,10 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, } static bool -nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx) +nfp_flower_tun_is_gre(struct flow_rule *rule, int start_idx) { - struct flow_action_entry *act = flow->rule->action.entries; - int num_act = flow->rule->action.num_entries; + struct flow_action_entry *act = rule->action.entries; + int num_act = rule->action.num_entries; int act_idx; /* Preparse action list for next mirred or redirect action */ @@ -279,7 +279,7 @@ nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx) static enum nfp_flower_tun_type nfp_fl_get_tun_from_act(struct nfp_app *app, - struct flow_cls_offload *flow, + struct flow_rule *rule, const struct flow_action_entry *act, int act_idx) { const struct ip_tunnel_info *tun = act->tunnel; @@ -288,7 +288,7 @@ nfp_fl_get_tun_from_act(struct nfp_app *app, /* Determine the tunnel type based on the egress netdev * in the mirred action for tunnels without l4. 
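Unlike VXLAN and Geneve, GRE has no * L4 destination port to key on (see the tp_dst switch below), so it is * detected by preparsing the action list for the egress netdev.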
*/ - if (nfp_flower_tun_is_gre(flow, act_idx)) + if (nfp_flower_tun_is_gre(rule, act_idx)) return NFP_FL_TUNNEL_GRE; switch (tun->key.tp_dst) { @@ -788,11 +788,10 @@ struct nfp_flower_pedit_acts { }; static int -nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action, +nfp_fl_commit_mangle(struct flow_rule *rule, char *nfp_action, int *a_len, struct nfp_flower_pedit_acts *set_act, u32 *csum_updated) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); size_t act_size = 0; u8 ip_proto = 0; @@ -890,7 +889,7 @@ nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action, static int nfp_fl_pedit(const struct flow_action_entry *act, - struct flow_cls_offload *flow, char *nfp_action, int *a_len, + char *nfp_action, int *a_len, u32 *csum_updated, struct nfp_flower_pedit_acts *set_act, struct netlink_ext_ack *extack) { @@ -977,7 +976,7 @@ nfp_flower_output_action(struct nfp_app *app, static int nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, - struct flow_cls_offload *flow, + struct flow_rule *rule, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, @@ -1045,7 +1044,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, case FLOW_ACTION_TUNNEL_ENCAP: { const struct ip_tunnel_info *ip_tun = act->tunnel; - *tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx); + *tun_type = nfp_fl_get_tun_from_act(app, rule, act, act_idx); if (*tun_type == NFP_FL_TUNNEL_NONE) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list"); return -EOPNOTSUPP; @@ -1086,7 +1085,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, /* Tunnel decap is handled by default so accept action. 
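No entry is emitted into the * compiled action list for it.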
*/ return 0; case FLOW_ACTION_MANGLE: - if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len], + if (nfp_fl_pedit(act, &nfp_fl->action_data[*a_len], a_len, csum_updated, set_act, extack)) return -EOPNOTSUPP; break; @@ -1195,7 +1194,7 @@ static bool nfp_fl_check_mangle_end(struct flow_action *flow_act, } int nfp_flower_compile_action(struct nfp_app *app, - struct flow_cls_offload *flow, + struct flow_rule *rule, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, struct netlink_ext_ack *extack) @@ -1207,7 +1206,7 @@ int nfp_flower_compile_action(struct nfp_app *app, bool pkt_host = false; u32 csum_updated = 0; - if (!flow_action_hw_stats_check(&flow->rule->action, extack, + if (!flow_action_hw_stats_check(&rule->action, extack, FLOW_ACTION_HW_STATS_DELAYED_BIT)) return -EOPNOTSUPP; @@ -1219,18 +1218,18 @@ int nfp_flower_compile_action(struct nfp_app *app, tun_out_cnt = 0; out_cnt = 0; - flow_action_for_each(i, act, &flow->rule->action) { - if (nfp_fl_check_mangle_start(&flow->rule->action, i)) + flow_action_for_each(i, act, &rule->action) { + if (nfp_fl_check_mangle_start(&rule->action, i)) memset(&set_act, 0, sizeof(set_act)); - err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len, + err = nfp_flower_loop_action(app, act, rule, nfp_flow, &act_len, netdev, &tun_type, &tun_out_cnt, &out_cnt, &csum_updated, &set_act, &pkt_host, extack, i); if (err) return err; act_cnt++; - if (nfp_fl_check_mangle_end(&flow->rule->action, i)) - nfp_fl_commit_mangle(flow, + if (nfp_fl_check_mangle_end(&rule->action, i)) + nfp_fl_commit_mangle(rule, &nfp_flow->action_data[act_len], &act_len, &set_act, &csum_updated); } diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 062bb2db68bf..bfd7d1c35076 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -2,6 +2,7 @@ /* Copyright (C) 2021 Corigine, Inc. 
*/ #include "conntrack.h" +#include "../nfp_port.h" const struct rhashtable_params nfp_tc_ct_merge_params = { .head_offset = offsetof(struct nfp_fl_ct_tc_merge, @@ -407,15 +408,491 @@ static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry, return -EINVAL; } +static int +nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map) +{ + int key_size; + + /* This field must always be present */ + key_size = sizeof(struct nfp_flower_meta_tci); + map[FLOW_PAY_META_TCI] = 0; + + if (in_key_ls.key_layer & NFP_FLOWER_LAYER_EXT_META) { + map[FLOW_PAY_EXT_META] = key_size; + key_size += sizeof(struct nfp_flower_ext_meta); + } + if (in_key_ls.key_layer & NFP_FLOWER_LAYER_PORT) { + map[FLOW_PAY_INPORT] = key_size; + key_size += sizeof(struct nfp_flower_in_port); + } + if (in_key_ls.key_layer & NFP_FLOWER_LAYER_MAC) { + map[FLOW_PAY_MAC_MPLS] = key_size; + key_size += sizeof(struct nfp_flower_mac_mpls); + } + if (in_key_ls.key_layer & NFP_FLOWER_LAYER_TP) { + map[FLOW_PAY_L4] = key_size; + key_size += sizeof(struct nfp_flower_tp_ports); + } + if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV4) { + map[FLOW_PAY_IPV4] = key_size; + key_size += sizeof(struct nfp_flower_ipv4); + } + if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV6) { + map[FLOW_PAY_IPV6] = key_size; + key_size += sizeof(struct nfp_flower_ipv6); + } + + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) { + map[FLOW_PAY_GRE] = key_size; + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) + key_size += sizeof(struct nfp_flower_ipv6_gre_tun); + else + key_size += sizeof(struct nfp_flower_ipv4_gre_tun); + } + + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) { + map[FLOW_PAY_QINQ] = key_size; + key_size += sizeof(struct nfp_flower_vlan); + } + + if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) || + (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) { + map[FLOW_PAY_UDP_TUN] = key_size; + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) + key_size += sizeof(struct nfp_flower_ipv6_udp_tun); + else + key_size += sizeof(struct nfp_flower_ipv4_udp_tun); + } + + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { + map[FLOW_PAY_GENEVE_OPT] = key_size; + key_size += sizeof(struct nfp_flower_geneve_options); + } + + return key_size; +} + +static int nfp_fl_merge_actions_offload(struct flow_rule **rules, + struct nfp_flower_priv *priv, + struct net_device *netdev, + struct nfp_fl_payload *flow_pay) +{ + struct flow_action_entry *a_in; + int i, j, num_actions, id; + struct flow_rule *a_rule; + int err = 0, offset = 0; + + num_actions = rules[CT_TYPE_PRE_CT]->action.num_entries + + rules[CT_TYPE_NFT]->action.num_entries + + rules[CT_TYPE_POST_CT]->action.num_entries; + + a_rule = flow_rule_alloc(num_actions); + if (!a_rule) + return -ENOMEM; + + /* Actions need a BASIC dissector. */ + a_rule->match = rules[CT_TYPE_PRE_CT]->match; + + /* Copy actions */ + for (j = 0; j < _CT_TYPE_MAX; j++) { + if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + /* ip_proto is the only field that needed in later compile_action, + * needed to set the correct checksum flags. It doesn't really matter + * which input rule's ip_proto field we take as the earlier merge checks + * would have made sure that they don't conflict. 
We do not know which + * of the subflows would have the ip_proto filled in, so we need to iterate + * through the subflows and assign the proper subflow to a_rule + */ + flow_rule_match_basic(rules[j], &match); + if (match.mask->ip_proto) + a_rule->match = rules[j]->match; + } + + for (i = 0; i < rules[j]->action.num_entries; i++) { + a_in = &rules[j]->action.entries[i]; + id = a_in->id; + + /* Ignore CT related actions as these would already have + * been taken care of by previous checks, and we do not send + * any CT actions to the firmware. + */ + switch (id) { + case FLOW_ACTION_CT: + case FLOW_ACTION_GOTO: + case FLOW_ACTION_CT_METADATA: + continue; + default: + memcpy(&a_rule->action.entries[offset++], + a_in, sizeof(struct flow_action_entry)); + break; + } + } + } + + /* Some actions would have been ignored, so update the num_entries field */ + a_rule->action.num_entries = offset; + err = nfp_flower_compile_action(priv->app, a_rule, netdev, flow_pay, NULL); + kfree(a_rule); + + return err; +} + static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) { - return 0; + enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; + struct nfp_fl_ct_zone_entry *zt = m_entry->zt; + struct nfp_fl_key_ls key_layer, tmp_layer; + struct nfp_flower_priv *priv = zt->priv; + u16 key_map[_FLOW_PAY_LAYERS_MAX]; + struct nfp_fl_payload *flow_pay; + + struct flow_rule *rules[_CT_TYPE_MAX]; + u8 *key, *msk, *kdata, *mdata; + struct nfp_port *port = NULL; + struct net_device *netdev; + bool qinq_sup; + u32 port_id; + u16 offset; + int i, err; + + netdev = m_entry->netdev; + qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ); + + rules[CT_TYPE_PRE_CT] = m_entry->tc_m_parent->pre_ct_parent->rule; + rules[CT_TYPE_NFT] = m_entry->nft_parent->rule; + rules[CT_TYPE_POST_CT] = m_entry->tc_m_parent->post_ct_parent->rule; + + memset(&key_layer, 0, sizeof(struct nfp_fl_key_ls)); + memset(&key_map, 0, sizeof(key_map)); + + /* Calculate the resultant key layer and size for offload */ + for (i = 0; i < _CT_TYPE_MAX; i++) { + err = nfp_flower_calculate_key_layers(priv->app, + m_entry->netdev, + &tmp_layer, rules[i], + &tun_type, NULL); + if (err) + return err; + + key_layer.key_layer |= tmp_layer.key_layer; + key_layer.key_layer_two |= tmp_layer.key_layer_two; + } + key_layer.key_size = nfp_fl_calc_key_layers_sz(key_layer, key_map); + + flow_pay = nfp_flower_allocate_new(&key_layer); + if (!flow_pay) + return -ENOMEM; + + memset(flow_pay->unmasked_data, 0, key_layer.key_size); + memset(flow_pay->mask_data, 0, key_layer.key_size); + + kdata = flow_pay->unmasked_data; + mdata = flow_pay->mask_data; + + offset = key_map[FLOW_PAY_META_TCI]; + key = kdata + offset; + msk = mdata + offset; + nfp_flower_compile_meta((struct nfp_flower_meta_tci *)key, + (struct nfp_flower_meta_tci *)msk, + key_layer.key_layer); + + if (NFP_FLOWER_LAYER_EXT_META & key_layer.key_layer) { + offset = key_map[FLOW_PAY_EXT_META]; + key = kdata + offset; + msk = mdata + offset; + nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)key, + key_layer.key_layer_two); + nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk, + key_layer.key_layer_two); + } + + /* Using in_port from the -trk rule. 
The tc merge checks should already + * be checking that the ingress netdevs are the same + */ + port_id = nfp_flower_get_port_id_from_netdev(priv->app, netdev); + offset = key_map[FLOW_PAY_INPORT]; + key = kdata + offset; + msk = mdata + offset; + err = nfp_flower_compile_port((struct nfp_flower_in_port *)key, + port_id, false, tun_type, NULL); + if (err) + goto ct_offload_err; + err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk, + port_id, true, tun_type, NULL); + if (err) + goto ct_offload_err; + + /* The following part works on the assumption that the previous checks + * have already filtered out flows that have different values for the + * different layers. Here we iterate through all three rules and merge their + * respective masked values (cared bits); the basic method is: + * final_key = (r1_key & r1_mask) | (r2_key & r2_mask) | (r3_key & r3_mask) + * final_mask = r1_mask | r2_mask | r3_mask + * If none of the rules contains a match, that is also fine; it simply means + * that the layer is not present. + */ + if (!qinq_sup) { + for (i = 0; i < _CT_TYPE_MAX; i++) { + offset = key_map[FLOW_PAY_META_TCI]; + key = kdata + offset; + msk = mdata + offset; + nfp_flower_compile_tci((struct nfp_flower_meta_tci *)key, + (struct nfp_flower_meta_tci *)msk, + rules[i]); + } + } + + if (NFP_FLOWER_LAYER_MAC & key_layer.key_layer) { + offset = key_map[FLOW_PAY_MAC_MPLS]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key, + (struct nfp_flower_mac_mpls *)msk, + rules[i]); + err = nfp_flower_compile_mpls((struct nfp_flower_mac_mpls *)key, + (struct nfp_flower_mac_mpls *)msk, + rules[i], NULL); + if (err) + goto ct_offload_err; + } + } + + if (NFP_FLOWER_LAYER_IPV4 & key_layer.key_layer) { + offset = key_map[FLOW_PAY_IPV4]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key, + (struct nfp_flower_ipv4 *)msk, + rules[i]); + } + } + + if (NFP_FLOWER_LAYER_IPV6 & key_layer.key_layer) { + offset = key_map[FLOW_PAY_IPV6]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key, + (struct nfp_flower_ipv6 *)msk, + rules[i]); + } + } + + if (NFP_FLOWER_LAYER_TP & key_layer.key_layer) { + offset = key_map[FLOW_PAY_L4]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key, + (struct nfp_flower_tp_ports *)msk, + rules[i]); + } + } + + if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) { + offset = key_map[FLOW_PAY_GRE]; + key = kdata + offset; + msk = mdata + offset; + if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { + struct nfp_flower_ipv6_gre_tun *gre_match; + struct nfp_ipv6_addr_entry *entry; + struct in6_addr *dst; + + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_ipv6_gre_tun((void *)key, + (void *)msk, rules[i]); + } + gre_match = (struct nfp_flower_ipv6_gre_tun *)key; + dst = &gre_match->ipv6.dst; + + entry = nfp_tunnel_add_ipv6_off(priv->app, dst); + if (!entry) { + err = -ENOMEM; + goto ct_offload_err; + } + + flow_pay->nfp_tun_ipv6 = entry; + } else { + __be32 dst; + + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_ipv4_gre_tun((void *)key, + (void *)msk, rules[i]); + } + dst = ((struct nfp_flower_ipv4_gre_tun *)key)->ipv4.dst; + + /* Store the tunnel destination in the rule data. 
+ * This must be present and be an exact match. + */ + flow_pay->nfp_tun_ipv4_addr = dst; + nfp_tunnel_add_ipv4_off(priv->app, dst); + } + } + + if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) { + offset = key_map[FLOW_PAY_QINQ]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_vlan((struct nfp_flower_vlan *)key, + (struct nfp_flower_vlan *)msk, + rules[i]); + } + } + + if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN || + key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { + offset = key_map[FLOW_PAY_UDP_TUN]; + key = kdata + offset; + msk = mdata + offset; + if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { + struct nfp_flower_ipv6_udp_tun *udp_match; + struct nfp_ipv6_addr_entry *entry; + struct in6_addr *dst; + + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_ipv6_udp_tun((void *)key, + (void *)msk, rules[i]); + } + udp_match = (struct nfp_flower_ipv6_udp_tun *)key; + dst = &udp_match->ipv6.dst; + + entry = nfp_tunnel_add_ipv6_off(priv->app, dst); + if (!entry) { + err = -ENOMEM; + goto ct_offload_err; + } + + flow_pay->nfp_tun_ipv6 = entry; + } else { + __be32 dst; + + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_ipv4_udp_tun((void *)key, + (void *)msk, rules[i]); + } + dst = ((struct nfp_flower_ipv4_udp_tun *)key)->ipv4.dst; + + /* Store the tunnel destination in the rule data. + * This must be present and be an exact match. + */ + flow_pay->nfp_tun_ipv4_addr = dst; + nfp_tunnel_add_ipv4_off(priv->app, dst); + } + + if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { + offset = key_map[FLOW_PAY_GENEVE_OPT]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) + nfp_flower_compile_geneve_opt(key, msk, rules[i]); + } + } + + /* Merge actions into flow_pay */ + err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay); + if (err) + goto ct_offload_err; + + /* Use the pointer address as the cookie, but set the last bit to 1. + * This is to avoid the 'is_merge_flow' check from detecting this as + * an already merged flow. This works since address alignment means + * that the last bit for pointer addresses will be 0. 
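(The payload is allocated from the kernel + * slab allocator, whose returned pointers are at least word aligned, so + * bit 0 is guaranteed to be clear.)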
+ */ + flow_pay->tc_flower_cookie = ((unsigned long)flow_pay) | 0x1; + err = nfp_compile_flow_metadata(priv->app, flow_pay->tc_flower_cookie, + flow_pay, netdev, NULL); + if (err) + goto ct_offload_err; + + if (nfp_netdev_is_nfp_repr(netdev)) + port = nfp_port_from_netdev(netdev); + + err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node, + nfp_flower_table_params); + if (err) + goto ct_release_offload_meta_err; + + err = nfp_flower_xmit_flow(priv->app, flow_pay, + NFP_FLOWER_CMSG_TYPE_FLOW_ADD); + if (err) + goto ct_remove_rhash_err; + + m_entry->tc_flower_cookie = flow_pay->tc_flower_cookie; + m_entry->flow_pay = flow_pay; + + if (port) + port->tc_offload_cnt++; + + return err; + +ct_remove_rhash_err: + WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, + &flow_pay->fl_node, + nfp_flower_table_params)); +ct_release_offload_meta_err: + nfp_modify_flow_metadata(priv->app, flow_pay); +ct_offload_err: + if (flow_pay->nfp_tun_ipv4_addr) + nfp_tunnel_del_ipv4_off(priv->app, flow_pay->nfp_tun_ipv4_addr); + if (flow_pay->nfp_tun_ipv6) + nfp_tunnel_put_ipv6_off(priv->app, flow_pay->nfp_tun_ipv6); + kfree(flow_pay->action_data); + kfree(flow_pay->mask_data); + kfree(flow_pay->unmasked_data); + kfree(flow_pay); + return err; } static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie, struct net_device *netdev) { - return 0; + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_payload *flow_pay; + struct nfp_port *port = NULL; + int err = 0; + + if (nfp_netdev_is_nfp_repr(netdev)) + port = nfp_port_from_netdev(netdev); + + flow_pay = nfp_flower_search_fl_table(app, cookie, netdev); + if (!flow_pay) + return -ENOENT; + + err = nfp_modify_flow_metadata(app, flow_pay); + if (err) + goto err_free_merge_flow; + + if (flow_pay->nfp_tun_ipv4_addr) + nfp_tunnel_del_ipv4_off(app, flow_pay->nfp_tun_ipv4_addr); + + if (flow_pay->nfp_tun_ipv6) + nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6); + + if (!flow_pay->in_hw) { + err = 0; + goto err_free_merge_flow; + } + + err = nfp_flower_xmit_flow(app, flow_pay, + NFP_FLOWER_CMSG_TYPE_FLOW_DEL); + +err_free_merge_flow: + nfp_flower_del_linked_merge_flows(app, flow_pay); + if (port) + port->tc_offload_cnt--; + kfree(flow_pay->action_data); + kfree(flow_pay->mask_data); + kfree(flow_pay->unmasked_data); + WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, + &flow_pay->fl_node, + nfp_flower_table_params)); + kfree_rcu(flow_pay, rcu); + return err; } static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt, @@ -1048,6 +1525,139 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv, return 0; } +static void +nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge, + enum ct_entry_type type, u64 *m_pkts, + u64 *m_bytes, u64 *m_used) +{ + struct nfp_flower_priv *priv = nft_merge->zt->priv; + struct nfp_fl_payload *nfp_flow; + u32 ctx_id; + + nfp_flow = nft_merge->flow_pay; + if (!nfp_flow) + return; + + ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); + *m_pkts += priv->stats[ctx_id].pkts; + *m_bytes += priv->stats[ctx_id].bytes; + *m_used = max_t(u64, *m_used, priv->stats[ctx_id].used); + + /* If request is for a sub_flow which is part of a tunnel merged + * flow then update stats from tunnel merged flows first. 
+ */ + if (!list_empty(&nfp_flow->linked_flows)) + nfp_flower_update_merge_stats(priv->app, nfp_flow); + + if (type != CT_TYPE_NFT) { + /* Update nft cached stats */ + flow_stats_update(&nft_merge->nft_parent->stats, + priv->stats[ctx_id].bytes, + priv->stats[ctx_id].pkts, + 0, priv->stats[ctx_id].used, + FLOW_ACTION_HW_STATS_DELAYED); + } else { + /* Update pre_ct cached stats */ + flow_stats_update(&nft_merge->tc_m_parent->pre_ct_parent->stats, + priv->stats[ctx_id].bytes, + priv->stats[ctx_id].pkts, + 0, priv->stats[ctx_id].used, + FLOW_ACTION_HW_STATS_DELAYED); + /* Update post_ct cached stats */ + flow_stats_update(&nft_merge->tc_m_parent->post_ct_parent->stats, + priv->stats[ctx_id].bytes, + priv->stats[ctx_id].pkts, + 0, priv->stats[ctx_id].used, + FLOW_ACTION_HW_STATS_DELAYED); + } + /* Reset stats from the nfp */ + priv->stats[ctx_id].pkts = 0; + priv->stats[ctx_id].bytes = 0; +} + +int nfp_fl_ct_stats(struct flow_cls_offload *flow, + struct nfp_fl_ct_map_entry *ct_map_ent) +{ + struct nfp_fl_ct_flow_entry *ct_entry = ct_map_ent->ct_entry; + struct nfp_fl_nft_tc_merge *nft_merge, *nft_m_tmp; + struct nfp_fl_ct_tc_merge *tc_merge, *tc_m_tmp; + + u64 pkts = 0, bytes = 0, used = 0; + u64 m_pkts, m_bytes, m_used; + + spin_lock_bh(&ct_entry->zt->priv->stats_lock); + + if (ct_entry->type == CT_TYPE_PRE_CT) { + /* Iterate tc_merge entries associated with this flow */ + list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children, + pre_ct_list) { + m_pkts = 0; + m_bytes = 0; + m_used = 0; + /* Iterate nft_merge entries associated with this tc_merge flow */ + list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children, + tc_merge_list) { + nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_PRE_CT, + &m_pkts, &m_bytes, &m_used); + } + pkts += m_pkts; + bytes += m_bytes; + used = max_t(u64, used, m_used); + /* Update post_ct partner */ + flow_stats_update(&tc_merge->post_ct_parent->stats, + m_bytes, m_pkts, 0, m_used, + FLOW_ACTION_HW_STATS_DELAYED); + } + } else if (ct_entry->type == CT_TYPE_POST_CT) { + /* Iterate tc_merge entries associated with this flow */ + list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children, + post_ct_list) { + m_pkts = 0; + m_bytes = 0; + m_used = 0; + /* Iterate nft_merge entries associated with this tc_merge flow */ + list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children, + tc_merge_list) { + nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_POST_CT, + &m_pkts, &m_bytes, &m_used); + } + pkts += m_pkts; + bytes += m_bytes; + used = max_t(u64, used, m_used); + /* Update pre_ct partner */ + flow_stats_update(&tc_merge->pre_ct_parent->stats, + m_bytes, m_pkts, 0, m_used, + FLOW_ACTION_HW_STATS_DELAYED); + } + } else { + /* Iterate nft_merge entries associated with this nft flow */ + list_for_each_entry_safe(nft_merge, nft_m_tmp, &ct_entry->children, + nft_flow_list) { + nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_NFT, + &pkts, &bytes, &used); + } + } + + /* Add stats from this request to stats potentially cached by + * previous requests. + */ + flow_stats_update(&ct_entry->stats, bytes, pkts, 0, used, + FLOW_ACTION_HW_STATS_DELAYED); + /* Finally update the flow stats from the original stats request */ + flow_stats_update(&flow->stats, ct_entry->stats.bytes, + ct_entry->stats.pkts, 0, + ct_entry->stats.lastused, + FLOW_ACTION_HW_STATS_DELAYED); + /* Stats have been synced to the original flow; we can now clear + * the cache. 
+ */ + ct_entry->stats.pkts = 0; + ct_entry->stats.bytes = 0; + spin_unlock_bh(&ct_entry->zt->priv->stats_lock); + + return 0; +} + static int nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow) { @@ -1080,7 +1690,11 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl nfp_ct_map_params); return nfp_fl_ct_del_flow(ct_map_ent); case FLOW_CLS_STATS: - return 0; + ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie, + nfp_ct_map_params); + if (ct_map_ent) + return nfp_fl_ct_stats(flow, ct_map_ent); + break; default: break; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h index 170b6cdb8cd0..beb6cceff9d8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h @@ -83,6 +83,24 @@ enum ct_entry_type { CT_TYPE_PRE_CT, CT_TYPE_NFT, CT_TYPE_POST_CT, + _CT_TYPE_MAX, +}; + +enum nfp_nfp_layer_name { + FLOW_PAY_META_TCI = 0, + FLOW_PAY_INPORT, + FLOW_PAY_EXT_META, + FLOW_PAY_MAC_MPLS, + FLOW_PAY_L4, + FLOW_PAY_IPV4, + FLOW_PAY_IPV6, + FLOW_PAY_CT, + FLOW_PAY_GRE, + FLOW_PAY_QINQ, + FLOW_PAY_UDP_TUN, + FLOW_PAY_GENEVE_OPT, + + _FLOW_PAY_LAYERS_MAX }; /** @@ -228,4 +246,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent); */ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb_priv); + +/** + * nfp_fl_ct_stats() - Handle flower stats callbacks for ct flows + * @flow: TC flower classifier offload structure. + * @ct_map_ent: ct map entry for the flow whose stats should be synced + */ +int nfp_fl_ct_stats(struct flow_cls_offload *flow, + struct nfp_fl_ct_map_entry *ct_map_ent); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 0fbd682ccf72..917c450a7aad 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -413,20 +413,73 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, int nfp_flower_merge_offloaded_flows(struct nfp_app *app, struct nfp_fl_payload *sub_flow1, struct nfp_fl_payload *sub_flow2); +void +nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext, + struct nfp_flower_meta_tci *msk, u8 key_type); +void +nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext, + struct nfp_flower_meta_tci *msk, + struct flow_rule *rule); +void +nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext); +int +nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, + bool mask_version, enum nfp_flower_tun_type tun_type, + struct netlink_ext_ack *extack); +void +nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, + struct nfp_flower_mac_mpls *msk, + struct flow_rule *rule); +int +nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext, + struct nfp_flower_mac_mpls *msk, + struct flow_rule *rule, + struct netlink_ext_ack *extack); +void +nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext, + struct nfp_flower_tp_ports *msk, + struct flow_rule *rule); +void +nfp_flower_compile_vlan(struct nfp_flower_vlan *ext, + struct nfp_flower_vlan *msk, + struct flow_rule *rule); +void +nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext, + struct nfp_flower_ipv4 *msk, struct flow_rule *rule); +void +nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext, + struct nfp_flower_ipv6 *msk, struct flow_rule *rule); +void +nfp_flower_compile_geneve_opt(u8 *ext, u8 
*msk, struct flow_rule *rule); +void +nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext, + struct nfp_flower_ipv4_gre_tun *msk, + struct flow_rule *rule); +void +nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext, + struct nfp_flower_ipv4_udp_tun *msk, + struct flow_rule *rule); +void +nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext, + struct nfp_flower_ipv6_udp_tun *msk, + struct flow_rule *rule); +void +nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext, + struct nfp_flower_ipv6_gre_tun *msk, + struct flow_rule *rule); int nfp_flower_compile_flow_match(struct nfp_app *app, - struct flow_cls_offload *flow, + struct flow_rule *rule, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, enum nfp_flower_tun_type tun_type, struct netlink_ext_ack *extack); int nfp_flower_compile_action(struct nfp_app *app, - struct flow_cls_offload *flow, + struct flow_rule *rule, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, struct netlink_ext_ack *extack); -int nfp_compile_flow_metadata(struct nfp_app *app, - struct flow_cls_offload *flow, +int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie, struct nfp_fl_payload *nfp_flow, struct net_device *netdev, struct netlink_ext_ack *extack); @@ -498,4 +551,22 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, struct nfp_fl_payload *flow); int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app, struct nfp_fl_payload *flow); + +struct nfp_fl_payload * +nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer); +int nfp_flower_calculate_key_layers(struct nfp_app *app, + struct net_device *netdev, + struct nfp_fl_key_ls *ret_key_ls, + struct flow_rule *flow, + enum nfp_flower_tun_type *tun_type, + struct netlink_ext_ack *extack); +void +nfp_flower_del_linked_merge_flows(struct nfp_app *app, + struct nfp_fl_payload *sub_flow); +int +nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, + u8 mtype); +void +nfp_flower_update_merge_stats(struct nfp_app *app, + struct nfp_fl_payload *sub_flow); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 255a4dff6288..9d86eea4dc16 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -7,51 +7,68 @@ #include "cmsg.h" #include "main.h" -static void -nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext, - struct nfp_flower_meta_tci *msk, - struct flow_rule *rule, u8 key_type, bool qinq_sup) +void +nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext, + struct nfp_flower_meta_tci *msk, u8 key_type) { - u16 tmp_tci; - - memset(ext, 0, sizeof(struct nfp_flower_meta_tci)); - memset(msk, 0, sizeof(struct nfp_flower_meta_tci)); - /* Populate the metadata frame. */ ext->nfp_flow_key_layer = key_type; ext->mask_id = ~0; msk->nfp_flow_key_layer = key_type; msk->mask_id = ~0; +} - if (!qinq_sup && flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { +void +nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext, + struct nfp_flower_meta_tci *msk, + struct flow_rule *rule) +{ + u16 msk_tci, key_tci; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_match_vlan match; flow_rule_match_vlan(rule, &match); /* Populate the tci field. 
*/ - tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; - tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + key_tci = NFP_FLOWER_MASK_VLAN_PRESENT; + key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, match.key->vlan_priority) | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, match.key->vlan_id); - ext->tci = cpu_to_be16(tmp_tci); - tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; - tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT; + msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, match.mask->vlan_priority) | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, match.mask->vlan_id); - msk->tci = cpu_to_be16(tmp_tci); + + ext->tci |= cpu_to_be16((key_tci & msk_tci)); + msk->tci |= cpu_to_be16(msk_tci); } } static void +nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext, + struct nfp_flower_meta_tci *msk, + struct flow_rule *rule, u8 key_type, bool qinq_sup) +{ + memset(ext, 0, sizeof(struct nfp_flower_meta_tci)); + memset(msk, 0, sizeof(struct nfp_flower_meta_tci)); + + nfp_flower_compile_meta(ext, msk, key_type); + + if (!qinq_sup) + nfp_flower_compile_tci(ext, msk, rule); +} + +void nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext) { frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext); } -static int +int nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, bool mask_version, enum nfp_flower_tun_type tun_type, struct netlink_ext_ack *extack) @@ -74,28 +91,37 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, return 0; } -static int +void nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, - struct nfp_flower_mac_mpls *msk, struct flow_rule *rule, - struct netlink_ext_ack *extack) + struct nfp_flower_mac_mpls *msk, + struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_mac_mpls)); - memset(msk, 0, sizeof(struct nfp_flower_mac_mpls)); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; + int i; flow_rule_match_eth_addrs(rule, &match); /* Populate mac frame. 
*/ - ether_addr_copy(ext->mac_dst, &match.key->dst[0]); - ether_addr_copy(ext->mac_src, &match.key->src[0]); - ether_addr_copy(msk->mac_dst, &match.mask->dst[0]); - ether_addr_copy(msk->mac_src, &match.mask->src[0]); + for (i = 0; i < ETH_ALEN; i++) { + ext->mac_dst[i] |= match.key->dst[i] & + match.mask->dst[i]; + msk->mac_dst[i] |= match.mask->dst[i]; + ext->mac_src[i] |= match.key->src[i] & + match.mask->src[i]; + msk->mac_src[i] |= match.mask->src[i]; + } } +} +int +nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext, + struct nfp_flower_mac_mpls *msk, + struct flow_rule *rule, + struct netlink_ext_ack *extack) +{ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) { struct flow_match_mpls match; - u32 t_mpls; + u32 key_mpls, msk_mpls; flow_rule_match_mpls(rule, &match); @@ -106,22 +132,24 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, return -EOPNOTSUPP; } - t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, - match.key->ls[0].mpls_label) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, - match.key->ls[0].mpls_tc) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, - match.key->ls[0].mpls_bos) | - NFP_FLOWER_MASK_MPLS_Q; - ext->mpls_lse = cpu_to_be32(t_mpls); - t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, - match.mask->ls[0].mpls_label) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, - match.mask->ls[0].mpls_tc) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, - match.mask->ls[0].mpls_bos) | - NFP_FLOWER_MASK_MPLS_Q; - msk->mpls_lse = cpu_to_be32(t_mpls); + key_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, + match.key->ls[0].mpls_label) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, + match.key->ls[0].mpls_tc) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, + match.key->ls[0].mpls_bos) | + NFP_FLOWER_MASK_MPLS_Q; + + msk_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, + match.mask->ls[0].mpls_label) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, + match.mask->ls[0].mpls_tc) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, + match.mask->ls[0].mpls_bos) | + NFP_FLOWER_MASK_MPLS_Q; + + ext->mpls_lse |= cpu_to_be32((key_mpls & msk_mpls)); + msk->mpls_lse |= cpu_to_be32(msk_mpls); } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q * bit, which indicates an mpls ether type but without any @@ -132,30 +160,41 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, flow_rule_match_basic(rule, &match); if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) || match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) { - ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); - msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); + ext->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); + msk->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); } } return 0; } -static void +static int +nfp_flower_compile_mac_mpls(struct nfp_flower_mac_mpls *ext, + struct nfp_flower_mac_mpls *msk, + struct flow_rule *rule, + struct netlink_ext_ack *extack) +{ + memset(ext, 0, sizeof(struct nfp_flower_mac_mpls)); + memset(msk, 0, sizeof(struct nfp_flower_mac_mpls)); + + nfp_flower_compile_mac(ext, msk, rule); + + return nfp_flower_compile_mpls(ext, msk, rule, extack); +} + +void nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext, struct nfp_flower_tp_ports *msk, struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_tp_ports)); - memset(msk, 0, sizeof(struct nfp_flower_tp_ports)); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match; flow_rule_match_ports(rule, &match); - ext->port_src = match.key->src; - ext->port_dst = match.key->dst; - msk->port_src = 
match.mask->src; - msk->port_dst = match.mask->dst; + ext->port_src |= match.key->src & match.mask->src; + ext->port_dst |= match.key->dst & match.mask->dst; + msk->port_src |= match.mask->src; + msk->port_dst |= match.mask->dst; } } @@ -167,18 +206,18 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext, struct flow_match_basic match; flow_rule_match_basic(rule, &match); - ext->proto = match.key->ip_proto; - msk->proto = match.mask->ip_proto; + ext->proto |= match.key->ip_proto & match.mask->ip_proto; + msk->proto |= match.mask->ip_proto; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { struct flow_match_ip match; flow_rule_match_ip(rule, &match); - ext->tos = match.key->tos; - ext->ttl = match.key->ttl; - msk->tos = match.mask->tos; - msk->ttl = match.mask->ttl; + ext->tos |= match.key->tos & match.mask->tos; + ext->ttl |= match.key->ttl & match.mask->ttl; + msk->tos |= match.mask->tos; + msk->ttl |= match.mask->ttl; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { @@ -231,99 +270,108 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext, } static void -nfp_flower_fill_vlan(struct flow_dissector_key_vlan *key, - struct nfp_flower_vlan *frame, - bool outer_vlan) +nfp_flower_fill_vlan(struct flow_match_vlan *match, + struct nfp_flower_vlan *ext, + struct nfp_flower_vlan *msk, bool outer_vlan) { - u16 tci; - - tci = NFP_FLOWER_MASK_VLAN_PRESENT; - tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, - key->vlan_priority) | - FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, - key->vlan_id); + struct flow_dissector_key_vlan *mask = match->mask; + struct flow_dissector_key_vlan *key = match->key; + u16 msk_tci, key_tci; + + key_tci = NFP_FLOWER_MASK_VLAN_PRESENT; + key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + key->vlan_priority) | + FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, + key->vlan_id); + msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT; + msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + mask->vlan_priority) | + FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, + mask->vlan_id); if (outer_vlan) { - frame->outer_tci = cpu_to_be16(tci); - frame->outer_tpid = key->vlan_tpid; + ext->outer_tci |= cpu_to_be16((key_tci & msk_tci)); + ext->outer_tpid |= key->vlan_tpid & mask->vlan_tpid; + msk->outer_tci |= cpu_to_be16(msk_tci); + msk->outer_tpid |= mask->vlan_tpid; } else { - frame->inner_tci = cpu_to_be16(tci); - frame->inner_tpid = key->vlan_tpid; + ext->inner_tci |= cpu_to_be16((key_tci & msk_tci)); + ext->inner_tpid |= key->vlan_tpid & mask->vlan_tpid; + msk->inner_tci |= cpu_to_be16(msk_tci); + msk->inner_tpid |= mask->vlan_tpid; } } -static void +void nfp_flower_compile_vlan(struct nfp_flower_vlan *ext, struct nfp_flower_vlan *msk, struct flow_rule *rule) { struct flow_match_vlan match; - memset(ext, 0, sizeof(struct nfp_flower_vlan)); - memset(msk, 0, sizeof(struct nfp_flower_vlan)); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { flow_rule_match_vlan(rule, &match); - nfp_flower_fill_vlan(match.key, ext, true); - nfp_flower_fill_vlan(match.mask, msk, true); + nfp_flower_fill_vlan(&match, ext, msk, true); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { flow_rule_match_cvlan(rule, &match); - nfp_flower_fill_vlan(match.key, ext, false); - nfp_flower_fill_vlan(match.mask, msk, false); + nfp_flower_fill_vlan(&match, ext, msk, false); } } -static void +void nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext, struct nfp_flower_ipv4 *msk, struct flow_rule *rule) { - struct flow_match_ipv4_addrs match; - - memset(ext, 0, sizeof(struct nfp_flower_ipv4)); - memset(msk, 0, sizeof(struct 
nfp_flower_ipv4)); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + flow_rule_match_ipv4_addrs(rule, &match); - ext->ipv4_src = match.key->src; - ext->ipv4_dst = match.key->dst; - msk->ipv4_src = match.mask->src; - msk->ipv4_dst = match.mask->dst; + ext->ipv4_src |= match.key->src & match.mask->src; + ext->ipv4_dst |= match.key->dst & match.mask->dst; + msk->ipv4_src |= match.mask->src; + msk->ipv4_dst |= match.mask->dst; } nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); } -static void +void nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext, struct nfp_flower_ipv6 *msk, struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_ipv6)); - memset(msk, 0, sizeof(struct nfp_flower_ipv6)); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { struct flow_match_ipv6_addrs match; + int i; flow_rule_match_ipv6_addrs(rule, &match); - ext->ipv6_src = match.key->src; - ext->ipv6_dst = match.key->dst; - msk->ipv6_src = match.mask->src; - msk->ipv6_dst = match.mask->dst; + for (i = 0; i < sizeof(ext->ipv6_src); i++) { + ext->ipv6_src.s6_addr[i] |= match.key->src.s6_addr[i] & + match.mask->src.s6_addr[i]; + ext->ipv6_dst.s6_addr[i] |= match.key->dst.s6_addr[i] & + match.mask->dst.s6_addr[i]; + msk->ipv6_src.s6_addr[i] |= match.mask->src.s6_addr[i]; + msk->ipv6_dst.s6_addr[i] |= match.mask->dst.s6_addr[i]; + } } nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); } -static int -nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule) +void +nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule) { struct flow_match_enc_opts match; + int i; - flow_rule_match_enc_opts(rule, &match); - memcpy(ext, match.key->data, match.key->len); - memcpy(msk, match.mask->data, match.mask->len); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) { + flow_rule_match_enc_opts(rule, &match); - return 0; + for (i = 0; i < match.mask->len; i++) { + ext[i] |= match.key->data[i] & match.mask->data[i]; + msk[i] |= match.mask->data[i]; + } + } } static void @@ -335,10 +383,10 @@ nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext, struct flow_match_ipv4_addrs match; flow_rule_match_enc_ipv4_addrs(rule, &match); - ext->src = match.key->src; - ext->dst = match.key->dst; - msk->src = match.mask->src; - msk->dst = match.mask->dst; + ext->src |= match.key->src & match.mask->src; + ext->dst |= match.key->dst & match.mask->dst; + msk->src |= match.mask->src; + msk->dst |= match.mask->dst; } } @@ -349,12 +397,17 @@ nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext, { if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { struct flow_match_ipv6_addrs match; + int i; flow_rule_match_enc_ipv6_addrs(rule, &match); - ext->src = match.key->src; - ext->dst = match.key->dst; - msk->src = match.mask->src; - msk->dst = match.mask->dst; + for (i = 0; i < sizeof(ext->src); i++) { + ext->src.s6_addr[i] |= match.key->src.s6_addr[i] & + match.mask->src.s6_addr[i]; + ext->dst.s6_addr[i] |= match.key->dst.s6_addr[i] & + match.mask->dst.s6_addr[i]; + msk->src.s6_addr[i] |= match.mask->src.s6_addr[i]; + msk->dst.s6_addr[i] |= match.mask->dst.s6_addr[i]; + } } } @@ -367,10 +420,10 @@ nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext, struct flow_match_ip match; flow_rule_match_enc_ip(rule, &match); - ext->tos = match.key->tos; - ext->ttl = match.key->ttl; - msk->tos = match.mask->tos; - msk->ttl = match.mask->ttl; + ext->tos |= match.key->tos & 
match.mask->tos; + ext->ttl |= match.key->ttl & match.mask->ttl; + msk->tos |= match.mask->tos; + msk->ttl |= match.mask->ttl; } } @@ -383,10 +436,11 @@ nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk, u32 vni; flow_rule_match_enc_keyid(rule, &match); - vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET; - *key = cpu_to_be32(vni); + vni = be32_to_cpu((match.key->keyid & match.mask->keyid)) << + NFP_FL_TUN_VNI_OFFSET; + *key |= cpu_to_be32(vni); vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET; - *key_msk = cpu_to_be32(vni); + *key_msk |= cpu_to_be32(vni); } } @@ -398,22 +452,19 @@ nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags, struct flow_match_enc_keyid match; flow_rule_match_enc_keyid(rule, &match); - *key = match.key->keyid; - *key_msk = match.mask->keyid; + *key |= match.key->keyid & match.mask->keyid; + *key_msk |= match.mask->keyid; *flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY); *flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY); } } -static void +void nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext, struct nfp_flower_ipv4_gre_tun *msk, struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun)); - memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun)); - /* NVGRE is the only supported GRE tunnel type */ ext->ethertype = cpu_to_be16(ETH_P_TEB); msk->ethertype = cpu_to_be16(~0); @@ -424,40 +475,31 @@ nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext, &ext->tun_flags, &msk->tun_flags, rule); } -static void +void nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext, struct nfp_flower_ipv4_udp_tun *msk, struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); - memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); - nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule); nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule); } -static void +void nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext, struct nfp_flower_ipv6_udp_tun *msk, struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun)); - memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun)); - nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule); nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule); } -static void +void nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext, struct nfp_flower_ipv6_gre_tun *msk, struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun)); - memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun)); - /* NVGRE is the only supported GRE tunnel type */ ext->ethertype = cpu_to_be16(ETH_P_TEB); msk->ethertype = cpu_to_be16(~0); @@ -469,14 +511,13 @@ nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext, } int nfp_flower_compile_flow_match(struct nfp_app *app, - struct flow_cls_offload *flow, + struct flow_rule *rule, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, enum nfp_flower_tun_type tun_type, struct netlink_ext_ack *extack) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); struct nfp_flower_priv *priv = app->priv; bool qinq_sup; u32 port_id; @@ -527,9 +568,9 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, msk += sizeof(struct nfp_flower_in_port); if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) { - err = 
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext, - (struct nfp_flower_mac_mpls *)msk, - rule, extack); + err = nfp_flower_compile_mac_mpls((struct nfp_flower_mac_mpls *)ext, + (struct nfp_flower_mac_mpls *)msk, + rule, extack); if (err) return err; @@ -640,9 +681,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, } if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { - err = nfp_flower_compile_geneve_opt(ext, msk, rule); - if (err) - return err; + nfp_flower_compile_geneve_opt(ext, msk, rule); } } diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 621113650a9b..2af9faee96c5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -290,8 +290,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len, return true; } -int nfp_compile_flow_metadata(struct nfp_app *app, - struct flow_cls_offload *flow, +int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie, struct nfp_fl_payload *nfp_flow, struct net_device *netdev, struct netlink_ext_ack *extack) @@ -310,7 +309,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, } nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt); - nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie); + nfp_flow->meta.host_cookie = cpu_to_be64(cookie); nfp_flow->ingress_dev = netdev; ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL); @@ -357,7 +356,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, priv->stats[stats_cxt].bytes = 0; priv->stats[stats_cxt].used = jiffies; - check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev); + check_entry = nfp_flower_search_fl_table(app, cookie, netdev); if (check_entry) { NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry"); if (nfp_release_stats_entry(app, stats_cxt)) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 2406d33356ad..556c3495211d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -41,6 +41,8 @@ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \ BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \ BIT(FLOW_DISSECTOR_KEY_MPLS) | \ + BIT(FLOW_DISSECTOR_KEY_CT) | \ + BIT(FLOW_DISSECTOR_KEY_META) | \ BIT(FLOW_DISSECTOR_KEY_IP)) #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \ @@ -89,7 +91,7 @@ struct nfp_flower_merge_check { }; }; -static int +int nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, u8 mtype) { @@ -134,20 +136,16 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, return 0; } -static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f) +static bool nfp_flower_check_higher_than_mac(struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(f); - return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) || flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) || flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) || flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP); } -static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f) +static bool nfp_flower_check_higher_than_l3(struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(f); - return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) || flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP); } @@ -236,15 +234,14 @@ nfp_flower_calc_udp_tun_layer(struct 
flow_dissector_key_ports *enc_ports, return 0; } -static int +int nfp_flower_calculate_key_layers(struct nfp_app *app, struct net_device *netdev, struct nfp_fl_key_ls *ret_key_ls, - struct flow_cls_offload *flow, + struct flow_rule *rule, enum nfp_flower_tun_type *tun_type, struct netlink_ext_ack *extack) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); struct flow_dissector *dissector = rule->match.dissector; struct flow_match_basic basic = { NULL, NULL}; struct nfp_flower_priv *priv = app->priv; @@ -452,7 +449,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported"); return -EOPNOTSUPP; } - } else if (nfp_flower_check_higher_than_mac(flow)) { + } else if (nfp_flower_check_higher_than_mac(rule)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType"); return -EOPNOTSUPP; } @@ -471,7 +468,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } if (!(key_layer & NFP_FLOWER_LAYER_TP) && - nfp_flower_check_higher_than_l3(flow)) { + nfp_flower_check_higher_than_l3(rule)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type"); return -EOPNOTSUPP; } @@ -543,7 +540,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, return 0; } -static struct nfp_fl_payload * +struct nfp_fl_payload * nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) { struct nfp_fl_payload *flow_pay; @@ -1005,9 +1002,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, struct nfp_fl_payload *sub_flow1, struct nfp_fl_payload *sub_flow2) { - struct flow_cls_offload merge_tc_off; struct nfp_flower_priv *priv = app->priv; - struct netlink_ext_ack *extack = NULL; struct nfp_fl_payload *merge_flow; struct nfp_fl_key_ls merge_key_ls; struct nfp_merge_info *merge_info; @@ -1016,7 +1011,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, ASSERT_RTNL(); - extack = merge_tc_off.common.extack; if (sub_flow1 == sub_flow2 || nfp_flower_is_merge_flow(sub_flow1) || nfp_flower_is_merge_flow(sub_flow2)) @@ -1061,9 +1055,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, if (err) goto err_unlink_sub_flow1; - merge_tc_off.cookie = merge_flow->tc_flower_cookie; - err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow, - merge_flow->ingress_dev, extack); + err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow, + merge_flow->ingress_dev, NULL); if (err) goto err_unlink_sub_flow2; @@ -1305,6 +1298,7 @@ static int nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, struct flow_cls_offload *flow) { + struct flow_rule *rule = flow_cls_offload_flow_rule(flow); enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; struct nfp_flower_priv *priv = app->priv; struct netlink_ext_ack *extack = NULL; @@ -1330,7 +1324,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (!key_layer) return -ENOMEM; - err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow, + err = nfp_flower_calculate_key_layers(app, netdev, key_layer, rule, &tun_type, extack); if (err) goto err_free_key_ls; @@ -1341,12 +1335,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, goto err_free_key_ls; } - err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev, + err = nfp_flower_compile_flow_match(app, rule, key_layer, netdev, flow_pay, tun_type, extack); if (err) goto err_destroy_flow; - err = 
nfp_flower_compile_action(app, flow, netdev, flow_pay, extack); + err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack); if (err) goto err_destroy_flow; @@ -1356,7 +1350,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, goto err_destroy_flow; } - err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack); + err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack); if (err) goto err_destroy_flow; @@ -1476,7 +1470,7 @@ err_free_links: kfree_rcu(merge_flow, rcu); } -static void +void nfp_flower_del_linked_merge_flows(struct nfp_app *app, struct nfp_fl_payload *sub_flow) { @@ -1601,7 +1595,7 @@ __nfp_flower_update_merge_stats(struct nfp_app *app, } } -static void +void nfp_flower_update_merge_stats(struct nfp_app *app, struct nfp_fl_payload *sub_flow) { @@ -1628,10 +1622,17 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, struct flow_cls_offload *flow) { struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_ct_map_entry *ct_map_ent; struct netlink_ext_ack *extack = NULL; struct nfp_fl_payload *nfp_flow; u32 ctx_id; + /* Check ct_map table first */ + ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie, + nfp_ct_map_params); + if (ct_map_ent) + return nfp_fl_ct_stats(flow, ct_map_ent); + extack = flow->common.extack; nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev); if (!nfp_flow) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 742a420152b3..bb3b8a7f6c5d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -692,7 +692,7 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_pci_disable; } - devlink = devlink_alloc(&nfp_devlink_ops, sizeof(*pf)); + devlink = devlink_alloc(&nfp_devlink_ops, sizeof(*pf), &pdev->dev); if (!devlink) { err = -ENOMEM; goto err_rel_regions; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index df5b748be068..df203738511b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -17,6 +17,7 @@ #include <linux/list.h> #include <linux/netdevice.h> #include <linux/pci.h> +#include <linux/dim.h> #include <linux/io-64-nonatomic-hi-lo.h> #include <linux/semaphore.h> #include <linux/workqueue.h> @@ -360,6 +361,9 @@ struct nfp_net_rx_ring { * @rx_ring: Pointer to RX ring * @xdp_ring: Pointer to an extra TX ring for XDP * @irq_entry: MSI-X table entry (use for talking to the device) + * @event_ctr: Number of interrupt + * @rx_dim: Dynamic interrupt moderation structure for RX + * @tx_dim: Dynamic interrupt moderation structure for TX * @rx_sync: Seqlock for atomic updates of RX stats * @rx_pkts: Number of received packets * @rx_bytes: Number of received bytes @@ -410,6 +414,10 @@ struct nfp_net_r_vector { u16 irq_entry; + u16 event_ctr; + struct dim rx_dim; + struct dim tx_dim; + struct u64_stats_sync rx_sync; u64 rx_pkts; u64 rx_bytes; @@ -571,6 +579,8 @@ struct nfp_net_dp { * mailbox area, crypto TLV * @link_up: Is the link up? * @link_status_lock: Protects @link_* and ensures atomicity with BAR reading + * @rx_coalesce_adapt_on: Is RX interrupt moderation adaptive? + * @tx_coalesce_adapt_on: Is TX interrupt moderation adaptive? 
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter * @tx_coalesce_usecs: TX interrupt moderation usecs delay parameter @@ -654,6 +664,8 @@ struct nfp_net { struct semaphore bar_lock; + bool rx_coalesce_adapt_on; + bool tx_coalesce_adapt_on; u32 rx_coalesce_usecs; u32 rx_coalesce_max_frames; u32 tx_coalesce_usecs; @@ -919,6 +931,14 @@ static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev) return netdev->netdev_ops == &nfp_net_netdev_ops; } +static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts) +{ + if ((usecs >= ((1 << 16) - 1)) || (pkts >= ((1 << 16) - 1))) + return -EINVAL; + + return 0; +} + /* Prototypes */ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, void __iomem *ctrl_bar); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 5dfa4799c34f..5bfa22accf2c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -474,6 +474,12 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data) { struct nfp_net_r_vector *r_vec = data; + /* Currently we cannot tell if it's a rx or tx interrupt, + * since dim does not need accurate event_ctr to calculate, + * we just use this counter for both rx and tx dim. + */ + r_vec->event_ctr++; + napi_schedule_irqoff(&r_vec->napi); /* The FW auto-masks any interrupt, either via the MASK bit in @@ -1697,7 +1703,7 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta, case NFP_NET_META_RESYNC_INFO: if (nfp_net_tls_rx_resync_req(netdev, data, pkt, pkt_len)) - return NULL; + return false; data += sizeof(struct nfp_net_tls_resync_req); break; default: @@ -2061,6 +2067,36 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) if (napi_complete_done(napi, pkts_polled)) nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); + if (r_vec->nfp_net->rx_coalesce_adapt_on) { + struct dim_sample dim_sample = {}; + unsigned int start; + u64 pkts, bytes; + + do { + start = u64_stats_fetch_begin(&r_vec->rx_sync); + pkts = r_vec->rx_pkts; + bytes = r_vec->rx_bytes; + } while (u64_stats_fetch_retry(&r_vec->rx_sync, start)); + + dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample); + net_dim(&r_vec->rx_dim, dim_sample); + } + + if (r_vec->nfp_net->tx_coalesce_adapt_on) { + struct dim_sample dim_sample = {}; + unsigned int start; + u64 pkts, bytes; + + do { + start = u64_stats_fetch_begin(&r_vec->tx_sync); + pkts = r_vec->tx_pkts; + bytes = r_vec->tx_bytes; + } while (u64_stats_fetch_retry(&r_vec->tx_sync, start)); + + dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample); + net_dim(&r_vec->tx_dim, dim_sample); + } + return pkts_polled; } @@ -2873,6 +2909,7 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn) */ static void nfp_net_close_stack(struct nfp_net *nn) { + struct nfp_net_r_vector *r_vec; unsigned int r; disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); @@ -2880,8 +2917,16 @@ static void nfp_net_close_stack(struct nfp_net *nn) nn->link_up = false; for (r = 0; r < nn->dp.num_r_vecs; r++) { - disable_irq(nn->r_vecs[r].irq_vector); - napi_disable(&nn->r_vecs[r].napi); + r_vec = &nn->r_vecs[r]; + + disable_irq(r_vec->irq_vector); + napi_disable(&r_vec->napi); + + if (r_vec->rx_ring) + cancel_work_sync(&r_vec->rx_dim.work); + + if (r_vec->tx_ring) + cancel_work_sync(&r_vec->tx_dim.work); } netif_tx_disable(nn->dp.netdev); @@ -2948,17 
+2993,92 @@ void nfp_ctrl_close(struct nfp_net *nn) rtnl_unlock(); } +static void nfp_net_rx_dim_work(struct work_struct *work) +{ + struct nfp_net_r_vector *r_vec; + unsigned int factor, value; + struct dim_cq_moder moder; + struct nfp_net *nn; + struct dim *dim; + + dim = container_of(work, struct dim, work); + moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + r_vec = container_of(dim, struct nfp_net_r_vector, rx_dim); + nn = r_vec->nfp_net; + + /* Compute factor used to convert coalesce '_usecs' parameters to + * ME timestamp ticks. There are 16 ME clock cycles for each timestamp + * count. + */ + factor = nn->tlv_caps.me_freq_mhz / 16; + if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts)) + return; + + /* copy RX interrupt coalesce parameters */ + value = (moder.pkts << 16) | (factor * moder.usec); + rtnl_lock(); + nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(r_vec->rx_ring->idx), value); + (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD); + rtnl_unlock(); + + dim->state = DIM_START_MEASURE; +} + +static void nfp_net_tx_dim_work(struct work_struct *work) +{ + struct nfp_net_r_vector *r_vec; + unsigned int factor, value; + struct dim_cq_moder moder; + struct nfp_net *nn; + struct dim *dim; + + dim = container_of(work, struct dim, work); + moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix); + r_vec = container_of(dim, struct nfp_net_r_vector, tx_dim); + nn = r_vec->nfp_net; + + /* Compute factor used to convert coalesce '_usecs' parameters to + * ME timestamp ticks. There are 16 ME clock cycles for each timestamp + * count. + */ + factor = nn->tlv_caps.me_freq_mhz / 16; + if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts)) + return; + + /* copy TX interrupt coalesce parameters */ + value = (moder.pkts << 16) | (factor * moder.usec); + rtnl_lock(); + nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(r_vec->tx_ring->idx), value); + (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD); + rtnl_unlock(); + + dim->state = DIM_START_MEASURE; +} + /** * nfp_net_open_stack() - Start the device from stack's perspective * @nn: NFP Net device to reconfigure */ static void nfp_net_open_stack(struct nfp_net *nn) { + struct nfp_net_r_vector *r_vec; unsigned int r; for (r = 0; r < nn->dp.num_r_vecs; r++) { - napi_enable(&nn->r_vecs[r].napi); - enable_irq(nn->r_vecs[r].irq_vector); + r_vec = &nn->r_vecs[r]; + + if (r_vec->rx_ring) { + INIT_WORK(&r_vec->rx_dim.work, nfp_net_rx_dim_work); + r_vec->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + } + + if (r_vec->tx_ring) { + INIT_WORK(&r_vec->tx_dim.work, nfp_net_tx_dim_work); + r_vec->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + } + + napi_enable(&r_vec->napi); + enable_irq(r_vec->irq_vector); } netif_tx_wake_all_queues(nn->dp.netdev); @@ -3161,17 +3281,12 @@ static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp) for (r = 0; r < nn->max_r_vecs; r++) nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); - err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings); + err = netif_set_real_num_queues(nn->dp.netdev, + nn->dp.num_stack_tx_rings, + nn->dp.num_rx_rings); if (err) return err; - if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) { - err = netif_set_real_num_tx_queues(nn->dp.netdev, - nn->dp.num_stack_tx_rings); - if (err) - return err; - } - return nfp_net_set_config_and_enable(nn); } @@ -3893,6 +4008,9 @@ static void nfp_net_irqmod_init(struct nfp_net *nn) nn->rx_coalesce_max_frames = 64; nn->tx_coalesce_usecs = 50; nn->tx_coalesce_max_frames = 
64; + + nn->rx_coalesce_adapt_on = true; + nn->tx_coalesce_adapt_on = true; } static void nfp_net_netdev_init(struct nfp_net *nn) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 8803faadd302..0685ece1f155 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1078,13 +1078,18 @@ static void nfp_net_get_regs(struct net_device *netdev, } static int nfp_net_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct nfp_net *nn = netdev_priv(netdev); if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD)) return -EINVAL; + ec->use_adaptive_rx_coalesce = nn->rx_coalesce_adapt_on; + ec->use_adaptive_tx_coalesce = nn->tx_coalesce_adapt_on; + ec->rx_coalesce_usecs = nn->rx_coalesce_usecs; ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames; ec->tx_coalesce_usecs = nn->tx_coalesce_usecs; @@ -1327,7 +1332,9 @@ exit_close_nsp: } static int nfp_net_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct nfp_net *nn = netdev_priv(netdev); unsigned int factor; @@ -1361,19 +1368,18 @@ static int nfp_net_set_coalesce(struct net_device *netdev, if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames) return -EINVAL; - if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1)) - return -EINVAL; - - if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1)) + if (nfp_net_coalesce_para_check(ec->rx_coalesce_usecs * factor, + ec->rx_max_coalesced_frames)) return -EINVAL; - if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1)) - return -EINVAL; - - if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1)) + if (nfp_net_coalesce_para_check(ec->tx_coalesce_usecs * factor, + ec->tx_max_coalesced_frames)) return -EINVAL; /* configuration is valid */ + nn->rx_coalesce_adapt_on = !!ec->use_adaptive_rx_coalesce; + nn->tx_coalesce_adapt_on = !!ec->use_adaptive_tx_coalesce; + nn->rx_coalesce_usecs = ec->rx_coalesce_usecs; nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames; nn->tx_coalesce_usecs = ec->tx_coalesce_usecs; @@ -1445,7 +1451,8 @@ static int nfp_net_set_channels(struct net_device *netdev, static const struct ethtool_ops nfp_net_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES, + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE, .get_drvinfo = nfp_net_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = nfp_net_get_ringparam, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 921db40047d7..d10a93801344 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -701,7 +701,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_unmap; - err = devlink_register(devlink, &pf->pdev->dev); + err = devlink_register(devlink); if (err) goto err_app_clean; diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 2d097dcb7bda..346145d3180e 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -993,8 +993,11 @@ static void nixge_ethtools_get_drvinfo(struct net_device *ndev, strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info)); } -static int 
nixge_ethtools_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ecoalesce) +static int +nixge_ethtools_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ecoalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct nixge_priv *priv = netdev_priv(ndev); u32 regval = 0; @@ -1008,8 +1011,11 @@ static int nixge_ethtools_get_coalesce(struct net_device *ndev, return 0; } -static int nixge_ethtools_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ecoalesce) +static int +nixge_ethtools_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ecoalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct nixge_priv *priv = netdev_priv(ndev); @@ -1223,7 +1229,6 @@ static int nixge_of_get_resources(struct platform_device *pdev) { const struct of_device_id *of_id; enum nixge_version version; - struct resource *ctrlres; struct net_device *ndev; struct nixge_priv *priv; @@ -1242,13 +1247,10 @@ static int nixge_of_get_resources(struct platform_device *pdev) netdev_err(ndev, "failed to map dma regs\n"); return PTR_ERR(priv->dma_regs); } - if (version <= NIXGE_V2) { + if (version <= NIXGE_V2) priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET; - } else { - ctrlres = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "ctrl"); - priv->ctrl_regs = devm_ioremap_resource(&pdev->dev, ctrlres); - } + else + priv->ctrl_regs = devm_platform_ioremap_resource_byname(pdev, "ctrl"); if (IS_ERR(priv->ctrl_regs)) { netdev_err(ndev, "failed to map ctrl regs\n"); return PTR_ERR(priv->ctrl_regs); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 8724d6a9ed02..ef3fb4cc90af 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -5782,15 +5782,11 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) np->desc_ver = DESC_VER_3; np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; if (dma_64bit) { - if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39))) + if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(39))) dev_info(&pci_dev->dev, "64-bit DMA failed, using 32-bit addressing\n"); else dev->features |= NETIF_F_HIGHDMA; - if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) { - dev_info(&pci_dev->dev, - "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); - } } } else if (id->driver_data & DEV_HAS_LARGEDESC) { /* packet format 2: supports jumbo frames */ diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 64c6842bd452..d29fe562b3de 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1219,7 +1219,7 @@ static const struct net_device_ops lpc_netdev_ops = { .ndo_stop = lpc_eth_close, .ndo_start_xmit = lpc_eth_hard_start_xmit, .ndo_set_rx_mode = lpc_eth_set_multicast_list, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_mac_address = lpc_set_mac_address, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index af84f72bf08e..4e18b64dceb9 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig @@ -6,6 +6,7 @@ config PCH_GBE tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" depends on PCI && (X86_32 || COMPILE_TEST) + depends on PTP_1588_CLOCK select MII select PTP_1588_CLOCK_PCH select 
NET_PTP_CLASSIFY diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index e351f3d1608f..ec3e558f890e 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1031,13 +1031,7 @@ static void pch_gbe_watchdog(struct timer_list *t) struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; netdev->tx_queue_len = adapter->tx_queue_len; /* mii library handles link maintenance tasks */ - if (mii_ethtool_gset(&adapter->mii, &cmd)) { - netdev_err(netdev, "ethtool get setting Error\n"); - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + - PCH_GBE_WATCHDOG_PERIOD)); - return; - } + mii_ethtool_gset(&adapter->mii, &cmd); hw->mac.link_speed = ethtool_cmd_speed(&cmd); hw->mac.link_duplex = cmd.duplex; /* Set the RGMII control. */ @@ -2333,7 +2327,7 @@ static const struct net_device_ops pch_gbe_netdev_ops = { .ndo_tx_timeout = pch_gbe_tx_timeout, .ndo_change_mtu = pch_gbe_change_mtu, .ndo_set_features = pch_gbe_set_features, - .ndo_do_ioctl = pch_gbe_ioctl, + .ndo_eth_ioctl = pch_gbe_ioctl, .ndo_set_rx_mode = pch_gbe_set_multi, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = pch_gbe_netpoll, diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c index ed832046216a..3426f6fa2b57 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c @@ -301,9 +301,7 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw) int ret; u16 mii_reg; - ret = mii_ethtool_gset(&adapter->mii, &cmd); - if (ret) - netdev_err(adapter->netdev, "Error: mii_ethtool_gset\n"); + mii_ethtool_gset(&adapter->mii, &cmd); ethtool_cmd_speed_set(&cmd, hw->mac.link_speed); cmd.duplex = hw->mac.link_duplex; diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index d058a63602a9..1a6336a56d3d 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -546,7 +546,9 @@ static int read_eeprom(void __iomem *ioaddr, int location); static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *dev, int phy_id, int location, int value); static int hamachi_open(struct net_device *dev); -static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int hamachi_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int hamachi_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd); static void hamachi_timer(struct timer_list *t); static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue); static void hamachi_init_ring(struct net_device *dev); @@ -571,7 +573,8 @@ static const struct net_device_ops hamachi_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_tx_timeout = hamachi_tx_timeout, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = hamachi_ioctl, + .ndo_siocdevprivate = hamachi_siocdevprivate, }; @@ -1867,7 +1870,36 @@ static const struct ethtool_ops ethtool_ops_no_mii = { .get_drvinfo = hamachi_get_drvinfo, }; -static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +/* private ioctl: set rx,tx intr params */ +static int hamachi_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd) +{ + struct hamachi_private *np = netdev_priv(dev); + u32 *d 
= (u32 *)&rq->ifr_ifru; + + if (!netif_running(dev)) + return -EINVAL; + + if (cmd != SIOCDEVPRIVATE + 3) + return -EOPNOTSUPP; + + /* Should add this check here or an ordinary user can do nasty + * things. -KDU + * + * TODO: Shut down the Rx and Tx engines while doing this. + */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + writel(d[0], np->base + TxIntrCtrl); + writel(d[1], np->base + RxIntrCtrl); + printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name, + (u32)readl(np->base + TxIntrCtrl), + (u32)readl(np->base + RxIntrCtrl)); + + return 0; +} + +static int hamachi_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct hamachi_private *np = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(rq); @@ -1876,28 +1908,9 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!netif_running(dev)) return -EINVAL; - if (cmd == (SIOCDEVPRIVATE+3)) { /* set rx,tx intr params */ - u32 *d = (u32 *)&rq->ifr_ifru; - /* Should add this check here or an ordinary user can do nasty - * things. -KDU - * - * TODO: Shut down the Rx and Tx engines while doing this. - */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - writel(d[0], np->base + TxIntrCtrl); - writel(d[1], np->base + RxIntrCtrl); - printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name, - (u32) readl(np->base + TxIntrCtrl), - (u32) readl(np->base + RxIntrCtrl)); - rc = 0; - } - - else { - spin_lock_irq(&np->lock); - rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL); - spin_unlock_irq(&np->lock); - } + spin_lock_irq(&np->lock); + rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL); + spin_unlock_irq(&np->lock); return rc; } diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index d1dd9bc1bc7f..f5cd8f51be7c 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -362,7 +362,7 @@ static const struct net_device_ops netdev_ops = { .ndo_set_rx_mode = set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = yellowfin_tx_timeout, }; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 040a15a828b4..7e096b2888b9 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -247,12 +247,13 @@ static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, int f; struct pci_dev *pdev = mac->dma_pdev; - pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, dmas[0], skb_headlen(skb), DMA_TO_DEVICE); for (f = 0; f < nfrags; f++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; - pci_unmap_page(pdev, dmas[f+1], skb_frag_size(frag), PCI_DMA_TODEVICE); + dma_unmap_page(&pdev->dev, dmas[f + 1], skb_frag_size(frag), + DMA_TO_DEVICE); } dev_kfree_skb_irq(skb); @@ -548,10 +549,8 @@ static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac) for (i = 0; i < RX_RING_SIZE; i++) { info = &RX_DESC_INFO(rx, i); if (info->skb && info->dma) { - pci_unmap_single(mac->dma_pdev, - info->dma, - info->skb->len, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&mac->dma_pdev->dev, info->dma, + info->skb->len, DMA_FROM_DEVICE); dev_kfree_skb_any(info->skb); } info->dma = 0; @@ -600,11 +599,11 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev, if (unlikely(!skb)) break; - dma = pci_map_single(mac->dma_pdev, skb->data, + dma = 
dma_map_single(&mac->dma_pdev->dev, skb->data, mac->bufsz - LOCAL_SKB_ALIGN, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); - if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) { + if (dma_mapping_error(&mac->dma_pdev->dev, dma)) { dev_kfree_skb_irq(info->skb); break; } @@ -741,8 +740,9 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; - pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&pdev->dev, dma, + mac->bufsz - LOCAL_SKB_ALIGN, + DMA_FROM_DEVICE); if (macrx & XCT_MACRX_CRC) { /* CRC error flagged */ @@ -1444,10 +1444,10 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) nfrags = skb_shinfo(skb)->nr_frags; - map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), - PCI_DMA_TODEVICE); + map[0] = dma_map_single(&mac->dma_pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); map_size[0] = skb_headlen(skb); - if (pci_dma_mapping_error(mac->dma_pdev, map[0])) + if (dma_mapping_error(&mac->dma_pdev->dev, map[0])) goto out_err_nolock; for (i = 0; i < nfrags; i++) { @@ -1534,8 +1534,8 @@ out_err: spin_unlock_irqrestore(&txring->lock, flags); out_err_nolock: while (nfrags--) - pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags], - PCI_DMA_TODEVICE); + dma_unmap_single(&mac->dma_pdev->dev, map[nfrags], + map_size[nfrags], DMA_TO_DEVICE); return NETDEV_TX_BUSY; } diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig index 202973a82712..3f7519e435b8 100644 --- a/drivers/net/ethernet/pensando/Kconfig +++ b/drivers/net/ethernet/pensando/Kconfig @@ -20,7 +20,7 @@ if NET_VENDOR_PENSANDO config IONIC tristate "Pensando Ethernet IONIC Support" depends on 64BIT && PCI - depends on PTP_1588_CLOCK || !PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL select NET_DEVLINK select DIMLIB help diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index e4a5416adc80..7e296fa71b36 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -165,10 +165,10 @@ static int ionic_vf_alloc(struct ionic *ionic, int num_vfs) goto out; } + ionic->num_vfs++; /* ignore failures from older FW, we just won't get stats */ (void)ionic_set_vf_config(ionic, i, IONIC_VF_ATTR_STATSADDR, (u8 *)&v->stats_pa); - ionic->num_vfs++; } out: @@ -373,9 +373,6 @@ static void ionic_remove(struct pci_dev *pdev) { struct ionic *ionic = pci_get_drvdata(pdev); - if (!ionic) - return; - del_timer_sync(&ionic->watchdog_timer); if (ionic->lif) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 1dfe962e22e0..0d6858ab511c 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -15,6 +15,7 @@ static void ionic_watchdog_cb(struct timer_list *t) { struct ionic *ionic = from_timer(ionic, t, watchdog_timer); struct ionic_lif *lif = ionic->lif; + struct ionic_deferred_work *work; int hb; mod_timer(&ionic->watchdog_timer, @@ -31,6 +32,18 @@ static void ionic_watchdog_cb(struct timer_list *t) if (hb >= 0 && !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) ionic_link_status_check_request(lif, CAN_NOT_SLEEP); + + if (test_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state)) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "rxmode change dropped\n"); + return; + } + 
+ work->type = IONIC_DW_TYPE_RX_MODE; + netdev_dbg(lif->netdev, "deferred: rx_mode\n"); + ionic_lif_deferred_enqueue(&lif->deferred, work); + } } void ionic_init_devinfo(struct ionic *ionic) @@ -106,6 +119,8 @@ int ionic_dev_setup(struct ionic *ionic) idev->last_fw_hb = 0; idev->fw_hb_ready = true; idev->fw_status_ready = true; + idev->fw_generation = IONIC_FW_STS_F_GENERATION & + ioread8(&idev->dev_info_regs->fw_status); mod_timer(&ionic->watchdog_timer, round_jiffies(jiffies + ionic->watchdog_period)); @@ -121,7 +136,9 @@ int ionic_heartbeat_check(struct ionic *ionic) { struct ionic_dev *idev = &ionic->idev; unsigned long check_time, last_check_time; - bool fw_status_ready, fw_hb_ready; + bool fw_status_ready = true; + bool fw_hb_ready; + u8 fw_generation; u8 fw_status; u32 fw_hb; @@ -140,9 +157,29 @@ do_check_time: /* firmware is useful only if the running bit is set and * fw_status != 0xff (bad PCI read) + * If fw_status is not ready don't bother with the generation. */ fw_status = ioread8(&idev->dev_info_regs->fw_status); - fw_status_ready = (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING); + + if (fw_status == 0xff || !(fw_status & IONIC_FW_STS_F_RUNNING)) { + fw_status_ready = false; + } else { + fw_generation = fw_status & IONIC_FW_STS_F_GENERATION; + if (idev->fw_generation != fw_generation) { + dev_info(ionic->dev, "FW generation 0x%02x -> 0x%02x\n", + idev->fw_generation, fw_generation); + + idev->fw_generation = fw_generation; + + /* If the generation changed, the fw status is not + * ready so we need to trigger a fw-down cycle. After + * the down, the next watchdog will see the fw is up + * and the generation value stable, so will trigger + * the fw-up activity. + */ + fw_status_ready = false; + } + } /* is this a transition? 
*/ if (fw_status_ready != idev->fw_status_ready) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index c25cf9b744c5..8311086fb1f4 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -143,6 +143,7 @@ struct ionic_dev { u32 last_fw_hb; bool fw_hb_ready; bool fw_status_ready; + u8 fw_generation; u64 __iomem *db_pages; dma_addr_t phy_db_pages; @@ -160,8 +161,6 @@ struct ionic_dev { struct ionic_cq_info { union { void *cq_desc; - struct ionic_txq_comp *txcq; - struct ionic_rxq_comp *rxcq; struct ionic_admin_comp *admincq; struct ionic_notifyq_event *notifyq; }; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c index b41301a5b0df..c7d0e195d176 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c @@ -64,7 +64,7 @@ struct ionic *ionic_devlink_alloc(struct device *dev) { struct devlink *dl; - dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic)); + dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic), dev); return devlink_priv(dl); } @@ -82,7 +82,7 @@ int ionic_devlink_register(struct ionic *ionic) struct devlink_port_attrs attrs = {}; int err; - err = devlink_register(dl, ionic->dev); + err = devlink_register(dl); if (err) { dev_warn(ionic->dev, "devlink_register failed: %d\n", err); return err; @@ -91,20 +91,20 @@ int ionic_devlink_register(struct ionic *ionic) attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; devlink_port_attrs_set(&ionic->dl_port, &attrs); err = devlink_port_register(dl, &ionic->dl_port, 0); - if (err) + if (err) { dev_err(ionic->dev, "devlink_port_register failed: %d\n", err); - else - devlink_port_type_eth_set(&ionic->dl_port, - ionic->lif->netdev); + devlink_unregister(dl); + return err; + } - return err; + devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev); + return 0; } void ionic_devlink_unregister(struct ionic *ionic) { struct devlink *dl = priv_to_devlink(ionic); - if (ionic->dl_port.registered) - devlink_port_unregister(&ionic->dl_port); + devlink_port_unregister(&ionic->dl_port); devlink_unregister(dl); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 6583be570e45..e91b4874a57f 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -32,6 +32,9 @@ static void ionic_get_stats(struct net_device *netdev, struct ionic_lif *lif = netdev_priv(netdev); u32 i; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return; + memset(buf, 0, stats->n_stats * sizeof(*buf)); for (i = 0; i < ionic_num_stats_grps; i++) ionic_stats_groups[i].get_values(lif, &buf); @@ -274,6 +277,9 @@ static int ionic_set_link_ksettings(struct net_device *netdev, struct ionic *ionic = lif->ionic; int err = 0; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + /* set autoneg */ if (ks->base.autoneg != idev->port_info->config.an_enable) { mutex_lock(&ionic->dev_cmd_lock); @@ -320,6 +326,9 @@ static int ionic_set_pauseparam(struct net_device *netdev, u32 requested_pause; int err; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + if (pause->autoneg) return -EOPNOTSUPP; @@ -372,6 +381,9 @@ static int ionic_set_fecparam(struct net_device *netdev, u8 fec_type; int ret = 0; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + if 
(lif->ionic->idev.port_info->config.an_enable) { netdev_err(netdev, "FEC request not allowed while autoneg is enabled\n"); return -EINVAL; @@ -408,7 +420,9 @@ static int ionic_set_fecparam(struct net_device *netdev, } static int ionic_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ionic_lif *lif = netdev_priv(netdev); @@ -426,7 +440,9 @@ static int ionic_get_coalesce(struct net_device *netdev, } static int ionic_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ionic_lif *lif = netdev_priv(netdev); struct ionic_identity *ident; @@ -528,6 +544,9 @@ static int ionic_set_ringparam(struct net_device *netdev, struct ionic_queue_params qparam; int err; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + ionic_init_queue_params(lif, &qparam); if (ring->rx_mini_pending || ring->rx_jumbo_pending) { @@ -597,6 +616,9 @@ static int ionic_set_channels(struct net_device *netdev, int max_cnt; int err; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + ionic_init_queue_params(lif, &qparam); if (ch->rx_count != ch->tx_count) { @@ -947,6 +969,9 @@ static int ionic_nway_reset(struct net_device *netdev) struct ionic *ionic = lif->ionic; int err = 0; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + /* flap the link to force auto-negotiation */ mutex_lock(&ionic->dev_cmd_lock); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 0478b48d9895..278610ed7227 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -2936,6 +2936,8 @@ struct ionic_hwstamp_regs { * @asic_type: Asic type * @asic_rev: Asic revision * @fw_status: Firmware status + * bit 0 - 1 = fw running + * bit 4-7 - 4 bit generation number, changes on fw restart * @fw_heartbeat: Firmware heartbeat counter * @serial_num: Serial number * @fw_version: Firmware version @@ -2949,7 +2951,8 @@ union ionic_dev_info_regs { u8 version; u8 asic_type; u8 asic_rev; -#define IONIC_FW_STS_F_RUNNING 0x1 +#define IONIC_FW_STS_F_RUNNING 0x01 +#define IONIC_FW_STS_F_GENERATION 0xF0 u8 fw_status; u32 fw_heartbeat; char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN]; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index e795fa63ca12..23c9e196a784 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -11,6 +11,7 @@ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/cpumask.h> +#include <linux/crash_dump.h> #include "ionic.h" #include "ionic_bus.h" @@ -29,9 +30,6 @@ static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = { */ }; -static void ionic_lif_rx_mode(struct ionic_lif *lif); -static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); -static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); static void ionic_link_status_check(struct ionic_lif *lif); static void ionic_lif_handle_fw_down(struct ionic_lif *lif); static void ionic_lif_handle_fw_up(struct ionic_lif *lif); @@ -91,20 +89,21 @@ static void ionic_lif_deferred_work(struct work_struct *work) case IONIC_DW_TYPE_RX_MODE: ionic_lif_rx_mode(lif); break; - case 
IONIC_DW_TYPE_RX_ADDR_ADD: - ionic_lif_addr_add(lif, w->addr); - break; - case IONIC_DW_TYPE_RX_ADDR_DEL: - ionic_lif_addr_del(lif, w->addr); - break; case IONIC_DW_TYPE_LINK_STATUS: ionic_link_status_check(lif); break; case IONIC_DW_TYPE_LIF_RESET: - if (w->fw_status) + if (w->fw_status) { ionic_lif_handle_fw_up(lif); - else + } else { ionic_lif_handle_fw_down(lif); + + /* Fire off another watchdog to see + * if the FW is already back rather than + * waiting another whole cycle + */ + mod_timer(&lif->ionic->watchdog_timer, jiffies + 1); + } break; default: break; @@ -850,10 +849,8 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) u64 features; int err; - mutex_lock(&lif->queue_lock); - if (lif->hwstamp_txq) - goto out; + return 0; features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP; @@ -895,9 +892,6 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) } } -out: - mutex_unlock(&lif->queue_lock); - return 0; err_qcq_enable: @@ -908,7 +902,6 @@ err_qcq_init: ionic_qcq_free(lif, txq); devm_kfree(lif->ionic->dev, txq); err_qcq_alloc: - mutex_unlock(&lif->queue_lock); return err; } @@ -920,10 +913,8 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) u64 features; int err; - mutex_lock(&lif->queue_lock); - if (lif->hwstamp_rxq) - goto out; + return 0; features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP; @@ -961,9 +952,6 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) } } -out: - mutex_unlock(&lif->queue_lock); - return 0; err_qcq_enable: @@ -974,7 +962,6 @@ err_qcq_init: ionic_qcq_free(lif, rxq); devm_kfree(lif->ionic->dev, rxq); err_qcq_alloc: - mutex_unlock(&lif->queue_lock); return err; } @@ -1077,7 +1064,11 @@ static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) if (err && err != -EEXIST) return err; - return ionic_rx_filter_save(lif, 0, qid, 0, &ctx); + spin_lock_bh(&lif->rx_filters.lock); + err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED); + spin_unlock_bh(&lif->rx_filters.lock); + + return err; } int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) @@ -1250,7 +1241,7 @@ void ionic_get_stats64(struct net_device *netdev, ns->tx_errors = ns->tx_aborted_errors; } -static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) +int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) { struct ionic_admin_ctx ctx = { .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), @@ -1260,27 +1251,83 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), }, }; + int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); + bool mc = is_multicast_ether_addr(addr); struct ionic_rx_filter *f; - int err; + int err = 0; + + memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); - /* don't bother if we already have it */ spin_lock_bh(&lif->rx_filters.lock); f = ionic_rx_filter_by_addr(lif, addr); + if (f) { + /* don't bother if we already have it and it is sync'd */ + if (f->state == IONIC_FILTER_STATE_SYNCED) { + spin_unlock_bh(&lif->rx_filters.lock); + return 0; + } + + /* mark preemptively as sync'd to block any parallel attempts */ + f->state = IONIC_FILTER_STATE_SYNCED; + } else { + /* save as SYNCED to catch any DEL requests while processing */ + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + } spin_unlock_bh(&lif->rx_filters.lock); - if (f) - return 0; + if (err) + return err; netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); - 
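
The add path above implements one half of a small per-filter state machine: a filter is NEW until it has been pushed to the device, SYNCED once the firmware has acked it, and OLD once a delete is pending. A minimal standalone sketch of those transitions, with hypothetical names (the real states are enum ionic_filter_state in ionic_rx_filter.h):

#include <stdio.h>

/* Models enum ionic_filter_state; a sketch, not the driver's code. */
enum filter_state { F_SYNCED, F_NEW, F_OLD };

/* An add request revives a delete-pending filter; a missing filter
 * would be created as F_NEW and pushed on the next sync pass.
 */
static enum filter_state request_add(enum filter_state s)
{
	return (s == F_OLD) ? F_SYNCED : s;
}

/* A delete request marks a synced filter for removal on the next
 * sync pass; a never-pushed F_NEW filter would simply be freed.
 */
static enum filter_state request_del(enum filter_state s)
{
	return (s == F_SYNCED) ? F_OLD : s;
}

int main(void)
{
	enum filter_state s = F_NEW;	/* created by an add request */

	s = F_SYNCED;			/* sync pass pushed it to the FW */
	s = request_del(s);		/* address removed: now F_OLD */
	s = request_add(s);		/* re-added before sync ran: F_SYNCED */
	printf("final state: %d\n", s);
	return 0;
}
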
memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); - err = ionic_adminq_post_wait(lif, &ctx); - if (err && err != -EEXIST) - return err; + /* Don't bother with the write to FW if we know there's no room, + * we can try again on the next sync attempt. + */ + if ((lif->nucast + lif->nmcast) >= nfilters) + err = -ENOSPC; + else + err = ionic_adminq_post_wait(lif, &ctx); + + spin_lock_bh(&lif->rx_filters.lock); + if (err && err != -EEXIST) { + /* set the state back to NEW so we can try again later */ + f = ionic_rx_filter_by_addr(lif, addr); + if (f && f->state == IONIC_FILTER_STATE_SYNCED) + f->state = IONIC_FILTER_STATE_NEW; + + spin_unlock_bh(&lif->rx_filters.lock); + + if (err == -ENOSPC) + return 0; + else + return err; + } - return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); + if (mc) + lif->nmcast++; + else + lif->nucast++; + + f = ionic_rx_filter_by_addr(lif, addr); + if (f && f->state == IONIC_FILTER_STATE_OLD) { + /* Someone requested a delete while we were adding + * so update the filter info with the results from the add + * and the data will be there for the delete on the next + * sync cycle. + */ + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_OLD); + } else { + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + } + + spin_unlock_bh(&lif->rx_filters.lock); + + return err; } -static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) +int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) { struct ionic_admin_ctx ctx = { .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), @@ -1290,6 +1337,7 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) }, }; struct ionic_rx_filter *f; + int state; int err; spin_lock_bh(&lif->rx_filters.lock); @@ -1302,65 +1350,37 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr, f->filter_id); + state = f->state; ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); ionic_rx_filter_free(lif, f); - spin_unlock_bh(&lif->rx_filters.lock); - - err = ionic_adminq_post_wait(lif, &ctx); - if (err && err != -EEXIST) - return err; - return 0; -} + if (is_multicast_ether_addr(addr) && lif->nmcast) + lif->nmcast--; + else if (!is_multicast_ether_addr(addr) && lif->nucast) + lif->nucast--; -static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add) -{ - unsigned int nmfilters; - unsigned int nufilters; + spin_unlock_bh(&lif->rx_filters.lock); - if (add) { - /* Do we have space for this filter? We test the counters - * here before checking the need for deferral so that we - * can return an overflow error to the stack. - */ - nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters); - nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); - - if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)) - lif->nmcast++; - else if (!is_multicast_ether_addr(addr) && - lif->nucast < nufilters) - lif->nucast++; - else - return -ENOSPC; - } else { - if (is_multicast_ether_addr(addr) && lif->nmcast) - lif->nmcast--; - else if (!is_multicast_ether_addr(addr) && lif->nucast) - lif->nucast--; + if (state != IONIC_FILTER_STATE_NEW) { + err = ionic_adminq_post_wait(lif, &ctx); + if (err && err != -EEXIST) + return err; } - netdev_dbg(lif->netdev, "rx_filter %s %pM\n", - add ? 
"add" : "del", addr); - if (add) - return ionic_lif_addr_add(lif, addr); - else - return ionic_lif_addr_del(lif, addr); - return 0; } static int ionic_addr_add(struct net_device *netdev, const u8 *addr) { - return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR); + return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR); } static int ionic_addr_del(struct net_device *netdev, const u8 *addr) { - return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR); + return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR); } -static void ionic_lif_rx_mode(struct ionic_lif *lif) +void ionic_lif_rx_mode(struct ionic_lif *lif) { struct net_device *netdev = lif->netdev; unsigned int nfilters; @@ -1381,32 +1401,26 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif) rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0; - /* sync unicast addresses - * next check to see if we're in an overflow state + /* sync the mac filters */ + ionic_rx_filter_sync(lif); + + /* check for overflow state * if so, we track that we overflowed and enable NIC PROMISC * else if the overflow is set and not needed * we remove our overflow flag and check the netdev flags * to see if we can disable NIC PROMISC */ - __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); - if (netdev_uc_count(netdev) + 1 > nfilters) { + if ((lif->nucast + lif->nmcast) >= nfilters) { rx_mode |= IONIC_RX_MODE_F_PROMISC; + rx_mode |= IONIC_RX_MODE_F_ALLMULTI; lif->uc_overflow = true; + lif->mc_overflow = true; } else if (lif->uc_overflow) { lif->uc_overflow = false; + lif->mc_overflow = false; if (!(nd_flags & IFF_PROMISC)) rx_mode &= ~IONIC_RX_MODE_F_PROMISC; - } - - /* same for multicast */ - __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); - nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters); - if (netdev_mc_count(netdev) > nfilters) { - rx_mode |= IONIC_RX_MODE_F_ALLMULTI; - lif->mc_overflow = true; - } else if (lif->mc_overflow) { - lif->mc_overflow = false; if (!(nd_flags & IFF_ALLMULTI)) rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; } @@ -1449,28 +1463,26 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif) mutex_unlock(&lif->config_lock); } -static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep) +static void ionic_ndo_set_rx_mode(struct net_device *netdev) { struct ionic_lif *lif = netdev_priv(netdev); struct ionic_deferred_work *work; - if (!can_sleep) { - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) { - netdev_err(lif->netdev, "rxmode change dropped\n"); - return; - } - work->type = IONIC_DW_TYPE_RX_MODE; - netdev_dbg(lif->netdev, "deferred: rx_mode\n"); - ionic_lif_deferred_enqueue(&lif->deferred, work); - } else { - ionic_lif_rx_mode(lif); - } -} + /* Sync the kernel filter list with the driver filter list */ + __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); + __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); -static void ionic_ndo_set_rx_mode(struct net_device *netdev) -{ - ionic_set_rx_mode(netdev, CAN_NOT_SLEEP); + /* Shove off the rest of the rxmode work to the work task + * which will include syncing the filters to the firmware. 
+ */ + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "rxmode change dropped\n"); + return; + } + work->type = IONIC_DW_TYPE_RX_MODE; + netdev_dbg(lif->netdev, "deferred: rx_mode\n"); + ionic_lif_deferred_enqueue(&lif->deferred, work); } static __le64 ionic_netdev_features_to_nic(netdev_features_t features) @@ -1599,7 +1611,6 @@ static int ionic_init_nic_features(struct ionic_lif *lif) features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM | @@ -1607,6 +1618,9 @@ static int ionic_init_nic_features(struct ionic_lif *lif) NETIF_F_TSO6 | NETIF_F_TSO_ECN; + if (lif->nxqs > 1) + features |= NETIF_F_RXHASH; + err = ionic_set_nic_features(lif, features); if (err) return err; @@ -1689,13 +1703,13 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa) if (!is_zero_ether_addr(netdev->dev_addr)) { netdev_info(netdev, "deleting mac addr %pM\n", netdev->dev_addr); - ionic_addr_del(netdev, netdev->dev_addr); + ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr); } eth_commit_mac_addr_change(netdev, addr); netdev_info(netdev, "updating mac addr %pM\n", mac); - return ionic_addr_add(netdev, mac); + return ionic_lif_addr_add(netdev_priv(netdev), mac); } static void ionic_stop_queues_reconfig(struct ionic_lif *lif) @@ -1801,7 +1815,12 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, if (err) return err; - return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); + spin_lock_bh(&lif->rx_filters.lock); + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + spin_unlock_bh(&lif->rx_filters.lock); + + return err; } static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, @@ -2104,7 +2123,7 @@ static int ionic_txrx_init(struct ionic_lif *lif) if (lif->netdev->features & NETIF_F_RXHASH) ionic_lif_rss_init(lif); - ionic_set_rx_mode(lif->netdev, CAN_SLEEP); + ionic_lif_rx_mode(lif); return 0; @@ -2202,9 +2221,11 @@ static int ionic_open(struct net_device *netdev) if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) netdev_info(netdev, "clearing broken state\n"); + mutex_lock(&lif->queue_lock); + err = ionic_txrx_alloc(lif); if (err) - return err; + goto err_unlock; err = ionic_txrx_init(lif); if (err) @@ -2225,12 +2246,21 @@ static int ionic_open(struct net_device *netdev) goto err_txrx_deinit; } + /* If hardware timestamping is enabled, but the queues were freed by + * ionic_stop, those need to be reallocated and initialized, too. 
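
The hwstamp-queue recreate call above works because ionic_open now holds lif->queue_lock around the whole alloc/init sequence; the same mutex was removed from inside ionic_lif_create_hwstamp_txq/rxq earlier in this patch, making "caller holds queue_lock" the rule for every queue teardown/rebuild path. A kernel-style sketch of that convention, with hypothetical names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct demo_lif {
	struct mutex queue_lock;	/* plays the role of lif->queue_lock */
};

/* Helper is lock-free by contract: the caller must hold queue_lock. */
static void demo_rebuild_queues(struct demo_lif *lif)
{
	lockdep_assert_held(&lif->queue_lock);
	/* free and reallocate queue resources here */
}

static void demo_open(struct demo_lif *lif)
{
	mutex_lock(&lif->queue_lock);
	demo_rebuild_queues(lif);	/* serialized vs. stop/hwstamp config */
	mutex_unlock(&lif->queue_lock);
}
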
+ */ + ionic_lif_hwstamp_recreate_queues(lif); + + mutex_unlock(&lif->queue_lock); + return 0; err_txrx_deinit: ionic_txrx_deinit(lif); err_txrx_free: ionic_txrx_free(lif); +err_unlock: + mutex_unlock(&lif->queue_lock); return err; } @@ -2250,14 +2280,16 @@ static int ionic_stop(struct net_device *netdev) if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) return 0; + mutex_lock(&lif->queue_lock); ionic_stop_queues(lif); ionic_txrx_deinit(lif); ionic_txrx_free(lif); + mutex_unlock(&lif->queue_lock); return 0; } -static int ionic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct ionic_lif *lif = netdev_priv(netdev); @@ -2519,7 +2551,7 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set) static const struct net_device_ops ionic_netdev_ops = { .ndo_open = ionic_open, .ndo_stop = ionic_stop, - .ndo_do_ioctl = ionic_do_ioctl, + .ndo_eth_ioctl = ionic_eth_ioctl, .ndo_start_xmit = ionic_start_xmit, .ndo_get_stats64 = ionic_get_stats64, .ndo_set_rx_mode = ionic_ndo_set_rx_mode, @@ -2580,22 +2612,26 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, struct ionic_qcq **tx_qcqs = NULL; struct ionic_qcq **rx_qcqs = NULL; unsigned int flags, i; - int err = -ENOMEM; + int err = 0; /* allocate temporary qcq arrays to hold new queue structs */ if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) { tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif, sizeof(struct ionic_qcq *), GFP_KERNEL); - if (!tx_qcqs) + if (!tx_qcqs) { + err = -ENOMEM; goto err_out; + } } if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs || qparam->rxq_features != lif->rxq_features) { rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, sizeof(struct ionic_qcq *), GFP_KERNEL); - if (!rx_qcqs) + if (!rx_qcqs) { + err = -ENOMEM; goto err_out; + } } /* allocate new desc_info and rings, but leave the interrupt setup @@ -2774,6 +2810,9 @@ err_out: ionic_qcq_free(lif, lif->rxqcqs[i]); } + if (err) + netdev_info(lif->netdev, "%s: failed %d\n", __func__, err); + return err; } @@ -2827,8 +2866,14 @@ int ionic_lif_alloc(struct ionic *ionic) lif->ionic = ionic; lif->index = 0; - lif->ntxq_descs = IONIC_DEF_TXRX_DESC; - lif->nrxq_descs = IONIC_DEF_TXRX_DESC; + + if (is_kdump_kernel()) { + lif->ntxq_descs = IONIC_MIN_TXRX_DESC; + lif->nrxq_descs = IONIC_MIN_TXRX_DESC; + } else { + lif->ntxq_descs = IONIC_DEF_TXRX_DESC; + lif->nrxq_descs = IONIC_DEF_TXRX_DESC; + } /* Convert the default coalesce value to actual hw resolution */ lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; @@ -3179,7 +3224,7 @@ static int ionic_station_set(struct ionic_lif *lif) */ if (!ether_addr_equal(ctx.comp.lif_getattr.mac, netdev->dev_addr)) - ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR); + ionic_lif_addr_add(lif, netdev->dev_addr); } else { /* Update the netdev mac with the device's mac */ memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); @@ -3196,7 +3241,7 @@ static int ionic_station_set(struct ionic_lif *lif) netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", netdev->dev_addr); - ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR); + ionic_lif_addr_add(lif, netdev->dev_addr); return 0; } @@ -3514,6 +3559,7 @@ int ionic_lif_size(struct ionic *ionic) unsigned int min_intrs; int err; + /* retrieve basic values from FW */ lc = &ident->lif.eth.config; dev_nintrs = le32_to_cpu(ident->dev.nintrs); neqs_per_lif = 
le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); @@ -3521,6 +3567,15 @@ int ionic_lif_size(struct ionic *ionic) ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); + /* limit values to play nice with kdump */ + if (is_kdump_kernel()) { + dev_nintrs = 2; + neqs_per_lif = 0; + nnqs_per_lif = 0; + ntxqs_per_lif = 1; + nrxqs_per_lif = 1; + } + /* reserve last queue id for hardware timestamping */ if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) { if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 69ab59fedb6c..4915184f3efb 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -98,8 +98,6 @@ struct ionic_qcq { enum ionic_deferred_work_type { IONIC_DW_TYPE_RX_MODE, - IONIC_DW_TYPE_RX_ADDR_ADD, - IONIC_DW_TYPE_RX_ADDR_DEL, IONIC_DW_TYPE_LINK_STATUS, IONIC_DW_TYPE_LIF_RESET, }; @@ -147,6 +145,7 @@ enum ionic_lif_state_flags { IONIC_LIF_F_SW_DEBUG_STATS, IONIC_LIF_F_UP, IONIC_LIF_F_LINK_CHECK_REQUESTED, + IONIC_LIF_F_FILTER_SYNC_NEEDED, IONIC_LIF_F_FW_RESET, IONIC_LIF_F_SPLIT_INTR, IONIC_LIF_F_BROKEN, @@ -295,6 +294,10 @@ int ionic_lif_alloc(struct ionic *ionic); int ionic_lif_init(struct ionic_lif *lif); void ionic_lif_free(struct ionic_lif *lif); void ionic_lif_deinit(struct ionic_lif *lif); + +int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); +int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); + int ionic_lif_register(struct ionic_lif *lif); void ionic_lif_unregister(struct ionic_lif *lif); int ionic_lif_identify(struct ionic *ionic, u8 lif_type, @@ -303,6 +306,7 @@ int ionic_lif_size(struct ionic *ionic); #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) void ionic_lif_hwstamp_replay(struct ionic_lif *lif); +void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif); int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr); int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr); ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter); @@ -312,6 +316,7 @@ void ionic_lif_alloc_phc(struct ionic_lif *lif); void ionic_lif_free_phc(struct ionic_lif *lif); #else static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {} +static inline void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) {} static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) { @@ -342,6 +347,7 @@ int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class); int ionic_lif_rss_config(struct ionic_lif *lif, u16 types, const u8 *key, const u32 *indir); +void ionic_lif_rx_mode(struct ionic_lif *lif); int ionic_reconfigure_queues(struct ionic_lif *lif, struct ionic_queue_params *qparam); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 61cfe2120817..6f07bf509efe 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -375,8 +375,8 @@ try_again: * heartbeat check but is still alive and will process this * request, so don't clean the dev_cmd in this case. 
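
Both the devcmd path here and the heartbeat rework earlier key off the fw_status register described in ionic_if.h: bit 0 is the RUNNING flag, bits 4-7 carry a generation number that changes whenever the firmware restarts, and 0xff means the PCI read itself failed. A standalone example of decoding it (the register values below are made up):

#include <stdint.h>
#include <stdio.h>

#define FW_STS_F_RUNNING	0x01	/* IONIC_FW_STS_F_RUNNING */
#define FW_STS_F_GENERATION	0xF0	/* IONIC_FW_STS_F_GENERATION */

int main(void)
{
	uint8_t last = 0x11;	/* running, generation 0x10 */
	uint8_t now = 0x21;	/* running, generation 0x20: FW restarted */
	int ready = (now != 0xff) && (now & FW_STS_F_RUNNING);

	printf("fw running: %s\n", ready ? "yes" : "no");
	if (ready &&
	    (last & FW_STS_F_GENERATION) != (now & FW_STS_F_GENERATION))
		printf("FW generation 0x%02x -> 0x%02x: run a fw-down/fw-up cycle\n",
		       last & FW_STS_F_GENERATION, now & FW_STS_F_GENERATION);
	return 0;
}
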
*/ - dev_warn(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n", - ionic_opcode_to_str(opcode), opcode); + dev_dbg(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n", + ionic_opcode_to_str(opcode), opcode); return -ENXIO; } @@ -450,6 +450,8 @@ int ionic_identify(struct ionic *ionic) } mutex_unlock(&ionic->dev_cmd_lock); + dev_info(ionic->dev, "FW: %s\n", idev->dev_info.fw_version); + if (err) { dev_err(ionic->dev, "Cannot identify ionic: %d\n", err); goto err_out; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c index 6e2403c71608..eed2db69d708 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c @@ -119,8 +119,8 @@ static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif, config->rx_filter = HWTSTAMP_FILTER_ALL; } - dev_dbg(ionic->dev, "config_rx_filter %d rx_filt %#llx rx_all %d\n", - config->rx_filter, rx_filt, rx_all); + dev_dbg(ionic->dev, "%s: config_rx_filter %d rx_filt %#llx rx_all %d\n", + __func__, config->rx_filter, rx_filt, rx_all); if (tx_mode) { err = ionic_lif_create_hwstamp_txq(lif); @@ -194,7 +194,9 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; + mutex_lock(&lif->queue_lock); err = ionic_lif_hwstamp_set_ts_config(lif, &config); + mutex_unlock(&lif->queue_lock); if (err) { netdev_info(lif->netdev, "hwstamp set failed: %d\n", err); return err; } @@ -213,11 +215,37 @@ void ionic_lif_hwstamp_replay(struct ionic_lif *lif) if (!lif->phc || !lif->phc->ptp) return; + mutex_lock(&lif->queue_lock); err = ionic_lif_hwstamp_set_ts_config(lif, NULL); + mutex_unlock(&lif->queue_lock); if (err) netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err); } +void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) +{ + int err; + + if (!lif->phc || !lif->phc->ptp) + return; + + mutex_lock(&lif->phc->config_lock); + + if (lif->phc->ts_config_tx_mode) { + err = ionic_lif_create_hwstamp_txq(lif); + if (err) + netdev_info(lif->netdev, "hwstamp recreate txq failed: %d\n", err); + } + + if (lif->phc->ts_config_rx_filt) { + err = ionic_lif_create_hwstamp_rxq(lif); + if (err) + netdev_info(lif->netdev, "hwstamp recreate rxq failed: %d\n", err); + } + + mutex_unlock(&lif->phc->config_lock); +} + int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr) { struct hwtstamp_config config; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c index d71316d9ded2..7e3a5634c161 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -4,6 +4,7 @@ #include <linux/netdevice.h> #include <linux/dynamic_debug.h> #include <linux/etherdevice.h> +#include <linux/list.h> #include "ionic.h" #include "ionic_lif.h" @@ -120,11 +121,12 @@ void ionic_rx_filters_deinit(struct ionic_lif *lif) } int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, - u32 hash, struct ionic_admin_ctx *ctx) + u32 hash, struct ionic_admin_ctx *ctx, + enum ionic_filter_state state) { struct device *dev = lif->ionic->dev; struct ionic_rx_filter_add_cmd *ac; - struct ionic_rx_filter *f; + struct ionic_rx_filter *f = NULL; struct hlist_head *head; unsigned int key; @@ -133,9 +135,11 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, switch (le16_to_cpu(ac->match)) { case IONIC_RX_FILTER_MATCH_VLAN: key =
le16_to_cpu(ac->vlan.vlan); + f = ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan)); break; case IONIC_RX_FILTER_MATCH_MAC: key = *(u32 *)ac->mac.addr; + f = ionic_rx_filter_by_addr(lif, ac->mac.addr); break; case IONIC_RX_FILTER_MATCH_MAC_VLAN: key = le16_to_cpu(ac->mac_vlan.vlan); @@ -147,12 +151,19 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, return -EINVAL; } - f = devm_kzalloc(dev, sizeof(*f), GFP_KERNEL); - if (!f) - return -ENOMEM; + if (f) { + /* remove from current linking so we can refresh it */ + hlist_del(&f->by_id); + hlist_del(&f->by_hash); + } else { + f = devm_kzalloc(dev, sizeof(*f), GFP_ATOMIC); + if (!f) + return -ENOMEM; + } f->flow_id = flow_id; f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id); + f->state = state; f->rxq_index = rxq_index; memcpy(&f->cmd, ac, sizeof(f->cmd)); netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id); @@ -160,8 +171,6 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, INIT_HLIST_NODE(&f->by_hash); INIT_HLIST_NODE(&f->by_id); - spin_lock_bh(&lif->rx_filters.lock); - key = hash_32(key, IONIC_RX_FILTER_HASH_BITS); head = &lif->rx_filters.by_hash[key]; hlist_add_head(&f->by_hash, head); @@ -170,8 +179,6 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, head = &lif->rx_filters.by_id[key]; hlist_add_head(&f->by_id, head); - spin_unlock_bh(&lif->rx_filters.lock); - return 0; } @@ -231,3 +238,121 @@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif) return NULL; } + +int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode) +{ + struct ionic_rx_filter *f; + int err; + + spin_lock_bh(&lif->rx_filters.lock); + + f = ionic_rx_filter_by_addr(lif, addr); + if (mode == ADD_ADDR && !f) { + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .lif_index = cpu_to_le16(lif->index), + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), + }, + }; + + memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_NEW); + if (err) { + spin_unlock_bh(&lif->rx_filters.lock); + return err; + } + + } else if (mode == ADD_ADDR && f) { + if (f->state == IONIC_FILTER_STATE_OLD) + f->state = IONIC_FILTER_STATE_SYNCED; + + } else if (mode == DEL_ADDR && f) { + if (f->state == IONIC_FILTER_STATE_NEW) + ionic_rx_filter_free(lif, f); + else if (f->state == IONIC_FILTER_STATE_SYNCED) + f->state = IONIC_FILTER_STATE_OLD; + } else if (mode == DEL_ADDR && !f) { + spin_unlock_bh(&lif->rx_filters.lock); + return -ENOENT; + } + + spin_unlock_bh(&lif->rx_filters.lock); + + set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + + return 0; +} + +struct sync_item { + struct list_head list; + struct ionic_rx_filter f; +}; + +void ionic_rx_filter_sync(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + struct list_head sync_add_list; + struct list_head sync_del_list; + struct sync_item *sync_item; + struct ionic_rx_filter *f; + struct hlist_head *head; + struct hlist_node *tmp; + struct sync_item *spos; + unsigned int i; + + INIT_LIST_HEAD(&sync_add_list); + INIT_LIST_HEAD(&sync_del_list); + + clear_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + + /* Copy the filters to be added and deleted + * into a separate local list that needs no locking. 
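
ionic_rx_filter_sync below makes that comment concrete: snapshot the NEW and OLD filters into private lists while holding the spinlock, drop the lock, then issue the slow adminq commands, deletes first so an overflowed table has room for the adds. A standalone sketch of the snapshot-then-apply shape, with hypothetical names:

#include <stdio.h>

enum state { SYNCED, NEW, OLD };

struct filter {
	const char *mac;
	enum state st;
};

int main(void)
{
	struct filter table[] = {
		{ "00:11:22:33:44:55", NEW },	 /* waiting to be pushed */
		{ "66:77:88:99:aa:bb", OLD },	 /* waiting to be deleted */
		{ "cc:dd:ee:ff:00:11", SYNCED }, /* nothing to do */
	};
	int n = sizeof(table) / sizeof(table[0]);
	struct filter add[8], del[8];
	int nadd = 0, ndel = 0, i;

	/* phase 1: in the driver this copy runs under rx_filters.lock */
	for (i = 0; i < n; i++) {
		if (table[i].st == NEW)
			add[nadd++] = table[i];
		else if (table[i].st == OLD)
			del[ndel++] = table[i];
	}

	/* phase 2: lock dropped; deletes first to free table space */
	for (i = 0; i < ndel; i++)
		printf("adminq del %s\n", del[i].mac);
	for (i = 0; i < nadd; i++)
		printf("adminq add %s\n", add[i].mac);
	return 0;
}
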
+ */ + spin_lock_bh(&lif->rx_filters.lock); + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + head = &lif->rx_filters.by_id[i]; + hlist_for_each_entry_safe(f, tmp, head, by_id) { + if (f->state == IONIC_FILTER_STATE_NEW || + f->state == IONIC_FILTER_STATE_OLD) { + sync_item = devm_kzalloc(dev, sizeof(*sync_item), + GFP_KERNEL); + if (!sync_item) + goto loop_out; + + sync_item->f = *f; + + if (f->state == IONIC_FILTER_STATE_NEW) + list_add(&sync_item->list, &sync_add_list); + else + list_add(&sync_item->list, &sync_del_list); + } + } + } +loop_out: + spin_unlock_bh(&lif->rx_filters.lock); + + /* If the add or delete fails, it won't get marked as sync'd + * and will be tried again in the next sync action. + * Do the deletes first in case we're in an overflow state and + * they can clear room for some new filters + */ + list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) { + (void)ionic_lif_addr_del(lif, sync_item->f.cmd.mac.addr); + + list_del(&sync_item->list); + devm_kfree(dev, sync_item); + } + + list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) { + (void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr); + + if (sync_item->f.state != IONIC_FILTER_STATE_SYNCED) + set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + + list_del(&sync_item->list); + devm_kfree(dev, sync_item); + } +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h index 1ead48be3c83..a66e35f0833b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h @@ -5,10 +5,18 @@ #define _IONIC_RX_FILTER_H_ #define IONIC_RXQ_INDEX_ANY (0xFFFF) + +enum ionic_filter_state { + IONIC_FILTER_STATE_SYNCED, + IONIC_FILTER_STATE_NEW, + IONIC_FILTER_STATE_OLD, +}; + struct ionic_rx_filter { u32 flow_id; u32 filter_id; u16 rxq_index; + enum ionic_filter_state state; struct ionic_rx_filter_add_cmd cmd; struct hlist_node by_hash; struct hlist_node by_id; @@ -28,9 +36,13 @@ void ionic_rx_filter_replay(struct ionic_lif *lif); int ionic_rx_filters_init(struct ionic_lif *lif); void ionic_rx_filters_deinit(struct ionic_lif *lif); int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, - u32 hash, struct ionic_admin_ctx *ctx); + u32 hash, struct ionic_admin_ctx *ctx, + enum ionic_filter_state state); struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid); struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, const u8 *addr); struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif); +void ionic_rx_filter_sync(struct ionic_lif *lif); +int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode); +int ionic_rx_filters_need_sync(struct ionic_lif *lif); #endif /* _IONIC_RX_FILTER_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 08870190e4d2..37c39581b659 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -32,19 +32,13 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q) return netdev_get_tx_queue(q->lif->netdev, q->index); } -static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info) -{ - buf_info->page = NULL; - buf_info->page_offset = 0; - buf_info->dma_addr = 0; -} - static int ionic_rx_page_alloc(struct ionic_queue *q, struct ionic_buf_info *buf_info) { struct net_device *netdev = q->lif->netdev; struct ionic_rx_stats *stats; 
struct device *dev; + struct page *page; dev = q->dev; stats = q_to_rx_stats(q); @@ -55,26 +49,27 @@ static int ionic_rx_page_alloc(struct ionic_queue *q, return -EINVAL; } - buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0); - if (unlikely(!buf_info->page)) { + page = alloc_pages(IONIC_PAGE_GFP_MASK, 0); + if (unlikely(!page)) { net_err_ratelimited("%s: %s page alloc failed\n", netdev->name, q->name); stats->alloc_err++; return -ENOMEM; } - buf_info->page_offset = 0; - buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset, + buf_info->dma_addr = dma_map_page(dev, page, 0, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) { - __free_pages(buf_info->page, 0); - ionic_rx_buf_reset(buf_info); + __free_pages(page, 0); net_err_ratelimited("%s: %s dma map failed\n", netdev->name, q->name); stats->dma_map_err++; return -EIO; } + buf_info->page = page; + buf_info->page_offset = 0; + return 0; } @@ -95,7 +90,7 @@ static void ionic_rx_page_free(struct ionic_queue *q, dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); __free_pages(buf_info->page, 0); - ionic_rx_buf_reset(buf_info); + buf_info->page = NULL; } static bool ionic_rx_buf_recycle(struct ionic_queue *q, @@ -139,7 +134,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, buf_info = &desc_info->bufs[0]; len = le16_to_cpu(comp->len); - prefetch(buf_info->page); + prefetchw(buf_info->page); skb = napi_get_frags(&q_to_qcq(q)->napi); if (unlikely(!skb)) { @@ -170,7 +165,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) { dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - ionic_rx_buf_reset(buf_info); + buf_info->page = NULL; } buf_info++; diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 98f430905ffa..1203353238e5 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -99,7 +99,7 @@ config QED_SRIOV config QEDE tristate "QLogic QED 25/40/100Gb Ethernet NIC" depends on QED - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL help This enables the support for Marvell FastLinQ adapters family, ethernet driver. diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index e5c51256243a..f13fa7396aef 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -1863,7 +1863,6 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring) int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac); int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac); void netxen_change_ringparam(struct netxen_adapter *adapter); -int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); extern const struct ethtool_ops netxen_nic_ethtool_ops; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c index dd22cb056d03..a075643f5826 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -731,7 +731,9 @@ netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) * firmware coalescing to default. 
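
The two extra parameters on get_coalesce/set_coalesce seen in the netxen hunk below recur across qede and qlcnic as well: the ethtool core grew a kernel_ethtool_coalesce side-structure plus an extack for netlink error reporting. A kernel-style sketch of the updated callback shape (hypothetical driver; the prototypes match the ones these hunks convert to):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

static int demo_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	coal->rx_coalesce_usecs = 64;	/* report the current setting */
	return 0;
}

static int demo_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	if (coal->rx_coalesce_usecs > 1024) {
		NL_SET_ERR_MSG(extack, "rx-usecs out of range");
		return -EINVAL;
	}
	/* program the hardware here */
	return 0;
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_coalesce	= demo_get_coalesce,
	.set_coalesce	= demo_set_coalesce,
};
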
*/ static int netxen_set_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) + struct ethtool_coalesce *ethcoal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct netxen_adapter *adapter = netdev_priv(netdev); @@ -775,7 +777,9 @@ static int netxen_set_intr_coalesce(struct net_device *netdev, } static int netxen_get_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) + struct ethtool_coalesce *ethcoal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct netxen_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index b590c70539b5..d58e021614cd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -26,15 +26,6 @@ extern const struct qed_common_ops qed_common_ops_pass; -#define QED_MAJOR_VERSION 8 -#define QED_MINOR_VERSION 37 -#define QED_REVISION_VERSION 0 -#define QED_ENGINEERING_VERSION 20 - -#define QED_VERSION \ - ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \ - (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION) - #define STORM_FW_VERSION \ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION) @@ -517,12 +508,6 @@ enum qed_hsi_def_type { QED_NUM_HSI_DEFS }; -#define DRV_MODULE_VERSION \ - __stringify(QED_MAJOR_VERSION) "." \ - __stringify(QED_MINOR_VERSION) "." \ - __stringify(QED_REVISION_VERSION) "." \ - __stringify(QED_ENGINEERING_VERSION) - struct qed_simd_fp_handler { void *token; void (*func)(void *); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index e81dd34a3cac..dc93ddea8906 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -741,7 +741,6 @@ static int qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_dcbx_mib_meta_data data; - int rc = 0; memset(&data, 0, sizeof(data)); data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, @@ -750,7 +749,7 @@ qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) data.size = sizeof(struct lldp_config_params_s); qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size); - return rc; + return 0; } static int @@ -810,7 +809,6 @@ static int qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_dcbx_mib_meta_data data; - int rc = 0; memset(&data, 0, sizeof(data)); data.addr = p_hwfn->mcp_info->port_addr + @@ -819,7 +817,7 @@ qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) data.size = sizeof(struct dcbx_local_params); qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size); - return rc; + return 0; } static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c index cf7f4da68e69..78070682f2df 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c +++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c @@ -93,7 +93,7 @@ static const struct devlink_health_reporter_ops qed_fw_fatal_reporter_ops = { .dump = qed_fw_fatal_reporter_dump, }; -#define QED_REPORTER_FW_GRACEFUL_PERIOD 1200000 +#define QED_REPORTER_FW_GRACEFUL_PERIOD 0 void qed_fw_reporters_create(struct devlink *devlink) { @@ -207,14 +207,15 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev) struct 
devlink *dl; int rc; - dl = devlink_alloc(&qed_dl_ops, sizeof(struct qed_devlink)); + dl = devlink_alloc(&qed_dl_ops, sizeof(struct qed_devlink), + &cdev->pdev->dev); if (!dl) return ERR_PTR(-ENOMEM); qdevlink = devlink_priv(dl); qdevlink->cdev = cdev; - rc = devlink_register(dl, &cdev->pdev->dev); + rc = devlink_register(dl); if (rc) goto err_free; diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 578935f643b8..f78e6055f654 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -351,6 +351,9 @@ static int qed_fw_assertion(struct qed_hwfn *p_hwfn) qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT, "FW assertion!\n"); + /* Clear assert indications */ + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MISC_REG_AEU_GENERAL_ATTN_32, 0); + return -EINVAL; } @@ -464,12 +467,19 @@ static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) u32 int_sts, first_drop_reason, details, address, all_drops_reason; struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; + int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); + if (int_sts == 0xdeadbeaf) { + DP_NOTICE(p_hwfn->cdev, + "DORQ is being reset, skipping int_sts handler\n"); + + return 0; + } + /* int_sts may be zero since all PFs were interrupted for doorbell * overflow but another one already handled it. Can abort here. If * this PF also requires overflow recovery, we will be interrupted again. * The masked almost full indication may also be set. Ignoring. */ - int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) return 0; @@ -528,6 +538,9 @@ static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) { + if (p_hwfn->cdev->recov_in_prog) + return 0; + p_hwfn->db_recovery_info.dorq_attn = true; qed_dorq_attn_overflow(p_hwfn); @@ -943,6 +956,13 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", p_bit_name); + /* Re-enable FW assertion (Gen 32) interrupts */ + val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_ENABLE4_IGU_OUT_0); + val |= MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32; + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_ENABLE4_IGU_OUT_0, val); + out: return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index a99861124630..fc8b3e64f153 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1624,8 +1624,6 @@ qed_iwarp_get_listener(struct qed_hwfn *p_hwfn, static const u32 ip_zero[4] = { 0, 0, 0, 0 }; bool found = false; - qed_iwarp_print_cm_info(p_hwfn, cm_info); - list_for_each_entry(listener, &p_hwfn->p_rdma_info->iwarp.listen_list, list_entry) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 6bb9ec98a12b..15ef59aa34ff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -49,11 +49,10 @@ #define QED_NVM_CFG_MAX_ATTRS 50 static char version[] = - "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; + "QLogic FastLinQ 4xxxx Core Module qed\n"; MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_MODULE_VERSION); #define FW_FILE_VERSION \ __stringify(FW_MAJOR_VERSION) "."
\ @@ -1221,6 +1220,10 @@ static void qed_slowpath_task(struct work_struct *work) if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, &hwfn->slowpath_task_flags)) { + /* skip qed_db_rec_handler during recovery/unload */ + if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active) + goto out; + qed_db_rec_handler(hwfn, ptt); if (hwfn->periodic_db_rec_count--) qed_slowpath_delayed_work(hwfn, @@ -1228,6 +1231,7 @@ static void qed_slowpath_task(struct work_struct *work) QED_PERIODIC_DB_REC_INTERVAL); } +out: qed_ptt_release(hwfn, ptt); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 4387292c37e2..6e5a6cc97d0e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -944,7 +944,6 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, memset(&in_params, 0, sizeof(in_params)); in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT; - in_params.drv_ver_0 = QED_VERSION; in_params.drv_ver_1 = qed_get_config_bitmap(); in_params.fw_ver = STORM_FW_VERSION; rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role); diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c index c1dd71d19f3f..3b84d00cf987 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c @@ -4,7 +4,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/kernel.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 9db22be42476..da1b7fdcbda7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -504,6 +504,8 @@ 0x180824UL #define MISC_REG_AEU_GENERAL_ATTN_0 \ 0x008400UL +#define MISC_REG_AEU_GENERAL_ATTN_32 \ + 0x008480UL #define MISC_REG_AEU_GENERAL_ATTN_35 \ 0x00848cUL #define CAU_REG_SB_ADDR_MEMORY \ @@ -518,6 +520,12 @@ 0x180804UL #define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \ 0x00849cUL +#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 \ + 0x0084a8UL +#define MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32 \ + (0x1UL << 0) +#define MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32_SHIFT \ + 0 #define MISC_REG_AEU_AFTER_INVERT_1_IGU \ 0x0087b4UL #define MISC_REG_AEU_MASK_ATTN_IGU \ diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 5630008f38b7..f90dcfe9ee68 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -30,15 +30,6 @@ #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> -#define QEDE_MAJOR_VERSION 8 -#define QEDE_MINOR_VERSION 37 -#define QEDE_REVISION_VERSION 0 -#define QEDE_ENGINEERING_VERSION 20 -#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ - __stringify(QEDE_MINOR_VERSION) "." \ - __stringify(QEDE_REVISION_VERSION) "." 
\ - __stringify(QEDE_ENGINEERING_VERSION) - #define DRV_MODULE_SYM qede struct qede_stats_common { @@ -589,7 +580,9 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, struct flow_cls_offload *f); void qede_forced_speed_maps_init(void); -int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal); +int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); int qede_set_per_coalesce(struct net_device *dev, u32 queue, struct ethtool_coalesce *coal); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 1560ad3d9290..8284c4c1528f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -625,13 +625,13 @@ static void qede_get_drvinfo(struct net_device *ndev, (edev->dev_info.common.mfw_rev >> 8) & 0xFF, edev->dev_info.common.mfw_rev & 0xFF); - if ((strlen(storm) + strlen(DRV_MODULE_VERSION) + strlen("[storm] ")) < + if ((strlen(storm) + strlen("[storm]")) < sizeof(info->version)) snprintf(info->version, sizeof(info->version), - "%s [storm %s]", DRV_MODULE_VERSION, storm); + "[storm %s]", storm); else snprintf(info->version, sizeof(info->version), - "%s %s", DRV_MODULE_VERSION, storm); + "%s", storm); if (edev->dev_info.common.mbi_version) { snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d", @@ -760,7 +760,9 @@ static int qede_flash_device(struct net_device *dev, } static int qede_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { void *rx_handle = NULL, *tx_handle = NULL; struct qede_dev *edev = netdev_priv(dev); @@ -819,7 +821,9 @@ out: return rc; } -int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) +int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct qede_dev *edev = netdev_priv(dev); struct qede_fastpath *fp; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 1c7f9ed6f1c1..9837bdb89cd4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -39,12 +39,8 @@ #include "qede.h" #include "qede_ptp.h" -static char version[] = - "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n"; - MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver"); MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_MODULE_VERSION); static uint debug; module_param(debug, uint, 0); @@ -258,7 +254,7 @@ int __init qede_init(void) { int ret; - pr_info("qede_init: %s\n", version); + pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n"); qede_forced_speed_maps_init(); @@ -644,7 +640,7 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, - .ndo_do_ioctl = qede_ioctl, + .ndo_eth_ioctl = qede_ioctl, .ndo_tx_timeout = qede_tx_timeout, #ifdef CONFIG_QED_SRIOV .ndo_set_vf_mac = qede_set_vf_mac, @@ -1157,10 +1153,6 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, /* Start the Slowpath-process */ memset(&sp_params, 0, sizeof(sp_params)); sp_params.int_mode = QED_INT_MODE_MSIX; - sp_params.drv_major = QEDE_MAJOR_VERSION; - 
sp_params.drv_minor = QEDE_MINOR_VERSION; - sp_params.drv_rev = QEDE_REVISION_VERSION; - sp_params.drv_eng = QEDE_ENGINEERING_VERSION; strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE); rc = qed_ops->common->slowpath_start(cdev, &sp_params); if (rc) { @@ -1907,6 +1899,12 @@ static int qede_req_msix_irqs(struct qede_dev *edev) &edev->fp_array[i]); if (rc) { DP_ERR(edev, "Request fp %d irq failed\n", i); +#ifdef CONFIG_RFS_ACCEL + if (edev->ndev->rx_cpu_rmap) + free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); + + edev->ndev->rx_cpu_rmap = NULL; +#endif qede_sync_free_irqs(edev); return rc; } @@ -2299,6 +2297,15 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, rc = qede_stop_queues(edev); if (rc) { +#ifdef CONFIG_RFS_ACCEL + if (edev->dev_info.common.b_arfs_capable) { + qede_poll_for_freeing_arfs_filters(edev); + if (edev->ndev->rx_cpu_rmap) + free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); + + edev->ndev->rx_cpu_rmap = NULL; + } +#endif qede_sync_free_irqs(edev); goto out; } @@ -2628,8 +2635,10 @@ static void qede_generic_hw_err_handler(struct qede_dev *edev) "Generic sleepable HW error handling started - err_flags 0x%lx\n", edev->err_flags); - if (edev->devlink) + if (edev->devlink) { + DP_NOTICE(edev, "Reporting fatal error to devlink\n"); edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type); + } clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags); @@ -2651,6 +2660,8 @@ static void qede_set_hw_err_flags(struct qede_dev *edev, case QED_HW_ERR_FW_ASSERT: set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags); set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags); + /* make this error as recoverable and start recovery*/ + set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags); break; default: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index d8f0863b3934..fc364b4ab6eb 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -1021,7 +1021,7 @@ clear_diag_irq: static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[]) { - unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00}; + static const unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00}; memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE); @@ -1527,7 +1527,9 @@ qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) * firmware coalescing to default. 
*/ static int qlcnic_set_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) + struct ethtool_coalesce *ethcoal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err; @@ -1551,7 +1553,9 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev, } static int qlcnic_get_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) + struct ethtool_coalesce *ethcoal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index e6784023bce4..3d61a767a8a3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -94,10 +94,8 @@ void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter) if (rx_buf->skb == NULL) continue; - pci_unmap_single(adapter->pdev, - rx_buf->dma, - rds_ring->dma_size, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&adapter->pdev->dev, rx_buf->dma, + rds_ring->dma_size, DMA_FROM_DEVICE); dev_kfree_skb_any(rx_buf->skb); } @@ -139,16 +137,16 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, for (i = 0; i < tx_ring->num_desc; i++) { buffrag = cmd_buf->frag_array; if (buffrag->dma) { - pci_unmap_single(adapter->pdev, buffrag->dma, - buffrag->length, PCI_DMA_TODEVICE); + dma_unmap_single(&adapter->pdev->dev, buffrag->dma, + buffrag->length, DMA_TO_DEVICE); buffrag->dma = 0ULL; } for (j = 1; j < cmd_buf->frag_count; j++) { buffrag++; if (buffrag->dma) { - pci_unmap_page(adapter->pdev, buffrag->dma, - buffrag->length, - PCI_DMA_TODEVICE); + dma_unmap_page(&adapter->pdev->dev, + buffrag->dma, buffrag->length, + DMA_TO_DEVICE); buffrag->dma = 0ULL; } } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index af4c516a9e7c..29cdcb2285b1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -587,9 +587,9 @@ static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, nr_frags = skb_shinfo(skb)->nr_frags; nf = &pbuf->frag_array[0]; - map = pci_map_single(pdev, skb->data, skb_headlen(skb), - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, map)) + map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, map)) goto out_err; nf->dma = map; @@ -612,11 +612,11 @@ static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, unwind: while (--i >= 0) { nf = &pbuf->frag_array[i+1]; - pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE); } nf = &pbuf->frag_array[0]; - pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE); out_err: return -ENOMEM; @@ -630,11 +630,11 @@ static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb, for (i = 0; i < nr_frags; i++) { nf = &pbuf->frag_array[i+1]; - pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE); } nf = &pbuf->frag_array[0]; - pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE); pbuf->skb = NULL; } @@ -825,10 
+825,10 @@ static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, } skb_reserve(skb, NET_IP_ALIGN); - dma = pci_map_single(pdev, skb->data, - rds_ring->dma_size, PCI_DMA_FROMDEVICE); + dma = dma_map_single(&pdev->dev, skb->data, rds_ring->dma_size, + DMA_FROM_DEVICE); - if (pci_dma_mapping_error(pdev, dma)) { + if (dma_mapping_error(&pdev->dev, dma)) { adapter->stats.rx_dma_map_error++; dev_kfree_skb_any(skb); return -ENOMEM; @@ -903,13 +903,13 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, buffer = &tx_ring->cmd_buf_arr[sw_consumer]; if (buffer->skb) { frag = &buffer->frag_array[0]; - pci_unmap_single(pdev, frag->dma, frag->length, - PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, frag->dma, frag->length, + DMA_TO_DEVICE); frag->dma = 0ULL; for (i = 1; i < buffer->frag_count; i++) { frag++; - pci_unmap_page(pdev, frag->dma, frag->length, - PCI_DMA_TODEVICE); + dma_unmap_page(&pdev->dev, frag->dma, + frag->length, DMA_TO_DEVICE); frag->dma = 0ULL; } tx_ring->tx_stats.xmit_finished++; @@ -1147,8 +1147,8 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, return NULL; } - pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&adapter->pdev->dev, buffer->dma, ring->dma_size, + DMA_FROM_DEVICE); skb = buffer->skb; if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index a4fa507903ee..75960a29f80e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2343,11 +2343,9 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac) { - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) *pci_using_dac = 1; - else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) *pci_using_dac = 0; else { dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n"); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index ad655f0a4965..9015a38eaced 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -377,7 +377,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_start_xmit = emac_start_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = emac_change_mtu, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_tx_timeout = emac_tx_timeout, .ndo_get_stats64 = emac_get_stats64, .ndo_set_features = emac_set_features, diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index b64c254e00ba..8427fe1b8fd1 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -434,7 +434,7 @@ qcaspi_receive(struct qcaspi *qca) skb_put(qca->rx_skb, retcode); qca->rx_skb->protocol = eth_type_trans( qca->rx_skb, qca->rx_skb->dev); - qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_checksum_none_assert(qca->rx_skb); netif_rx_ni(qca->rx_skb); qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, net_dev->mtu + VLAN_ETH_HLEN); diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c 
b/drivers/net/ethernet/qualcomm/qca_uart.c index bcdeca7b3366..ce3f7ce31adc 100644 --- a/drivers/net/ethernet/qualcomm/qca_uart.c +++ b/drivers/net/ethernet/qualcomm/qca_uart.c @@ -107,7 +107,7 @@ qca_tty_receive(struct serdev_device *serdev, const unsigned char *data, skb_put(qca->rx_skb, retcode); qca->rx_skb->protocol = eth_type_trans( qca->rx_skb, qca->rx_skb->dev); - qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_checksum_none_assert(qca->rx_skb); netif_rx_ni(qca->rx_skb); qca->rx_skb = netdev_alloc_skb_ip_align(netdev, netdev->mtu + diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 47e9998b62f0..4b2eca5e08e2 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -954,7 +954,7 @@ static const struct net_device_ops r6040_netdev_ops = { .ndo_set_rx_mode = r6040_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_tx_timeout = r6040_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = r6040_poll_controller, diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 9677e257e9a1..2b84b4565e64 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -514,7 +514,7 @@ static int cp_rx_poll(struct napi_struct *napi, int budget) } new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { dev->stats.rx_dropped++; kfree_skb(new_skb); @@ -522,7 +522,7 @@ static int cp_rx_poll(struct napi_struct *napi, int budget) } dma_unmap_single(&cp->pdev->dev, mapping, - buflen, PCI_DMA_FROMDEVICE); + buflen, DMA_FROM_DEVICE); /* Handle checksum offloading for incoming packets. 
*/ if (cp_rx_csum_ok(status)) @@ -666,7 +666,7 @@ static void cp_tx (struct cp_private *cp) dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), cp->tx_opts[tx_tail] & 0xffff, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (status & LastFrag) { if (status & (TxError | TxFIFOUnder)) { @@ -724,7 +724,7 @@ static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb, txd = &cp->tx_ring[index]; this_frag = &skb_shinfo(skb)->frags[frag]; dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), - skb_frag_size(this_frag), PCI_DMA_TODEVICE); + skb_frag_size(this_frag), DMA_TO_DEVICE); } } @@ -781,7 +781,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, dma_addr_t mapping; len = skb->len; - mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); + mapping = dma_map_single(&cp->pdev->dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(&cp->pdev->dev, mapping)) goto out_dma_error; @@ -810,7 +810,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, first_eor = eor; first_len = skb_headlen(skb); first_mapping = dma_map_single(&cp->pdev->dev, skb->data, - first_len, PCI_DMA_TODEVICE); + first_len, DMA_TO_DEVICE); if (dma_mapping_error(&cp->pdev->dev, first_mapping)) goto out_dma_error; @@ -826,7 +826,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, len = skb_frag_size(this_frag); mapping = dma_map_single(&cp->pdev->dev, skb_frag_address(this_frag), - len, PCI_DMA_TODEVICE); + len, DMA_TO_DEVICE); if (dma_mapping_error(&cp->pdev->dev, mapping)) { unwind_tx_frag_mapping(cp, skb, first_entry, entry); goto out_dma_error; @@ -1069,7 +1069,7 @@ static int cp_refill_rx(struct cp_private *cp) goto err_out; mapping = dma_map_single(&cp->pdev->dev, skb->data, - cp->rx_buf_sz, PCI_DMA_FROMDEVICE); + cp->rx_buf_sz, DMA_FROM_DEVICE); if (dma_mapping_error(&cp->pdev->dev, mapping)) { kfree_skb(skb); goto err_out; @@ -1139,7 +1139,7 @@ static void cp_clean_rings (struct cp_private *cp) if (cp->rx_skb[i]) { desc = cp->rx_ring + i; dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), - cp->rx_buf_sz, PCI_DMA_FROMDEVICE); + cp->rx_buf_sz, DMA_FROM_DEVICE); dev_kfree_skb_any(cp->rx_skb[i]); } } @@ -1151,7 +1151,7 @@ static void cp_clean_rings (struct cp_private *cp) desc = cp->tx_ring + i; dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), le32_to_cpu(desc->opts1) & 0xffff, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (le32_to_cpu(desc->opts1) & LastFrag) dev_kfree_skb_any(skb); cp->dev->stats.tx_dropped++; @@ -1869,7 +1869,7 @@ static const struct net_device_ops cp_netdev_ops = { .ndo_set_mac_address = cp_set_mac_address, .ndo_set_rx_mode = cp_set_rx_mode, .ndo_get_stats = cp_get_stats, - .ndo_do_ioctl = cp_ioctl, + .ndo_eth_ioctl = cp_ioctl, .ndo_start_xmit = cp_start_xmit, .ndo_tx_timeout = cp_tx_timeout, .ndo_set_features = cp_set_features, @@ -1945,24 +1945,17 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) /* Configure DMA attributes. 
*/ if ((sizeof(dma_addr_t) > 4) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { pci_using_dac = 0; - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_out_res; } - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_err(&pdev->dev, - "No usable consistent DMA configuration, aborting\n"); - goto err_out_res; - } } cp->cpcmd = (pci_using_dac ? PCIDAC : 0) | diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index f0608f050050..2e6923cc653e 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -932,7 +932,7 @@ static const struct net_device_ops rtl8139_netdev_ops = { .ndo_set_mac_address = rtl8139_set_mac_address, .ndo_start_xmit = rtl8139_start_xmit, .ndo_set_rx_mode = rtl8139_set_rx_mode, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = rtl8139_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = rtl8139_poll_controller, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 4d8e337f5085..46a6ff9a782d 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1749,7 +1749,10 @@ rtl_coalesce_info(struct rtl8169_private *tp) return ERR_PTR(-ELNRNG); } -static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +static int rtl_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct rtl8169_private *tp = netdev_priv(dev); const struct rtl_coalesce_info *ci; @@ -1807,7 +1810,10 @@ static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec, return -ERANGE; } -static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +static int rtl_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct rtl8169_private *tp = netdev_priv(dev); u32 tx_fr = ec->tx_max_coalesced_frames; @@ -2598,7 +2604,7 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) RTL_R32(tp, CSIDR) : ~0; } -static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) +static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val) { struct pci_dev *pdev = tp->pci_dev; u32 csi; @@ -2606,6 +2612,8 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) /* According to Realtek the value at config space address 0x070f * controls the L0s/L1 entrance latency. We try standard ECAM access * first and if it fails fall back to CSI. + * bit 0..2: L0: 0 = 1us, 1 = 2us .. 6 = 7us, 7 = 7us (no typo) + * bit 3..5: L1: 0 = 1us, 1 = 2us .. 
6 = 64us, 7 = 64us */ if (pdev->cfg_size > 0x070f && pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) @@ -2619,7 +2627,8 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) { - rtl_csi_access_enable(tp, 0x27); + /* L0 7us, L1 16us */ + rtl_set_aspm_entry_latency(tp, 0x27); } struct ephy_info { @@ -2660,6 +2669,34 @@ static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp) RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23); } +static void rtl_enable_exit_l1(struct rtl8169_private *tp) +{ + /* Bits control which events trigger ASPM L1 exit: + * Bit 12: rxdv + * Bit 11: ltr_msg + * Bit 10: txdma_poll + * Bit 9: xadm + * Bit 8: pktavi + * Bit 7: txpla + */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + rtl_eri_set_bits(tp, 0xd4, 0x1f00); + break; + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38: + rtl_eri_set_bits(tp, 0xd4, 0x0c00); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + rtl_eri_set_bits(tp, 0xd4, 0x1f80); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: + r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80); + break; + default: + break; + } +} + static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) { /* Don't enable ASPM in the chip if OS can't control ASPM */ @@ -2848,7 +2885,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); - rtl_eri_set_bits(tp, 0x0d4, 0x1f00); rtl_eri_set_bits(tp, 0x1d0, BIT(1)); rtl_reset_packet_filter(tp); rtl_eri_set_bits(tp, 0x1b0, BIT(4)); @@ -2905,8 +2941,6 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) rtl_hw_start_8168f(tp); rtl_ephy_init(tp, e_info_8168f_1); - - rtl_eri_set_bits(tp, 0x0d4, 0x1f00); } static void rtl_hw_start_8411(struct rtl8169_private *tp) @@ -2923,8 +2957,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) rtl_pcie_state_l2l3_disable(tp); rtl_ephy_init(tp, e_info_8168f_1); - - rtl_eri_set_bits(tp, 0x0d4, 0x0c00); } static void rtl_hw_start_8168g(struct rtl8169_private *tp) @@ -2941,7 +2973,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); - rtl_eri_set_bits(tp, 0x0d4, 0x1f80); rtl8168_config_eee_mac(tp); @@ -3172,7 +3203,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); - rtl_eri_set_bits(tp, 0xd4, 0x1f00); rtl_eri_set_bits(tp, 0xdc, 0x001c); rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); @@ -3226,8 +3256,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); - rtl_eri_set_bits(tp, 0xd4, 0x1f80); - rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); @@ -3329,7 +3357,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); - rtl_eri_set_bits(tp, 0xd4, 0x1f90); + rtl_eri_set_bits(tp, 0xd4, 0x0010); rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); @@ -3502,8 +3530,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); - /* The default value is 0x13. 
Change it to 0x2f */ - rtl_csi_access_enable(tp, 0x2f); + /* L0 7us, L1 32us - needed to avoid issues with link-up detection */ + rtl_set_aspm_entry_latency(tp, 0x2f); rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); @@ -3560,7 +3588,6 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); - r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00); r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); @@ -3783,6 +3810,7 @@ static void rtl_hw_start(struct rtl8169_private *tp) else rtl_hw_start_8168(tp); + rtl_enable_exit_l1(tp); rtl_set_rx_max_size(tp); rtl_set_rx_tx_desc_registers(tp); rtl_lock_config_regs(tp); @@ -4983,7 +5011,7 @@ static const struct net_device_ops rtl_netdev_ops = { .ndo_fix_features = rtl8169_fix_features, .ndo_set_features = rtl8169_set_features, .ndo_set_mac_address = rtl_set_mac_address, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_rx_mode = rtl_set_rx_mode, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = rtl8169_netpoll, @@ -5278,11 +5306,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - /* Disable ASPM completely as that causes random devices to stop + /* Disable ASPM L1 as that causes random devices to stop * working, as well as full system hangs, for some PCIe device users. */ - rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | - PCIE_LINK_STATE_L1); + rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1); tp->aspm_manageable = !rc; /* enable device (incl. PCI PM wakeup and hotplug setup) */ diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 5a2a4af31812..8008b2f45934 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -32,11 +32,11 @@ config SH_ETH config RAVB tristate "Renesas Ethernet AVB support" depends on ARCH_RENESAS || COMPILE_TEST + depends on PTP_1588_CLOCK_OPTIONAL select CRC32 select MII select MDIO_BITBANG select PHYLIB - imply PTP_1588_CLOCK help Renesas Ethernet AVB device driver.
This driver supports the following SoCs: diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 80e62ca2e3d3..47c5377e4f42 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -956,10 +956,6 @@ enum RAVB_QUEUE { #define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16)) -/* TX descriptors per packet */ -#define NUM_TX_DESC_GEN2 2 -#define NUM_TX_DESC_GEN3 1 - struct ravb_tstamp_skb { struct list_head list; struct sk_buff *skb; @@ -983,9 +979,29 @@ struct ravb_ptp { struct ravb_ptp_perout perout[N_PER_OUT]; }; -enum ravb_chip_id { - RCAR_GEN2, - RCAR_GEN3, +struct ravb_hw_info { + void (*rx_ring_free)(struct net_device *ndev, int q); + void (*rx_ring_format)(struct net_device *ndev, int q); + void *(*alloc_rx_desc)(struct net_device *ndev, int q); + bool (*receive)(struct net_device *ndev, int *quota, int q); + void (*set_rate)(struct net_device *ndev); + int (*set_rx_csum_feature)(struct net_device *ndev, netdev_features_t features); + void (*dmac_init)(struct net_device *ndev); + void (*emac_init)(struct net_device *ndev); + const char (*gstrings_stats)[ETH_GSTRING_LEN]; + size_t gstrings_size; + netdev_features_t net_hw_features; + netdev_features_t net_features; + int stats_len; + size_t max_rx_len; + unsigned aligned_tx: 1; + + /* hardware features */ + unsigned internal_delay:1; /* AVB-DMAC has internal delays */ + unsigned tx_counters:1; /* E-MAC has TX counters */ + unsigned multi_irqs:1; /* AVB-DMAC and E-MAC has multiple irqs */ + unsigned no_ptp_cfg_active:1; /* AVB-DMAC does not support gPTP active in config mode */ + unsigned ptp_cfg_active:1; /* AVB-DMAC has gPTP support active in config mode */ }; struct ravb_private { @@ -1029,7 +1045,6 @@ struct ravb_private { int msg_enable; int speed; int emac_irq; - enum ravb_chip_id chip_id; int rx_irqs[NUM_RX_QUEUE]; int tx_irqs[NUM_TX_QUEUE]; @@ -1039,7 +1054,10 @@ struct ravb_private { unsigned rxcidm:1; /* RX Clock Internal Delay Mode */ unsigned txcidm:1; /* TX Clock Internal Delay Mode */ unsigned rgmii_override:1; /* Deprecated rgmii-*id behavior */ - int num_tx_desc; /* TX descriptors per packet */ + unsigned int num_tx_desc; /* TX descriptors per packet */ + + const struct ravb_hw_info *info; + struct reset_control *rstc; }; static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 805397088850..0f85f2d97b18 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/sys_soc.h> +#include <linux/reset.h> #include <asm/div64.h> @@ -177,10 +178,10 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) { struct ravb_private *priv = netdev_priv(ndev); struct net_device_stats *stats = &priv->stats[q]; - int num_tx_desc = priv->num_tx_desc; + unsigned int num_tx_desc = priv->num_tx_desc; struct ravb_tx_desc *desc; + unsigned int entry; int free_num = 0; - int entry; u32 size; for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { @@ -216,31 +217,42 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) return free_num; } -/* Free skb's and DMA buffers for Ethernet AVB */ -static void ravb_ring_free(struct net_device *ndev, int q) +static void ravb_rx_ring_free(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); - int 
num_tx_desc = priv->num_tx_desc; - int ring_size; - int i; + unsigned int ring_size; + unsigned int i; + + if (!priv->rx_ring[q]) + return; - if (priv->rx_ring[q]) { - for (i = 0; i < priv->num_rx_ring[q]; i++) { - struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; + for (i = 0; i < priv->num_rx_ring[q]; i++) { + struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; - if (!dma_mapping_error(ndev->dev.parent, - le32_to_cpu(desc->dptr))) - dma_unmap_single(ndev->dev.parent, - le32_to_cpu(desc->dptr), - RX_BUF_SZ, - DMA_FROM_DEVICE); - } - ring_size = sizeof(struct ravb_ex_rx_desc) * - (priv->num_rx_ring[q] + 1); - dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], - priv->rx_desc_dma[q]); - priv->rx_ring[q] = NULL; + if (!dma_mapping_error(ndev->dev.parent, + le32_to_cpu(desc->dptr))) + dma_unmap_single(ndev->dev.parent, + le32_to_cpu(desc->dptr), + RX_BUF_SZ, + DMA_FROM_DEVICE); } + ring_size = sizeof(struct ravb_ex_rx_desc) * + (priv->num_rx_ring[q] + 1); + dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], + priv->rx_desc_dma[q]); + priv->rx_ring[q] = NULL; +} + +/* Free skb's and DMA buffers for Ethernet AVB */ +static void ravb_ring_free(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + unsigned int num_tx_desc = priv->num_tx_desc; + unsigned int ring_size; + unsigned int i; + + info->rx_ring_free(ndev, q); if (priv->tx_ring[q]) { ravb_tx_free(ndev, q, false); @@ -271,24 +283,13 @@ static void ravb_ring_free(struct net_device *ndev, int q) priv->tx_skb[q] = NULL; } -/* Format skb and descriptor buffer for Ethernet AVB */ -static void ravb_ring_format(struct net_device *ndev, int q) +static void ravb_rx_ring_format(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); - int num_tx_desc = priv->num_tx_desc; struct ravb_ex_rx_desc *rx_desc; - struct ravb_tx_desc *tx_desc; - struct ravb_desc *desc; - int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; - int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * - num_tx_desc; + unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; dma_addr_t dma_addr; - int i; - - priv->cur_rx[q] = 0; - priv->cur_tx[q] = 0; - priv->dirty_rx[q] = 0; - priv->dirty_tx[q] = 0; + unsigned int i; memset(priv->rx_ring[q], 0, rx_ring_size); /* Build RX ring buffer */ @@ -310,6 +311,26 @@ static void ravb_ring_format(struct net_device *ndev, int q) rx_desc = &priv->rx_ring[q][i]; rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); rx_desc->die_dt = DT_LINKFIX; /* type */ +} + +/* Format skb and descriptor buffer for Ethernet AVB */ +static void ravb_ring_format(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + unsigned int num_tx_desc = priv->num_tx_desc; + struct ravb_tx_desc *tx_desc; + struct ravb_desc *desc; + unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * + num_tx_desc; + unsigned int i; + + priv->cur_rx[q] = 0; + priv->cur_tx[q] = 0; + priv->dirty_rx[q] = 0; + priv->dirty_tx[q] = 0; + + info->rx_ring_format(ndev, q); memset(priv->tx_ring[q], 0, tx_ring_size); /* Build TX ring buffer */ @@ -335,14 +356,28 @@ static void ravb_ring_format(struct net_device *ndev, int q) desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); } +static void *ravb_alloc_rx_desc(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned int ring_size; + + ring_size = 
sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); + + priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, + &priv->rx_desc_dma[q], + GFP_KERNEL); + return priv->rx_ring[q]; +} + /* Init skb and descriptor buffer for Ethernet AVB */ static int ravb_ring_init(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); - int num_tx_desc = priv->num_tx_desc; + const struct ravb_hw_info *info = priv->info; + unsigned int num_tx_desc = priv->num_tx_desc; + unsigned int ring_size; struct sk_buff *skb; - int ring_size; - int i; + unsigned int i; /* Allocate RX and TX skb rings */ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], @@ -353,7 +388,7 @@ static int ravb_ring_init(struct net_device *ndev, int q) goto error; for (i = 0; i < priv->num_rx_ring[q]; i++) { - skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1); + skb = netdev_alloc_skb(ndev, info->max_rx_len); if (!skb) goto error; ravb_set_buffer_align(skb); @@ -369,11 +404,7 @@ static int ravb_ring_init(struct net_device *ndev, int q) } /* Allocate all RX descriptors. */ - ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); - priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, - &priv->rx_desc_dma[q], - GFP_KERNEL); - if (!priv->rx_ring[q]) + if (!info->alloc_rx_desc(ndev, q)) goto error; priv->dirty_rx[q] = 0; @@ -395,8 +426,7 @@ error: return -ENOMEM; } -/* E-MAC init function */ -static void ravb_emac_init(struct net_device *ndev) +static void ravb_rcar_emac_init(struct net_device *ndev) { /* Receive frame limit set register */ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); @@ -422,29 +452,19 @@ static void ravb_emac_init(struct net_device *ndev) ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR); } -/* Device init function for Ethernet AVB */ -static int ravb_dmac_init(struct net_device *ndev) +/* E-MAC init function */ +static void ravb_emac_init(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - int error; + const struct ravb_hw_info *info = priv->info; - /* Set CONFIG mode */ - error = ravb_config(ndev); - if (error) - return error; - - error = ravb_ring_init(ndev, RAVB_BE); - if (error) - return error; - error = ravb_ring_init(ndev, RAVB_NC); - if (error) { - ravb_ring_free(ndev, RAVB_BE); - return error; - } + info->emac_init(ndev); +} - /* Descriptor format */ - ravb_ring_format(ndev, RAVB_BE); - ravb_ring_format(ndev, RAVB_NC); +static void ravb_rcar_dmac_init(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; /* Set AVB RX */ ravb_write(ndev, @@ -457,7 +477,7 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_write(ndev, TCCR_TFEN, TCCR); /* Interrupt init: */ - if (priv->chip_id == RCAR_GEN3) { + if (info->multi_irqs) { /* Clear DIL.DPLx */ ravb_write(ndev, 0, DIL); /* Set queue specific interrupt */ @@ -471,6 +491,34 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); /* Frame transmitted, timestamp FIFO updated */ ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC); +} + +/* Device init function for Ethernet AVB */ +static int ravb_dmac_init(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + int error; + + /* Set CONFIG mode */ + error = ravb_config(ndev); + if (error) + return error; + + error = ravb_ring_init(ndev, RAVB_BE); + if (error) + return error; + 
error = ravb_ring_init(ndev, RAVB_NC); + if (error) { + ravb_ring_free(ndev, RAVB_BE); + return error; + } + + /* Descriptor format */ + ravb_ring_format(ndev, RAVB_BE); + ravb_ring_format(ndev, RAVB_NC); + + info->dmac_init(ndev); /* Setting the control will start the AVB-DMAC process. */ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION); @@ -531,10 +579,10 @@ static void ravb_rx_csum(struct sk_buff *skb) skb_trim(skb, skb->len - sizeof(__sum16)); } -/* Packet receive function for Ethernet AVB */ -static bool ravb_rx(struct net_device *ndev, int *quota, int q) +static bool ravb_rcar_rx(struct net_device *ndev, int *quota, int q) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; int entry = priv->cur_rx[q] % priv->num_rx_ring[q]; int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) - priv->cur_rx[q]; @@ -619,9 +667,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) desc->ds_cc = cpu_to_le16(RX_BUF_SZ); if (!priv->rx_skb[q][entry]) { - skb = netdev_alloc_skb(ndev, - RX_BUF_SZ + - RAVB_ALIGN - 1); + skb = netdev_alloc_skb(ndev, info->max_rx_len); if (!skb) break; /* Better luck next round. */ ravb_set_buffer_align(skb); @@ -647,6 +693,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) return boguscnt <= 0; } +/* Packet receive function for Ethernet AVB */ +static bool ravb_rx(struct net_device *ndev, int *quota, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + + return info->receive(ndev, quota, q); +} + static void ravb_rcv_snd_disable(struct net_device *ndev) { /* Disable TX and RX */ @@ -758,6 +813,7 @@ static void ravb_error_interrupt(struct net_device *ndev) static bool ravb_queue_interrupt(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; u32 ris0 = ravb_read(ndev, RIS0); u32 ric0 = ravb_read(ndev, RIC0); u32 tis = ravb_read(ndev, TIS); @@ -766,7 +822,7 @@ static bool ravb_queue_interrupt(struct net_device *ndev, int q) if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) { if (napi_schedule_prep(&priv->napi[q])) { /* Mask RX and TX interrupts */ - if (priv->chip_id == RCAR_GEN2) { + if (!info->multi_irqs) { ravb_write(ndev, ric0 & ~BIT(q), RIC0); ravb_write(ndev, tic & ~BIT(q), TIC); } else { @@ -909,6 +965,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) { struct net_device *ndev = napi->dev; struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; unsigned long flags; int q = napi - priv->napi; int mask = BIT(q); @@ -932,7 +989,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Re-enable RX/TX interrupts */ spin_lock_irqsave(&priv->lock, flags); - if (priv->chip_id == RCAR_GEN2) { + if (!info->multi_irqs) { ravb_modify(ndev, RIC0, mask, mask); ravb_modify(ndev, TIC, mask, mask); } else { @@ -956,6 +1013,7 @@ out: static void ravb_adjust_link(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; struct phy_device *phydev = ndev->phydev; bool new_state = false; unsigned long flags; @@ -970,7 +1028,7 @@ static void ravb_adjust_link(struct net_device *ndev) if (phydev->speed != priv->speed) { new_state = true; priv->speed = phydev->speed; - ravb_set_rate(ndev); + info->set_rate(ndev); } if (!priv->link) { ravb_modify(ndev, ECMR, ECMR_TXF, 0); @@ -1133,13 +1191,14 @@ static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = 
{ "rx_queue_1_over_errors", }; -#define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats) - static int ravb_get_sset_count(struct net_device *netdev, int sset) { + struct ravb_private *priv = netdev_priv(netdev); + const struct ravb_hw_info *info = priv->info; + switch (sset) { case ETH_SS_STATS: - return RAVB_STATS_LEN; + return info->stats_len; default: return -EOPNOTSUPP; } @@ -1176,9 +1235,12 @@ static void ravb_get_ethtool_stats(struct net_device *ndev, static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + switch (stringset) { case ETH_SS_STATS: - memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats)); + memcpy(data, info->gstrings_stats, info->gstrings_size); break; } } @@ -1198,6 +1260,7 @@ static int ravb_set_ringparam(struct net_device *ndev, struct ethtool_ringparam *ring) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; int error; if (ring->tx_pending > BE_TX_RING_MAX || @@ -1211,7 +1274,7 @@ static int ravb_set_ringparam(struct net_device *ndev, if (netif_running(ndev)) { netif_device_detach(ndev); /* Stop PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ error = ravb_stop_dma(ndev); @@ -1243,7 +1306,7 @@ static int ravb_set_ringparam(struct net_device *ndev, ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_init(ndev, priv->pdev); netif_device_attach(ndev); @@ -1334,6 +1397,7 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, static int ravb_open(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; struct platform_device *pdev = priv->pdev; struct device *dev = &pdev->dev; int error; @@ -1341,7 +1405,7 @@ static int ravb_open(struct net_device *ndev) napi_enable(&priv->napi[RAVB_BE]); napi_enable(&priv->napi[RAVB_NC]); - if (priv->chip_id == RCAR_GEN2) { + if (!info->multi_irqs) { error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name, ndev); if (error) { @@ -1382,7 +1446,7 @@ static int ravb_open(struct net_device *ndev) ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); @@ -1396,10 +1460,10 @@ static int ravb_open(struct net_device *ndev) out_ptp_stop: /* Stop PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_stop(ndev); out_free_irq_nc_tx: - if (priv->chip_id == RCAR_GEN2) + if (!info->multi_irqs) goto out_free_irq; free_irq(priv->tx_irqs[RAVB_NC], ndev); out_free_irq_nc_rx: @@ -1437,13 +1501,14 @@ static void ravb_tx_timeout_work(struct work_struct *work) { struct ravb_private *priv = container_of(work, struct ravb_private, work); + const struct ravb_hw_info *info = priv->info; struct net_device *ndev = priv->ndev; int error; netif_tx_stop_all_queues(ndev); /* Stop PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ @@ -1478,7 +1543,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) out: /* Initialise PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); @@ 
-1488,7 +1553,7 @@ out: static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - int num_tx_desc = priv->num_tx_desc; + unsigned int num_tx_desc = priv->num_tx_desc; u16 q = skb_get_queue_mapping(skb); struct ravb_tstamp_skb *ts_skb; struct ravb_tx_desc *desc; @@ -1628,13 +1693,14 @@ static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, static struct net_device_stats *ravb_get_stats(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; struct net_device_stats *nstats, *stats0, *stats1; nstats = &ndev->stats; stats0 = &priv->stats[RAVB_BE]; stats1 = &priv->stats[RAVB_NC]; - if (priv->chip_id == RCAR_GEN3) { + if (info->tx_counters) { nstats->tx_dropped += ravb_read(ndev, TROCR); ravb_write(ndev, 0, TROCR); /* (write clear) */ } @@ -1675,6 +1741,7 @@ static int ravb_close(struct net_device *ndev) { struct device_node *np = ndev->dev.parent->of_node; struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; struct ravb_tstamp_skb *ts_skb, *ts_skb2; netif_tx_stop_all_queues(ndev); @@ -1685,7 +1752,7 @@ static int ravb_close(struct net_device *ndev) ravb_write(ndev, 0, TIC); /* Stop PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_stop(ndev); /* Set the config mode to stop the AVB-DMAC's processes */ @@ -1708,7 +1775,7 @@ static int ravb_close(struct net_device *ndev) of_phy_deregister_fixed_link(np); } - if (priv->chip_id != RCAR_GEN2) { + if (info->multi_irqs) { free_irq(priv->tx_irqs[RAVB_NC], ndev); free_irq(priv->rx_irqs[RAVB_NC], ndev); free_irq(priv->tx_irqs[RAVB_BE], ndev); @@ -1851,8 +1918,8 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable) spin_unlock_irqrestore(&priv->lock, flags); } -static int ravb_set_features(struct net_device *ndev, - netdev_features_t features) +static int ravb_set_features_rx_csum(struct net_device *ndev, + netdev_features_t features) { netdev_features_t changed = ndev->features ^ features; @@ -1864,6 +1931,15 @@ static int ravb_set_features(struct net_device *ndev, return 0; } +static int ravb_set_features(struct net_device *ndev, + netdev_features_t features) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + + return info->set_rx_csum_feature(ndev, features); +} + static const struct net_device_ops ravb_netdev_ops = { .ndo_open = ravb_open, .ndo_stop = ravb_close, @@ -1872,7 +1948,7 @@ static const struct net_device_ops ravb_netdev_ops = { .ndo_get_stats = ravb_get_stats, .ndo_set_rx_mode = ravb_set_rx_mode, .ndo_tx_timeout = ravb_tx_timeout, - .ndo_do_ioctl = ravb_do_ioctl, + .ndo_eth_ioctl = ravb_do_ioctl, .ndo_change_mtu = ravb_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, @@ -1924,12 +2000,52 @@ static int ravb_mdio_release(struct ravb_private *priv) return 0; } +static const struct ravb_hw_info ravb_gen3_hw_info = { + .rx_ring_free = ravb_rx_ring_free, + .rx_ring_format = ravb_rx_ring_format, + .alloc_rx_desc = ravb_alloc_rx_desc, + .receive = ravb_rcar_rx, + .set_rate = ravb_set_rate, + .set_rx_csum_feature = ravb_set_features_rx_csum, + .dmac_init = ravb_rcar_dmac_init, + .emac_init = ravb_rcar_emac_init, + .gstrings_stats = ravb_gstrings_stats, + .gstrings_size = sizeof(ravb_gstrings_stats), + .net_hw_features = NETIF_F_RXCSUM, + .net_features = NETIF_F_RXCSUM, + .stats_len = 
ARRAY_SIZE(ravb_gstrings_stats), + .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, + .internal_delay = 1, + .tx_counters = 1, + .multi_irqs = 1, + .ptp_cfg_active = 1, +}; + +static const struct ravb_hw_info ravb_gen2_hw_info = { + .rx_ring_free = ravb_rx_ring_free, + .rx_ring_format = ravb_rx_ring_format, + .alloc_rx_desc = ravb_alloc_rx_desc, + .receive = ravb_rcar_rx, + .set_rate = ravb_set_rate, + .set_rx_csum_feature = ravb_set_features_rx_csum, + .dmac_init = ravb_rcar_dmac_init, + .emac_init = ravb_rcar_emac_init, + .gstrings_stats = ravb_gstrings_stats, + .gstrings_size = sizeof(ravb_gstrings_stats), + .net_hw_features = NETIF_F_RXCSUM, + .net_features = NETIF_F_RXCSUM, + .stats_len = ARRAY_SIZE(ravb_gstrings_stats), + .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, + .aligned_tx = 1, + .no_ptp_cfg_active = 1, +}; + static const struct of_device_id ravb_match_table[] = { - { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 }, - { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 }, - { .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 }, - { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 }, - { .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 }, + { .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info }, + { .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info }, + { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info }, + { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info }, + { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info }, { } }; MODULE_DEVICE_TABLE(of, ravb_match_table); @@ -1962,8 +2078,9 @@ static int ravb_set_gti(struct net_device *ndev) static void ravb_set_config_mode(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; - if (priv->chip_id == RCAR_GEN2) { + if (info->no_ptp_cfg_active) { ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); /* Set CSEL value */ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); @@ -1973,13 +2090,6 @@ static void ravb_set_config_mode(struct net_device *ndev) } } -static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = { - { .soc_id = "r8a774c0" }, - { .soc_id = "r8a77990" }, - { .soc_id = "r8a77995" }, - { /* sentinel */ } -}; - /* Set tx and rx clock internal delay modes */ static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev) { @@ -2010,12 +2120,8 @@ static void ravb_parse_delay_mode(struct device_node *np, struct net_device *nde if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) { - if (!WARN(soc_device_match(ravb_delay_mode_quirk_match), - "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. 
Please update device tree", - phy_modes(priv->phy_interface))) { - priv->txcidm = 1; - priv->rgmii_override = 1; - } + priv->txcidm = 1; + priv->rgmii_override = 1; } } @@ -2034,8 +2140,9 @@ static void ravb_set_delay_mode(struct net_device *ndev) static int ravb_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; + const struct ravb_hw_info *info; + struct reset_control *rstc; struct ravb_private *priv; - enum ravb_chip_id chip_id; struct net_device *ndev; int error, irq, q; struct resource *res; @@ -2047,20 +2154,26 @@ static int ravb_probe(struct platform_device *pdev) return -EINVAL; } + rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); + if (IS_ERR(rstc)) + return dev_err_probe(&pdev->dev, PTR_ERR(rstc), + "failed to get cpg reset\n"); + ndev = alloc_etherdev_mqs(sizeof(struct ravb_private), NUM_TX_QUEUE, NUM_RX_QUEUE); if (!ndev) return -ENOMEM; - ndev->features = NETIF_F_RXCSUM; - ndev->hw_features = NETIF_F_RXCSUM; + info = of_device_get_match_data(&pdev->dev); + ndev->features = info->net_features; + ndev->hw_features = info->net_hw_features; + + reset_control_deassert(rstc); pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); - chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev); - - if (chip_id == RCAR_GEN3) + if (info->multi_irqs) irq = platform_get_irq_byname(pdev, "ch22"); else irq = platform_get_irq(pdev, 0); @@ -2073,6 +2186,8 @@ static int ravb_probe(struct platform_device *pdev) SET_NETDEV_DEV(ndev, &pdev->dev); priv = netdev_priv(ndev); + priv->info = info; + priv->rstc = rstc; priv->ndev = ndev; priv->pdev = pdev; priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; @@ -2099,7 +2214,7 @@ static int ravb_probe(struct platform_device *pdev) priv->avb_link_active_low = of_property_read_bool(np, "renesas,ether-link-active-low"); - if (chip_id == RCAR_GEN3) { + if (info->multi_irqs) { irq = platform_get_irq_byname(pdev, "ch24"); if (irq < 0) { error = irq; @@ -2124,8 +2239,6 @@ static int ravb_probe(struct platform_device *pdev) } } - priv->chip_id = chip_id; - priv->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(priv->clk)) { error = PTR_ERR(priv->clk); @@ -2142,8 +2255,12 @@ static int ravb_probe(struct platform_device *pdev) ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU; - priv->num_tx_desc = chip_id == RCAR_GEN2 ? - NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3; + /* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer + * Use two descriptor to handle such situation. First descriptor to + * handle aligned data buffer and second descriptor to handle the + * overflow data because of alignment. + */ + priv->num_tx_desc = info->aligned_tx ? 
2 : 1; /* Set function */ ndev->netdev_ops = &ravb_netdev_ops; @@ -2160,7 +2277,7 @@ static int ravb_probe(struct platform_device *pdev) /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); - if (priv->chip_id != RCAR_GEN2) { + if (info->internal_delay) { ravb_parse_delay_mode(np, ndev); ravb_set_delay_mode(ndev); } @@ -2184,7 +2301,7 @@ static int ravb_probe(struct platform_device *pdev) INIT_LIST_HEAD(&priv->ts_skb_list); /* Initialise PTP Clock driver */ - if (chip_id != RCAR_GEN2) + if (info->ptp_cfg_active) ravb_ptp_init(ndev, pdev); /* Debug message level */ @@ -2232,7 +2349,7 @@ out_dma_free: priv->desc_bat_dma); /* Stop PTP Clock driver */ - if (chip_id != RCAR_GEN2) + if (info->ptp_cfg_active) ravb_ptp_stop(ndev); out_disable_refclk: clk_disable_unprepare(priv->refclk); @@ -2241,6 +2358,7 @@ out_release: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); + reset_control_assert(rstc); return error; } @@ -2248,9 +2366,10 @@ static int ravb_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; /* Stop PTP Clock driver */ - if (priv->chip_id != RCAR_GEN2) + if (info->ptp_cfg_active) ravb_ptp_stop(ndev); clk_disable_unprepare(priv->refclk); @@ -2265,6 +2384,7 @@ static int ravb_remove(struct platform_device *pdev) netif_napi_del(&priv->napi[RAVB_BE]); ravb_mdio_release(priv); pm_runtime_disable(&pdev->dev); + reset_control_assert(priv->rstc); free_netdev(ndev); platform_set_drvdata(pdev, NULL); @@ -2333,6 +2453,7 @@ static int __maybe_unused ravb_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; int ret = 0; /* If WoL is enabled set reset mode to rearm the WoL logic */ @@ -2355,7 +2476,7 @@ static int __maybe_unused ravb_resume(struct device *dev) /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); - if (priv->chip_id != RCAR_GEN2) + if (info->internal_delay) ravb_set_delay_mode(ndev); /* Restore descriptor base address table */ diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 6984bd5b7da9..c099656dd75b 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -179,6 +179,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, { struct ravb_private *priv = container_of(ptp, struct ravb_private, ptp.info); + const struct ravb_hw_info *info = priv->info; struct net_device *ndev = priv->ndev; unsigned long flags; @@ -197,7 +198,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, priv->ptp.extts[req->index] = on; spin_lock_irqsave(&priv->lock, flags); - if (priv->chip_id == RCAR_GEN2) + if (!info->multi_irqs) ravb_modify(ndev, GIC, GIC_PTCE, on ? 
GIC_PTCE : 0); else if (on) ravb_write(ndev, GIE_PTCS, GIE); @@ -213,6 +214,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, { struct ravb_private *priv = container_of(ptp, struct ravb_private, ptp.info); + const struct ravb_hw_info *info = priv->info; struct net_device *ndev = priv->ndev; struct ravb_ptp_perout *perout; unsigned long flags; @@ -252,7 +254,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, error = ravb_ptp_update_compare(priv, (u32)start_ns); if (!error) { /* Unmask interrupt */ - if (priv->chip_id == RCAR_GEN2) + if (!info->multi_irqs) ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); else ravb_write(ndev, GIE_PTMS0, GIE); @@ -264,7 +266,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, perout->period = 0; /* Mask interrupt */ - if (priv->chip_id == RCAR_GEN2) + if (!info->multi_irqs) ravb_modify(ndev, GIC, GIC_PTME, 0); else ravb_write(ndev, GID_PTMD0, GID); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 840478692a37..6c8ba916d1a6 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3141,7 +3141,7 @@ static const struct net_device_ops sh_eth_netdev_ops = { .ndo_get_stats = sh_eth_get_stats, .ndo_set_rx_mode = sh_eth_set_rx_mode, .ndo_tx_timeout = sh_eth_tx_timeout, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_change_mtu = sh_eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, @@ -3157,7 +3157,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = { .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, .ndo_tx_timeout = sh_eth_tx_timeout, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_change_mtu = sh_eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index 315a6e5c0f59..e75814a4654f 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -119,7 +119,8 @@ struct rocker_world_ops { int (*port_obj_fdb_del)(struct rocker_port *rocker_port, u16 vid, const unsigned char *addr); int (*port_master_linked)(struct rocker_port *rocker_port, - struct net_device *master); + struct net_device *master, + struct netlink_ext_ack *extack); int (*port_master_unlinked)(struct rocker_port *rocker_port, struct net_device *master); int (*port_neigh_update)(struct rocker_port *rocker_port, diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 1f06b92ee5bb..3364b6a56bd1 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1670,13 +1670,14 @@ rocker_world_port_fdb_del(struct rocker_port *rocker_port, } static int rocker_world_port_master_linked(struct rocker_port *rocker_port, - struct net_device *master) + struct net_device *master, + struct netlink_ext_ack *extack) { struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_master_linked) return -EOPNOTSUPP; - return wops->port_master_linked(rocker_port, master); + return wops->port_master_linked(rocker_port, master, extack); } static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port, @@ -3107,6 +3108,7 @@ struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev, static int rocker_netdevice_event(struct notifier_block *unused, unsigned 
long event, void *ptr) { + struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct netdev_notifier_changeupper_info *info; struct rocker_port *rocker_port; @@ -3123,7 +3125,8 @@ static int rocker_netdevice_event(struct notifier_block *unused, rocker_port = netdev_priv(dev); if (info->linking) { err = rocker_world_port_master_linked(rocker_port, - info->upper_dev); + info->upper_dev, + extack); if (err) netdev_warn(dev, "failed to reflect master linked (err %d)\n", err); diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index e33a9d283a4e..3e1ca7a8d029 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2571,8 +2571,10 @@ static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port, } static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port, - struct net_device *bridge) + struct net_device *bridge, + struct netlink_ext_ack *extack) { + struct net_device *dev = ofdpa_port->dev; int err; /* Port is joining bridge, so the internal VLAN for the @@ -2592,13 +2594,21 @@ static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port, ofdpa_port->bridge_dev = bridge; - return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0); + err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0); + if (err) + return err; + + return switchdev_bridge_port_offload(dev, dev, NULL, NULL, NULL, + false, extack); } static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port) { + struct net_device *dev = ofdpa_port->dev; int err; + switchdev_bridge_port_unoffload(dev, NULL, NULL, NULL); + err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0); if (err) return err; @@ -2637,13 +2647,14 @@ static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port, } static int ofdpa_port_master_linked(struct rocker_port *rocker_port, - struct net_device *master) + struct net_device *master, + struct netlink_ext_ack *extack) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; int err = 0; if (netif_is_bridge_master(master)) - err = ofdpa_port_bridge_join(ofdpa_port, master); + err = ofdpa_port_bridge_join(ofdpa_port, master, extack); else if (netif_is_ovs_master(master)) err = ofdpa_port_ovs_changed(ofdpa_port, master); return err; diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig index 0582e110b1c0..2a6c2658d284 100644 --- a/drivers/net/ethernet/samsung/Kconfig +++ b/drivers/net/ethernet/samsung/Kconfig @@ -20,9 +20,9 @@ if NET_VENDOR_SAMSUNG config SXGBE_ETH tristate "Samsung 10G/2.5G/1G SXGBE Ethernet driver" depends on HAS_IOMEM && HAS_DMA + depends on PTP_1588_CLOCK_OPTIONAL select PHYLIB select CRC32 - imply PTP_1588_CLOCK help This is the driver for the SXGBE 10G Ethernet IP block found on Samsung platforms. 
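The get_coalesce/set_coalesce conversions repeated through this series (qlcnic and r8169 above, the sxgbe, sfc, falcon and netsec hunks that follow) are mechanical: both ethtool callbacks gain a struct kernel_ethtool_coalesce, which carries the netlink-only coalescing knobs, and a struct netlink_ext_ack for extended error reporting. A minimal sketch of a converted driver, with hypothetical foo_* names and FOO_MAX_RX_USECS limit; only the two signatures and the NULL-tolerant extack handling come from the interface itself:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

#define FOO_MAX_RX_USECS 1024	/* hypothetical hardware limit */

struct foo_priv {
	u32 rx_usecs;
};

static int foo_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* The legacy parameters still travel in ethtool_coalesce. */
	ec->rx_coalesce_usecs = priv->rx_usecs;
	return 0;
}

static int foo_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (ec->rx_coalesce_usecs > FOO_MAX_RX_USECS) {
		/* NL_SET_ERR_MSG_MOD is a no-op on a NULL extack, so
		 * internal callers may pass NULL for both new arguments,
		 * as the netsec change later in this series does.
		 */
		NL_SET_ERR_MSG_MOD(extack, "rx-usecs out of range");
		return -ERANGE;
	}
	priv->rx_usecs = ec->rx_coalesce_usecs;
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_coalesce = foo_get_coalesce,
	.set_coalesce = foo_set_coalesce,
};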
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c index 7f8b10c49660..98edb01024f0 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c @@ -274,7 +274,9 @@ static u32 sxgbe_usec2riwt(u32 usec, struct sxgbe_priv_data *priv) } static int sxgbe_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct sxgbe_priv_data *priv = netdev_priv(dev); @@ -285,7 +287,9 @@ static int sxgbe_get_coalesce(struct net_device *dev, } static int sxgbe_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct sxgbe_priv_data *priv = netdev_priv(dev); unsigned int rx_riwt; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 090bcd2fb758..6781aa636d58 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -1964,7 +1964,7 @@ static const struct net_device_ops sxgbe_netdev_ops = { .ndo_set_features = sxgbe_set_features, .ndo_set_rx_mode = sxgbe_set_rx_mode, .ndo_tx_timeout = sxgbe_tx_timeout, - .ndo_do_ioctl = sxgbe_ioctl, + .ndo_eth_ioctl = sxgbe_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = sxgbe_poll_controller, #endif diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index 5e37c8313725..97ce64079855 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -19,9 +19,9 @@ if NET_VENDOR_SOLARFLARE config SFC tristate "Solarflare SFC9000/SFC9100/EF100-family support" depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL select MDIO select CRC32 - imply PTP_1588_CLOCK help This driver supports 10/40-gigabit Ethernet cards based on the Solarflare SFC9000-family and SFC9100-family controllers. 
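Likewise, the .ndo_do_ioctl to .ndo_eth_ioctl renames that run through almost every driver in this series follow the core's split of the ioctl path: the networking core now routes the Ethernet-specific ioctls, i.e. the MII set (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) and the hardware-timestamping pair (SIOCSHWTSTAMP, SIOCGHWTSTAMP), to the new ndo_eth_ioctl hook, leaving ndo_do_ioctl for the remaining device-private ioctls. For a phylib-attached driver the conversion is a one-line move. A minimal sketch with hypothetical bar_* names:

#include <linux/netdevice.h>
#include <linux/phy.h>

static int bar_eth_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(ndev) || !ndev->phydev)
		return -ENODEV;

	/* phylib implements the SIOC[GS]MII* register accesses. */
	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
}

static const struct net_device_ops bar_netdev_ops = {
	/* ... open/stop/xmit and the other ops elided ... */
	/* was: .ndo_do_ioctl = bar_eth_ioctl, */
	.ndo_eth_ioctl	= bar_eth_ioctl,
};

Drivers with no logic of their own skip the wrapper and point .ndo_eth_ioctl directly at the generic phy_do_ioctl helper, or at phy_do_ioctl_running to reject ioctls while the interface is down, which is the pattern used by the emac, r6040, r8169 and sh_eth conversions above.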
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 37fcf2eb0741..a295e2621cf3 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -591,7 +591,7 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_tx_timeout = efx_watchdog, .ndo_start_xmit = efx_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = efx_ioctl, + .ndo_eth_ioctl = efx_ioctl, .ndo_change_mtu = efx_change_mtu, .ndo_set_mac_address = efx_set_mac_address, .ndo_set_rx_mode = efx_set_rx_mode, diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 058d9fe41d99..e002ce21788d 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -97,7 +97,9 @@ static void efx_ethtool_get_regs(struct net_device *net_dev, */ static int efx_ethtool_get_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct efx_nic *efx = netdev_priv(net_dev); unsigned int tx_usecs, rx_usecs; @@ -115,7 +117,9 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev, } static int efx_ethtool_set_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 9ec752a43c75..c177ea0f301e 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2219,7 +2219,7 @@ static const struct net_device_ops ef4_netdev_ops = { .ndo_tx_timeout = ef4_watchdog, .ndo_start_xmit = ef4_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = ef4_ioctl, + .ndo_eth_ioctl = ef4_ioctl, .ndo_change_mtu = ef4_change_mtu, .ndo_set_mac_address = ef4_set_mac_address, .ndo_set_rx_mode = ef4_set_rx_mode, diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c index a6bae6a234ba..137e8a7aeaa1 100644 --- a/drivers/net/ethernet/sfc/falcon/ethtool.c +++ b/drivers/net/ethernet/sfc/falcon/ethtool.c @@ -577,7 +577,9 @@ static int ef4_ethtool_nway_reset(struct net_device *net_dev) */ static int ef4_ethtool_get_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ef4_nic *efx = netdev_priv(net_dev); unsigned int tx_usecs, rx_usecs; @@ -595,7 +597,9 @@ static int ef4_ethtool_get_coalesce(struct net_device *net_dev, } static int ef4_ethtool_set_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ef4_nic *efx = netdev_priv(net_dev); struct ef4_channel *channel; diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 2b29fd4cbdf4..062f7844c496 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -820,7 +820,7 @@ static const struct net_device_ops ioc3_netdev_ops = { .ndo_tx_timeout = ioc3_timeout, .ndo_get_stats = ioc3_get_stats, .ndo_set_rx_mode = ioc3_set_multicast_list, - .ndo_do_ioctl = ioc3_ioctl, + .ndo_eth_ioctl = ioc3_ioctl, 
.ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ioc3_set_mac_address, }; diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 0c396ecd3389..efce834d8ee6 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -812,7 +812,7 @@ static const struct net_device_ops meth_netdev_ops = { .ndo_open = meth_open, .ndo_stop = meth_release, .ndo_start_xmit = meth_tx, - .ndo_do_ioctl = meth_ioctl, + .ndo_eth_ioctl = meth_ioctl, .ndo_tx_timeout = meth_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 676b193833c0..3d1a18a01ce5 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -1841,7 +1841,7 @@ static int sis190_mac_addr(struct net_device *dev, void *p) static const struct net_device_ops sis190_netdev_ops = { .ndo_open = sis190_open, .ndo_stop = sis190_close, - .ndo_do_ioctl = sis190_ioctl, + .ndo_eth_ioctl = sis190_ioctl, .ndo_start_xmit = sis190_start_xmit, .ndo_tx_timeout = sis190_tx_timeout, .ndo_set_rx_mode = sis190_set_rx_mode, diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index cff87de9178a..60a0c0e9ded2 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -404,7 +404,7 @@ static const struct net_device_ops sis900_netdev_ops = { .ndo_set_rx_mode = set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, - .ndo_do_ioctl = mii_ioctl, + .ndo_eth_ioctl = mii_ioctl, .ndo_tx_timeout = sis900_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = sis900_poll, diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index c52a38df0e0d..72e42a868346 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -23,6 +23,7 @@ config SMC9194 tristate "SMC 9194 support" depends on ISA select CRC32 + select NETDEV_LEGACY_INIT help This is support for the SMC9xxx based Ethernet cards. 
Choose this option if you have a DELL laptop with the docking station, or diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 51cd7dca91cd..44daf79a8f97 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -312,7 +312,7 @@ static const struct net_device_ops epic_netdev_ops = { .ndo_tx_timeout = epic_tx_timeout, .ndo_get_stats = epic_get_stats, .ndo_set_rx_mode = set_rx_mode, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c index bf7c8c8b1350..0ce403fa5f1a 100644 --- a/drivers/net/ethernet/smsc/smc9194.c +++ b/drivers/net/ethernet/smsc/smc9194.c @@ -1508,7 +1508,7 @@ MODULE_PARM_DESC(io, "SMC 99194 I/O base address"); MODULE_PARM_DESC(irq, "SMC 99194 IRQ number"); MODULE_PARM_DESC(ifport, "SMC 99194 interface port (0-default, 1-TP, 2-AUI)"); -int __init init_module(void) +static int __init smc_init_module(void) { if (io == 0) printk(KERN_WARNING @@ -1518,13 +1518,15 @@ int __init init_module(void) devSMC9194 = smc_init(-1); return PTR_ERR_OR_ZERO(devSMC9194); } +module_init(smc_init_module); -void __exit cleanup_module(void) +static void __exit smc_cleanup_module(void) { unregister_netdev(devSMC9194); free_irq(devSMC9194->irq, devSMC9194); release_region(devSMC9194->base_addr, SMC_IO_EXTENT); free_netdev(devSMC9194); } +module_exit(smc_cleanup_module); #endif /* MODULE */ diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index f2a50eb3c1e0..42fc37c7887a 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -294,7 +294,7 @@ static const struct net_device_ops smc_netdev_ops = { .ndo_tx_timeout = smc_tx_timeout, .ndo_set_config = s9k_config, .ndo_set_rx_mode = set_rx_mode, - .ndo_do_ioctl = smc_ioctl, + .ndo_eth_ioctl = smc_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 556a9790cdcf..199a97339280 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2148,7 +2148,7 @@ static const struct net_device_ops smsc911x_netdev_ops = { .ndo_start_xmit = smsc911x_hard_start_xmit, .ndo_get_stats = smsc911x_get_stats, .ndo_set_rx_mode = smsc911x_set_multicast_list, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = smsc911x_set_mac_address, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index c1dab009415d..fdbd2a43e267 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -1482,7 +1482,7 @@ static const struct net_device_ops smsc9420_netdev_ops = { .ndo_start_xmit = smsc9420_hard_start_xmit, .ndo_get_stats = smsc9420_get_stats, .ndo_set_rx_mode = smsc9420_set_multicast_list, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 20d148c019d8..1f46af136aa8 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ 
b/drivers/net/ethernet/socionext/netsec.c @@ -532,7 +532,9 @@ static void netsec_et_get_drvinfo(struct net_device *net_device, } static int netsec_et_get_coalesce(struct net_device *net_device, - struct ethtool_coalesce *et_coalesce) + struct ethtool_coalesce *et_coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct netsec_priv *priv = netdev_priv(net_device); @@ -542,7 +544,9 @@ static int netsec_et_get_coalesce(struct net_device *net_device, } static int netsec_et_set_coalesce(struct net_device *net_device, - struct ethtool_coalesce *et_coalesce) + struct ethtool_coalesce *et_coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct netsec_priv *priv = netdev_priv(net_device); @@ -1544,7 +1548,7 @@ static int netsec_start_gmac(struct netsec_priv *priv) netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0); netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0); - netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce); + netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce, NULL, NULL); if (netsec_mac_write(priv, GMAC_REG_OMR, value)) return -ETIMEDOUT; @@ -1831,7 +1835,7 @@ static const struct net_device_ops netsec_netdev_ops = { .ndo_set_features = netsec_netdev_set_features, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_xdp_xmit = netsec_xdp_xmit, .ndo_bpf = netsec_xdp, }; diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 5eb6bb4f7b6c..ae31ed93aaf0 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1543,7 +1543,7 @@ static const struct net_device_ops ave_netdev_ops = { .ndo_open = ave_open, .ndo_stop = ave_stop, .ndo_start_xmit = ave_start_xmit, - .ndo_do_ioctl = ave_ioctl, + .ndo_eth_ioctl = ave_ioctl, .ndo_set_rx_mode = ave_set_rx_mode, .ndo_get_stats64 = ave_get_stats64, .ndo_set_mac_address = ave_set_mac_address, diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index ac3c248d4f9b..929cfc22cd0c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -2,12 +2,12 @@ config STMMAC_ETH tristate "STMicroelectronics Multi-Gigabit Ethernet driver" depends on HAS_IOMEM && HAS_DMA + depends on PTP_1588_CLOCK_OPTIONAL select MII select PCS_XPCS select PAGE_POOL select PHYLINK select CRC32 - imply PTP_1588_CLOCK select RESET_CONTROLLER help This is the driver for the Ethernet IPs built around a diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 5fecc83f175b..b6d945ea903d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -58,6 +58,16 @@ #undef FRAME_FILTER_DEBUG /* #define FRAME_FILTER_DEBUG */ +struct stmmac_txq_stats { + unsigned long tx_pkt_n; + unsigned long tx_normal_irq_n; +}; + +struct stmmac_rxq_stats { + unsigned long rx_pkt_n; + unsigned long rx_normal_irq_n; +}; + /* Extra statistic and debug information exposed by ethtool */ struct stmmac_extra_stats { /* Transmit errors */ @@ -189,6 +199,9 @@ struct stmmac_extra_stats { unsigned long mtl_est_hlbf; unsigned long mtl_est_btre; unsigned long mtl_est_btrlm; + /* per queue statistics */ + struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES]; + struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES]; }; /* Safety Feature 
statistics exposed by ethtool */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 28dd0ed85a82..f7dc8458cde8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -289,10 +289,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL; break; default: - dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", - phy_modes(gmac->phy_mode)); - err = -EINVAL; - goto err_remove_config_dt; + goto err_unsupported_phy; } regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val); @@ -309,10 +306,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); break; default: - dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", - phy_modes(gmac->phy_mode)); - err = -EINVAL; - goto err_remove_config_dt; + goto err_unsupported_phy; } regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val); @@ -329,8 +323,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id); break; default: - /* We don't get here; the switch above will have errored out */ - unreachable(); + goto err_unsupported_phy; } regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); @@ -361,6 +354,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) return 0; +err_unsupported_phy: + dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", + phy_modes(gmac->phy_mode)); + err = -EINVAL; + err_remove_config_dt: stmmac_remove_config_dt(pdev, plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index e63270267578..9292a1fab7d3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -170,13 +170,16 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, x->normal_irq_n++; if (likely(intr_status & DMA_CHAN_STATUS_RI)) { x->rx_normal_irq_n++; + x->rxq_stats[chan].rx_normal_irq_n++; ret |= handle_rx; } - if (likely(intr_status & (DMA_CHAN_STATUS_TI | - DMA_CHAN_STATUS_TBU))) { + if (likely(intr_status & DMA_CHAN_STATUS_TI)) { x->tx_normal_irq_n++; + x->txq_stats[chan].tx_normal_irq_n++; ret |= handle_tx; } + if (unlikely(intr_status & DMA_CHAN_STATUS_TBU)) + ret |= handle_tx; if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) x->rx_early_irq++; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index d0ce608b81c3..d89455803bed 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -261,6 +261,18 @@ static const struct stmmac_stats stmmac_mmc[] = { }; #define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc) +static const char stmmac_qstats_tx_string[][ETH_GSTRING_LEN] = { + "tx_pkt_n", + "tx_irq_n", +#define STMMAC_TXQ_STATS ARRAY_SIZE(stmmac_qstats_tx_string) +}; + +static const char stmmac_qstats_rx_string[][ETH_GSTRING_LEN] = { + "rx_pkt_n", + "rx_irq_n", +#define STMMAC_RXQ_STATS ARRAY_SIZE(stmmac_qstats_rx_string) +}; + static void stmmac_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -510,6 +522,31 @@ stmmac_set_pauseparam(struct net_device *netdev, } } +static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; + int q, stat; + char *p; + + for 
(q = 0; q < tx_cnt; q++) { + p = (char *)priv + offsetof(struct stmmac_priv, + xstats.txq_stats[q].tx_pkt_n); + for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { + *data++ = (*(u64 *)p); + p += sizeof(u64 *); + } + } + for (q = 0; q < rx_cnt; q++) { + p = (char *)priv + offsetof(struct stmmac_priv, + xstats.rxq_stats[q].rx_pkt_n); + for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { + *data++ = (*(u64 *)p); + p += sizeof(u64 *); + } + } +} + static void stmmac_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *data) { @@ -560,16 +597,21 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, data[j++] = (stmmac_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p); } + stmmac_get_per_qstats(priv, &data[j]); } static int stmmac_get_sset_count(struct net_device *netdev, int sset) { struct stmmac_priv *priv = netdev_priv(netdev); + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; int i, len, safety_len = 0; switch (sset) { case ETH_SS_STATS: - len = STMMAC_STATS_LEN; + len = STMMAC_STATS_LEN + + STMMAC_TXQ_STATS * tx_cnt + + STMMAC_RXQ_STATS * rx_cnt; if (priv->dma_cap.rmon) len += STMMAC_MMC_STATS_LEN; @@ -592,6 +634,28 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset) } } +static void stmmac_get_qstats_string(struct stmmac_priv *priv, u8 *data) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; + int q, stat; + + for (q = 0; q < tx_cnt; q++) { + for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { + snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q, + stmmac_qstats_tx_string[stat]); + data += ETH_GSTRING_LEN; + } + } + for (q = 0; q < rx_cnt; q++) { + for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { + snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q, + stmmac_qstats_rx_string[stat]); + data += ETH_GSTRING_LEN; + } + } +} + static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) { int i; @@ -622,6 +686,7 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + stmmac_get_qstats_string(priv, p); break; case ETH_SS_TEST: stmmac_selftest_get_strings(priv, p); @@ -809,7 +874,9 @@ static int __stmmac_get_coalesce(struct net_device *dev, } static int stmmac_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __stmmac_get_coalesce(dev, ec, -1); } @@ -893,7 +960,9 @@ static int __stmmac_set_coalesce(struct net_device *dev, } static int stmmac_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __stmmac_set_coalesce(dev, ec, -1); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index fa90bcdf4e45..ed0cd3920171 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2500,6 +2500,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) } else { priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; + priv->xstats.txq_stats[queue].tx_pkt_n++; } if (skb) stmmac_get_tx_hwtstamp(priv, p, skb); @@ -5000,6 +5001,9 @@ read_again: stmmac_finalize_xdp_rx(priv, xdp_status); + priv->xstats.rx_pkt_n += count; + priv->xstats.rxq_stats[queue].rx_pkt_n += 
count; + if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { if (failure || stmmac_rx_dirty(priv, queue) > 0) xsk_set_rx_need_wakeup(rx_q->xsk_pool); @@ -5287,6 +5291,7 @@ drain_data: stmmac_rx_refill(priv, queue); priv->xstats.rx_pkt_n += count; + priv->xstats.rxq_stats[queue].rx_pkt_n += count; return count; } @@ -6451,7 +6456,7 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_set_features = stmmac_set_features, .ndo_set_rx_mode = stmmac_set_rx_mode, .ndo_tx_timeout = stmmac_tx_timeout, - .ndo_do_ioctl = stmmac_ioctl, + .ndo_eth_ioctl = stmmac_ioctl, .ndo_setup_tc = stmmac_setup_tc, .ndo_select_queue = stmmac_select_queue, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 981685c88308..287ae4c538aa 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4876,7 +4876,7 @@ static const struct net_device_ops cas_netdev_ops = { .ndo_start_xmit = cas_start_xmit, .ndo_get_stats = cas_get_stats, .ndo_set_rx_mode = cas_set_multicast, - .ndo_do_ioctl = cas_ioctl, + .ndo_eth_ioctl = cas_ioctl, .ndo_tx_timeout = cas_tx_timeout, .ndo_change_mtu = cas_change_mtu, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 860644d182ab..a68a01d1b2b1 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9208,7 +9208,7 @@ static int niu_get_of_props(struct niu *np) else dp = pci_device_to_OF_node(np->pdev); - phy_type = of_get_property(dp, "phy-type", &prop_len); + phy_type = of_get_property(dp, "phy-type", NULL); if (!phy_type) { netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp); return -EINVAL; @@ -9242,12 +9242,12 @@ static int niu_get_of_props(struct niu *np) return -EINVAL; } - model = of_get_property(dp, "model", &prop_len); + model = of_get_property(dp, "model", NULL); if (model) strcpy(np->vpd.model, model); - if (of_find_property(dp, "hot-swappable-phy", &prop_len)) { + if (of_find_property(dp, "hot-swappable-phy", NULL)) { np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_HOTPLUG_PHY); } @@ -9668,7 +9668,7 @@ static const struct net_device_ops niu_netdev_ops = { .ndo_set_rx_mode = niu_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = niu_set_mac_addr, - .ndo_do_ioctl = niu_ioctl, + .ndo_eth_ioctl = niu_ioctl, .ndo_tx_timeout = niu_tx_timeout, .ndo_change_mtu = niu_change_mtu, }; @@ -9722,7 +9722,6 @@ static int niu_pci_init_one(struct pci_dev *pdev, struct net_device *dev; struct niu *np; int err; - u64 dma_mask; niu_driver_version(); @@ -9777,18 +9776,11 @@ static int niu_pci_init_one(struct pci_dev *pdev, PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_RELAX_EN); - dma_mask = DMA_BIT_MASK(44); - err = pci_set_dma_mask(pdev, dma_mask); - if (!err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); + if (!err) dev->features |= NETIF_F_HIGHDMA; - err = pci_set_consistent_dma_mask(pdev, dma_mask); - if (err) { - dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n"); - goto err_out_release_parent; - } - } if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_out_release_parent; diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index cfb9e21b18b7..d72018a60c0f 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ 
b/drivers/net/ethernet/sun/sungem.c @@ -2831,7 +2831,7 @@ static const struct net_device_ops gem_netdev_ops = { .ndo_start_xmit = gem_start_xmit, .ndo_get_stats = gem_get_stats, .ndo_set_rx_mode = gem_set_multicast, - .ndo_do_ioctl = gem_ioctl, + .ndo_eth_ioctl = gem_ioctl, .ndo_tx_timeout = gem_tx_timeout, .ndo_change_mtu = gem_change_mtu, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index a2c1a404c52d..62f81b0d14ed 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -251,14 +251,6 @@ static u32 pci_hme_read_desc32(hme32 *p) ((__hp)->write_txd((__txd), (__flags), (__addr))) #define hme_read_desc32(__hp, __p) \ ((__hp)->read_desc32(__p)) -#define hme_dma_map(__hp, __ptr, __size, __dir) \ - ((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir))) -#define hme_dma_unmap(__hp, __addr, __size, __dir) \ - ((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir))) -#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ - ((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))) -#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ - ((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))) #else #ifdef CONFIG_SBUS /* SBUS only compilation */ @@ -277,14 +269,6 @@ do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \ (__txd)->tx_flags = (__force hme32)(u32)(__flags); \ } while(0) #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p)) -#define hme_dma_map(__hp, __ptr, __size, __dir) \ - dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir)) -#define hme_dma_unmap(__hp, __addr, __size, __dir) \ - dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir)) -#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ - dma_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)) -#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ - dma_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)) #else /* PCI only compilation */ #define hme_write32(__hp, __reg, __val) \ @@ -305,14 +289,6 @@ static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p) { return le32_to_cpup((__le32 *)p); } -#define hme_dma_map(__hp, __ptr, __size, __dir) \ - pci_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir)) -#define hme_dma_unmap(__hp, __addr, __size, __dir) \ - pci_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir)) -#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ - pci_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)) -#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ - pci_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)) #endif #endif diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c index bc198eadfcab..49f8c6be9459 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c @@ -146,8 +146,11 @@ static void xlgmac_ethtool_get_channels(struct net_device *netdev, channel->tx_count = pdata->tx_q_count; } -static int xlgmac_ethtool_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) +static int +xlgmac_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct xlgmac_pdata *pdata = netdev_priv(netdev); @@ -158,8 +161,11 @@ static int 
xlgmac_ethtool_get_coalesce(struct net_device *netdev, return 0; } -static int xlgmac_ethtool_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) +static int +xlgmac_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c index 26d178f8616b..1db7104fef3a 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c @@ -933,7 +933,7 @@ static const struct net_device_ops xlgmac_netdev_ops = { .ndo_change_mtu = xlgmac_change_mtu, .ndo_set_mac_address = xlgmac_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = xlgmac_ioctl, + .ndo_eth_ioctl = xlgmac_ioctl, .ndo_vlan_rx_add_vid = xlgmac_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = xlgmac_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index d054c6e83b1c..6b409f9c5863 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -637,7 +637,8 @@ static int bdx_range_check(struct bdx_priv *priv, u32 offset) -EINVAL : 0; } -static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) +static int bdx_siocdevprivate(struct net_device *ndev, struct ifreq *ifr, + void __user *udata, int cmd) { struct bdx_priv *priv = netdev_priv(ndev); u32 data[3]; @@ -647,7 +648,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) DBG("jiffies=%ld cmd=%d\n", jiffies, cmd); if (cmd != SIOCDEVPRIVATE) { - error = copy_from_user(data, ifr->ifr_data, sizeof(data)); + error = copy_from_user(data, udata, sizeof(data)); if (error) { pr_err("can't copy from user\n"); RET(-EFAULT); @@ -669,7 +670,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) data[2] = READ_REG(priv, data[1]); DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2], data[2]); - error = copy_to_user(ifr->ifr_data, data, sizeof(data)); + error = copy_to_user(udata, data, sizeof(data)); if (error) RET(-EFAULT); break; @@ -688,15 +689,6 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) return 0; } -static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) -{ - ENTER; - if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) - RET(bdx_ioctl_priv(ndev, ifr, cmd)); - else - RET(-EOPNOTSUPP); -} - /** * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid * @ndev: network device @@ -1860,7 +1852,7 @@ static const struct net_device_ops bdx_netdev_ops = { .ndo_stop = bdx_close, .ndo_start_xmit = bdx_tx_transmit, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = bdx_ioctl, + .ndo_siocdevprivate = bdx_siocdevprivate, .ndo_set_rx_mode = bdx_setmulti, .ndo_change_mtu = bdx_change_mtu, .ndo_set_mac_address = bdx_set_mac, @@ -2159,8 +2151,10 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) * @netdev * @ecoal */ -static int -bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) +static int bdx_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecoal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { u32 rdintcm; u32 tdintcm; @@ -2188,8 +2182,10 @@ 
bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) * @netdev * @ecoal */ -static int -bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) +static int bdx_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecoal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { u32 rdintcm; u32 tdintcm; diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 67a08cbba859..130346f74ee8 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -27,6 +27,7 @@ #include <linux/sys_soc.h> #include <linux/dma/ti-cppi5.h> #include <linux/dma/k3-udma-glue.h> +#include <net/switchdev.h> #include "cpsw_ale.h" #include "cpsw_sl.h" @@ -518,6 +519,10 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common, } napi_enable(&common->napi_rx); + if (common->rx_irq_disabled) { + common->rx_irq_disabled = false; + enable_irq(common->rx_chns.irq); + } dev_dbg(common->dev, "cpsw_nuss started\n"); return 0; @@ -871,8 +876,12 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget); - if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) - enable_irq(common->rx_chns.irq); + if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) { + if (common->rx_irq_disabled) { + common->rx_irq_disabled = false; + enable_irq(common->rx_chns.irq); + } + } return num_rx; } @@ -1077,19 +1086,20 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget) else num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget); - num_tx = min(num_tx, budget); - if (num_tx < budget) { - napi_complete(napi_tx); + if (num_tx >= budget) + return budget; + + if (napi_complete_done(napi_tx, num_tx)) enable_irq(tx_chn->irq); - } - return num_tx; + return 0; } static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id) { struct am65_cpsw_common *common = dev_id; + common->rx_irq_disabled = true; disable_irq_nosync(irq); napi_schedule(&common->napi_rx); @@ -1479,7 +1489,7 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = { .ndo_tx_timeout = am65_cpsw_nuss_ndo_host_tx_timeout, .ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid, .ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid, - .ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl, + .ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl, .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc, .ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port, }; @@ -2081,10 +2091,13 @@ bool am65_cpsw_port_dev_check(const struct net_device *ndev) return false; } -static int am65_cpsw_netdevice_port_link(struct net_device *ndev, struct net_device *br_ndev) +static int am65_cpsw_netdevice_port_link(struct net_device *ndev, + struct net_device *br_ndev, + struct netlink_ext_ack *extack) { struct am65_cpsw_common *common = am65_ndev_to_common(ndev); struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev); + int err; if (!common->br_members) { common->hw_bridge_dev = br_ndev; @@ -2096,6 +2109,11 @@ static int am65_cpsw_netdevice_port_link(struct net_device *ndev, struct net_dev return -EOPNOTSUPP; } + err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL, + false, extack); + if (err) + return err; + common->br_members |= BIT(priv->port->port_id); am65_cpsw_port_offload_fwd_mark_update(common); @@ -2108,6 +2126,8 @@ static void am65_cpsw_netdevice_port_unlink(struct net_device 
*ndev) struct am65_cpsw_common *common = am65_ndev_to_common(ndev); struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev); + switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL); + common->br_members &= ~BIT(priv->port->port_id); am65_cpsw_port_offload_fwd_mark_update(common); @@ -2120,6 +2140,7 @@ static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev) static int am65_cpsw_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { + struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct netdev_notifier_changeupper_info *info; int ret = NOTIFY_DONE; @@ -2133,7 +2154,9 @@ static int am65_cpsw_netdevice_event(struct notifier_block *unused, if (netif_is_bridge_master(info->upper_dev)) { if (info->linking) - ret = am65_cpsw_netdevice_port_link(ndev, info->upper_dev); + ret = am65_cpsw_netdevice_port_link(ndev, + info->upper_dev, + extack); else am65_cpsw_netdevice_port_unlink(ndev); } @@ -2388,21 +2411,6 @@ static const struct devlink_param am65_cpsw_devlink_params[] = { am65_cpsw_dl_switch_mode_set, NULL), }; -static void am65_cpsw_unregister_devlink_ports(struct am65_cpsw_common *common) -{ - struct devlink_port *dl_port; - struct am65_cpsw_port *port; - int i; - - for (i = 1; i <= common->port_num; i++) { - port = am65_common_get_port(common, i); - dl_port = &port->devlink_port; - - if (dl_port->registered) - devlink_port_unregister(dl_port); - } -} - static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) { struct devlink_port_attrs attrs = {}; @@ -2414,14 +2422,14 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) int i; common->devlink = - devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv)); + devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv), dev); if (!common->devlink) return -ENOMEM; dl_priv = devlink_priv(common->devlink); dl_priv->common = common; - ret = devlink_register(common->devlink, dev); + ret = devlink_register(common->devlink); if (ret) { dev_err(dev, "devlink reg fail ret:%d\n", ret); goto dl_free; @@ -2464,7 +2472,12 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) return ret; dl_port_unreg: - am65_cpsw_unregister_devlink_ports(common); + for (i = i - 1; i >= 1; i--) { + port = am65_common_get_port(common, i); + dl_port = &port->devlink_port; + + devlink_port_unregister(dl_port); + } dl_unreg: devlink_unregister(common->devlink); dl_free: @@ -2475,6 +2488,17 @@ dl_free: static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) { + struct devlink_port *dl_port; + struct am65_cpsw_port *port; + int i; + + for (i = 1; i <= common->port_num; i++) { + port = am65_common_get_port(common, i); + dl_port = &port->devlink_port; + + devlink_port_unregister(dl_port); + } + if (!AM65_CPSW_IS_CPSW2G(common) && IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) { devlink_params_unpublish(common->devlink); @@ -2482,7 +2506,6 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) ARRAY_SIZE(am65_cpsw_devlink_params)); } - am65_cpsw_unregister_devlink_ports(common); devlink_unregister(common->devlink); devlink_free(common->devlink); } diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index 5d93e346f05e..048ed10143c1 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -126,6 +126,8 @@ struct am65_cpsw_common { struct am65_cpsw_rx_chn rx_chns; struct 
napi_struct napi_rx; + bool rx_irq_disabled; + u32 nuss_ver; u32 cpsw_ver; unsigned long bus_freq; diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index c20715107075..02d4e51f7306 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -1044,7 +1044,7 @@ static const struct net_device_ops cpmac_netdev_ops = { .ndo_start_xmit = cpmac_start_xmit, .ndo_tx_timeout = cpmac_tx_timeout, .ndo_set_rx_mode = cpmac_set_multicast_list, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, }; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index cbbd0f665796..66f7ddd9b1f9 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -431,7 +431,7 @@ static void cpsw_rx_handler(void *token, int len, int status) skb->protocol = eth_type_trans(skb, ndev); /* mark skb for recycling */ - skb_mark_for_recycle(skb, page, pool); + skb_mark_for_recycle(skb); netif_receive_skb(skb); ndev->stats.rx_bytes += len; @@ -845,7 +845,7 @@ static int cpsw_ndo_open(struct net_device *ndev) struct ethtool_coalesce coal; coal.rx_coalesce_usecs = cpsw->coal_intvl; - cpsw_set_coalesce(ndev, &coal); + cpsw_set_coalesce(ndev, &coal, NULL, NULL); } cpdma_ctlr_start(cpsw->dma); @@ -905,7 +905,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, struct cpdma_chan *txch; int ret, q_idx; - if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { + if (skb_put_padto(skb, CPSW_MIN_PACKET_SIZE)) { cpsw_err(priv, tx_err, "packet pad failed\n"); ndev->stats.tx_dropped++; return NET_XMIT_DROP; @@ -1159,7 +1159,7 @@ static const struct net_device_ops cpsw_netdev_ops = { .ndo_stop = cpsw_ndo_stop, .ndo_start_xmit = cpsw_ndo_start_xmit, .ndo_set_mac_address = cpsw_ndo_set_mac_address, - .ndo_do_ioctl = cpsw_ndo_ioctl, + .ndo_eth_ioctl = cpsw_ndo_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = cpsw_ndo_tx_timeout, .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c index 4619c3a950b0..158c8d3793f4 100644 --- a/drivers/net/ethernet/ti/cpsw_ethtool.c +++ b/drivers/net/ethernet/ti/cpsw_ethtool.c @@ -152,7 +152,9 @@ void cpsw_set_msglevel(struct net_device *ndev, u32 value) priv->msg_enable = value; } -int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) +int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct cpsw_common *cpsw = ndev_to_cpsw(ndev); @@ -160,7 +162,9 @@ int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) return 0; } -int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) +int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct cpsw_priv *priv = netdev_priv(ndev); u32 int_ctrl; diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index d1d02001cef6..7968f24d99c8 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -28,6 +28,7 @@ #include <linux/kmemleak.h> #include <linux/sys_soc.h> +#include <net/switchdev.h> #include <net/page_pool.h> #include <net/pkt_cls.h> #include <net/devlink.h> @@ -374,7 +375,7 @@ static void cpsw_rx_handler(void *token, int len, int 
status) skb->protocol = eth_type_trans(skb, ndev); /* mark skb for recycling */ - skb_mark_for_recycle(skb, page, pool); + skb_mark_for_recycle(skb); netif_receive_skb(skb); ndev->stats.rx_bytes += len; @@ -501,7 +502,7 @@ static void cpsw_restore(struct cpsw_priv *priv) static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw) { - char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0}; + static const char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0}; cpsw_ale_add_mcast(cpsw->ale, stpa, ALE_PORT_HOST, ALE_SUPER, 0, @@ -893,7 +894,7 @@ static int cpsw_ndo_open(struct net_device *ndev) struct ethtool_coalesce coal; coal.rx_coalesce_usecs = cpsw->coal_intvl; - cpsw_set_coalesce(ndev, &coal); + cpsw_set_coalesce(ndev, &coal, NULL, NULL); } cpdma_ctlr_start(cpsw->dma); @@ -1127,7 +1128,7 @@ static const struct net_device_ops cpsw_netdev_ops = { .ndo_stop = cpsw_ndo_stop, .ndo_start_xmit = cpsw_ndo_start_xmit, .ndo_set_mac_address = cpsw_ndo_set_mac_address, - .ndo_do_ioctl = cpsw_ndo_ioctl, + .ndo_eth_ioctl = cpsw_ndo_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = cpsw_ndo_tx_timeout, .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, @@ -1500,10 +1501,12 @@ static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw) } static int cpsw_netdevice_port_link(struct net_device *ndev, - struct net_device *br_ndev) + struct net_device *br_ndev, + struct netlink_ext_ack *extack) { struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; + int err; if (!cpsw->br_members) { cpsw->hw_bridge_dev = br_ndev; @@ -1515,6 +1518,11 @@ static int cpsw_netdevice_port_link(struct net_device *ndev, return -EOPNOTSUPP; } + err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL, + false, extack); + if (err) + return err; + cpsw->br_members |= BIT(priv->emac_port); cpsw_port_offload_fwd_mark_update(cpsw); @@ -1527,6 +1535,8 @@ static void cpsw_netdevice_port_unlink(struct net_device *ndev) struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; + switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL); + cpsw->br_members &= ~BIT(priv->emac_port); cpsw_port_offload_fwd_mark_update(cpsw); @@ -1539,6 +1549,7 @@ static void cpsw_netdevice_port_unlink(struct net_device *ndev) static int cpsw_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { + struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct netdev_notifier_changeupper_info *info; int ret = NOTIFY_DONE; @@ -1553,7 +1564,8 @@ static int cpsw_netdevice_event(struct notifier_block *unused, if (netif_is_bridge_master(info->upper_dev)) { if (info->linking) ret = cpsw_netdevice_port_link(ndev, - info->upper_dev); + info->upper_dev, + extack); else cpsw_netdevice_port_unlink(ndev); } @@ -1791,14 +1803,14 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw) struct cpsw_devlink *dl_priv; int ret = 0; - cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv)); + cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv), dev); if (!cpsw->devlink) return -ENOMEM; dl_priv = devlink_priv(cpsw->devlink); dl_priv->cpsw = cpsw; - ret = devlink_register(cpsw->devlink, dev); + ret = devlink_register(cpsw->devlink); if (ret) { dev_err(dev, "DL reg fail ret:%d\n", ret); goto dl_free; diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 2951fb7b9dae..435668ee542d 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ 
b/drivers/net/ethernet/ti/cpsw_priv.h @@ -464,8 +464,12 @@ void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv); /* ethtool */ u32 cpsw_get_msglevel(struct net_device *ndev); void cpsw_set_msglevel(struct net_device *ndev, u32 value); -int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal); -int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal); +int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); +int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); int cpsw_get_sset_count(struct net_device *ndev, int sset); void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data); void cpsw_get_ethtool_stats(struct net_device *ndev, diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index c674e34b6839..e8291d848839 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -383,12 +383,16 @@ static void emac_get_drvinfo(struct net_device *ndev, * emac_get_coalesce - Get interrupt coalesce settings for this device * @ndev : The DaVinci EMAC network adapter * @coal : ethtool coalesce settings structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Fetch the current interrupt coalesce settings * */ static int emac_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct emac_priv *priv = netdev_priv(ndev); @@ -401,12 +405,16 @@ static int emac_get_coalesce(struct net_device *ndev, * emac_set_coalesce - Set interrupt coalesce settings for this device * @ndev : The DaVinci EMAC network adapter * @coal : ethtool coalesce settings structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Set interrupt coalesce parameters * */ static int emac_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct emac_priv *priv = netdev_priv(ndev); u32 int_ctrl, num_interrupts = 0; @@ -943,7 +951,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) goto fail_tx; } - ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE); + ret_code = skb_put_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE); if (unlikely(ret_code < 0)) { if (netif_msg_tx_err(priv) && net_ratelimit()) dev_err(emac_dev, "DaVinci EMAC: packet pad failed"); @@ -1462,7 +1470,7 @@ static int emac_dev_open(struct net_device *ndev) struct ethtool_coalesce coal; coal.rx_coalesce_usecs = (priv->coal_intvl << 4); - emac_set_coalesce(ndev, &coal); + emac_set_coalesce(ndev, &coal, NULL, NULL); } cpdma_ctlr_start(priv->dma); @@ -1670,7 +1678,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_start_xmit = emac_dev_xmit, .ndo_set_rx_mode = emac_dev_mcast_set, .ndo_set_mac_address = emac_dev_setmac_addr, - .ndo_do_ioctl = emac_devioctl, + .ndo_eth_ioctl = emac_devioctl, .ndo_tx_timeout = emac_dev_tx_timeout, .ndo_get_stats = emac_dev_getnetstats, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 97942b0e3897..eda2961c0fe2 
100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1944,7 +1944,7 @@ static const struct net_device_ops netcp_netdev_ops = { .ndo_stop = netcp_ndo_stop, .ndo_start_xmit = netcp_ndo_start_xmit, .ndo_set_rx_mode = netcp_set_rx_mode, - .ndo_do_ioctl = netcp_ndo_ioctl, + .ndo_eth_ioctl = netcp_ndo_ioctl, .ndo_get_stats64 = netcp_get_stats, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index e0cb713193ea..77c448ad67ce 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -749,7 +749,7 @@ static const struct net_device_ops tlan_netdev_ops = { .ndo_tx_timeout = tlan_tx_timeout, .ndo_get_stats = tlan_get_stats, .ndo_set_rx_mode = tlan_set_multicast_list, - .ndo_do_ioctl = tlan_ioctl, + .ndo_eth_ioctl = tlan_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 226a76633e65..66d4e024d11e 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -354,9 +354,10 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card) descr = card->rx_chain.head; do { if (descr->skb) { - pci_unmap_single(card->pdev, descr->hwdescr->buf_addr, + dma_unmap_single(&card->pdev->dev, + descr->hwdescr->buf_addr, SPIDER_NET_MAX_FRAME, - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); dev_kfree_skb(descr->skb); descr->skb = NULL; } @@ -411,9 +412,9 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, if (offset) skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); /* iommu-map the skb */ - buf = pci_map_single(card->pdev, descr->skb->data, - SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(card->pdev, buf)) { + buf = dma_map_single(&card->pdev->dev, descr->skb->data, + SPIDER_NET_MAX_FRAME, DMA_FROM_DEVICE); + if (dma_mapping_error(&card->pdev->dev, buf)) { dev_kfree_skb_any(descr->skb); descr->skb = NULL; if (netif_msg_rx_err(card) && net_ratelimit()) @@ -653,8 +654,9 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, dma_addr_t buf; unsigned long flags; - buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(card->pdev, buf)) { + buf = dma_map_single(&card->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&card->pdev->dev, buf)) { if (netif_msg_tx_err(card) && net_ratelimit()) dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). 
" "Dropping packet\n", skb->data, skb->len); @@ -666,7 +668,8 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, descr = card->tx_chain.head; if (descr->next == chain->tail->prev) { spin_unlock_irqrestore(&chain->lock, flags); - pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&card->pdev->dev, buf, skb->len, + DMA_TO_DEVICE); return -ENOMEM; } hwdescr = descr->hwdescr; @@ -822,8 +825,8 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) /* unmap the skb */ if (skb) { - pci_unmap_single(card->pdev, buf_addr, skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&card->pdev->dev, buf_addr, skb->len, + DMA_TO_DEVICE); dev_consume_skb_any(skb); } } @@ -1165,8 +1168,8 @@ spider_net_decode_one_descr(struct spider_net_card *card) /* unmap descriptor */ hw_buf_addr = hwdescr->buf_addr; hwdescr->buf_addr = 0xffffffff; - pci_unmap_single(card->pdev, hw_buf_addr, - SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); + dma_unmap_single(&card->pdev->dev, hw_buf_addr, SPIDER_NET_MAX_FRAME, + DMA_FROM_DEVICE); if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) || (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || @@ -2214,7 +2217,7 @@ static const struct net_device_ops spider_net_ops = { .ndo_start_xmit = spider_net_xmit, .ndo_set_rx_mode = spider_net_set_multi, .ndo_set_mac_address = spider_net_set_mac, - .ndo_do_ioctl = spider_net_do_ioctl, + .ndo_eth_ioctl = spider_net_do_ioctl, .ndo_tx_timeout = spider_net_tx_timeout, .ndo_validate_addr = eth_validate_addr, /* HW VLAN */ diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index fedb2bf69261..52245ac60fc7 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -750,7 +750,7 @@ static const struct net_device_ops tc35815_netdev_ops = { .ndo_get_stats = tc35815_get_stats, .ndo_set_rx_mode = tc35815_set_multicast_list, .ndo_tx_timeout = tc35815_tx_timeout, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index c62f474b6d08..cf0917b29e30 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1538,7 +1538,7 @@ static const struct net_device_ops tsi108_netdev_ops = { .ndo_start_xmit = tsi108_send_packet, .ndo_set_rx_mode = tsi108_set_rx_mode, .ndo_get_stats = tsi108_get_stats, - .ndo_do_ioctl = tsi108_do_ioctl, + .ndo_eth_ioctl = tsi108_do_ioctl, .ndo_set_mac_address = tsi108_set_mac, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 73ca597ebd1b..3b73a9c55a5a 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -884,7 +884,7 @@ static const struct net_device_ops rhine_netdev_ops = { .ndo_set_rx_mode = rhine_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = rhine_tx_timeout, .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid, @@ -1113,13 +1113,12 @@ err_out: static int rhine_init_one_platform(struct platform_device *pdev) { - const struct of_device_id *match; const u32 *quirks; int irq; void __iomem *ioaddr; - match = of_match_device(rhine_of_tbl, 
&pdev->dev); - if (!match) + quirks = of_device_get_match_data(&pdev->dev); + if (!quirks) return -EINVAL; ioaddr = devm_platform_ioremap_resource(pdev, 0); @@ -1130,10 +1129,6 @@ static int rhine_init_one_platform(struct platform_device *pdev) if (!irq) return -EINVAL; - quirks = match->data; - if (!quirks) - return -EINVAL; - return rhine_init_one_common(&pdev->dev, *quirks, (long)ioaddr, ioaddr, irq); } diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 88426b5e410b..4b9c30f735b5 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2637,7 +2637,7 @@ static const struct net_device_ops velocity_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_set_rx_mode = velocity_set_multi, .ndo_change_mtu = velocity_change_mtu, - .ndo_do_ioctl = velocity_ioctl, + .ndo_eth_ioctl = velocity_ioctl, .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2943,14 +2943,12 @@ static void velocity_pci_remove(struct pci_dev *pdev) static int velocity_platform_probe(struct platform_device *pdev) { - const struct of_device_id *of_id; const struct velocity_info_tbl *info; int irq; - of_id = of_match_device(velocity_of_ids, &pdev->dev); - if (!of_id) + info = of_device_get_match_data(&pdev->dev); + if (!info) return -EINVAL; - info = of_id->data; irq = irq_of_parse_and_map(pdev->dev.of_node, 0); if (!irq) @@ -3520,7 +3518,9 @@ static void set_pending_timer_val(int *val, u32 us) static int velocity_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct velocity_info *vptr = netdev_priv(dev); @@ -3534,7 +3534,9 @@ static int velocity_get_coalesce(struct net_device *dev, } static int velocity_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct velocity_info *vptr = netdev_priv(dev); int max_us = 0x3f * 64; diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 811815f8cd3b..f974e70a82e8 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -1047,6 +1047,8 @@ static int w5100_mmio_probe(struct platform_device *pdev) mac_addr = data->mac_addr; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -EINVAL; if (resource_size(mem) < W5100_BUS_DIRECT_SIZE) ops = &w5100_mmio_indirect_ops; else diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 60a4f79b8fa1..463094ced104 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1237,7 +1237,7 @@ static const struct net_device_ops temac_netdev_ops = { .ndo_set_rx_mode = temac_set_multicast_list, .ndo_set_mac_address = temac_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = temac_poll_controller, #endif @@ -1310,8 +1310,11 @@ static int ll_temac_ethtools_set_ringparam(struct net_device *ndev, return 0; } -static int ll_temac_ethtools_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ec) +static int +ll_temac_ethtools_get_coalesce(struct net_device *ndev, 
+ struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct temac_local *lp = netdev_priv(ndev); @@ -1322,8 +1325,11 @@ static int ll_temac_ethtools_get_coalesce(struct net_device *ndev, return 0; } -static int ll_temac_ethtools_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ec) +static int +ll_temac_ethtools_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct temac_local *lp = netdev_priv(ndev); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 13cd799541aa..871b5ec3183d 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1227,7 +1227,7 @@ static const struct net_device_ops axienet_netdev_ops = { .ndo_change_mtu = axienet_change_mtu, .ndo_set_mac_address = netdev_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = axienet_ioctl, + .ndo_eth_ioctl = axienet_ioctl, .ndo_set_rx_mode = axienet_set_multicast_list, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = axienet_poll_controller, @@ -1400,6 +1400,8 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev, * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. * @ndev: Pointer to net_device structure * @ecoalesce: Pointer to ethtool_coalesce structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * This implements ethtool command for getting the DMA interrupt coalescing * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to @@ -1407,8 +1409,11 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev, * * Return: 0 always */ -static int axienet_ethtools_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ecoalesce) +static int +axienet_ethtools_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ecoalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { u32 regval = 0; struct axienet_local *lp = netdev_priv(ndev); @@ -1425,6 +1430,8 @@ static int axienet_ethtools_get_coalesce(struct net_device *ndev, * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. * @ndev: Pointer to net_device structure * @ecoalesce: Pointer to ethtool_coalesce structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * This implements ethtool command for setting the DMA interrupt coalescing * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux @@ -1432,8 +1439,11 @@ static int axienet_ethtools_get_coalesce(struct net_device *ndev, * * Return: 0, on success, Non-zero error value on failure. 
*/ -static int axienet_ethtools_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ecoalesce) +static int +axienet_ethtools_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ecoalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct axienet_local *lp = netdev_priv(ndev); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index b06377fe7293..b780aad3550a 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1263,7 +1263,7 @@ static const struct net_device_ops xemaclite_netdev_ops = { .ndo_start_xmit = xemaclite_send, .ndo_set_mac_address = xemaclite_set_mac_address, .ndo_tx_timeout = xemaclite_tx_timeout, - .ndo_do_ioctl = xemaclite_ioctl, + .ndo_eth_ioctl = xemaclite_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xemaclite_poll_controller, #endif diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 4f6db6f5c272..ae611e46da6a 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -464,7 +464,7 @@ static const struct net_device_ops netdev_ops = { .ndo_start_xmit = do_start_xmit, .ndo_tx_timeout = xirc_tx_timeout, .ndo_set_config = do_config, - .ndo_do_ioctl = do_ioctl, + .ndo_eth_ioctl = do_ioctl, .ndo_set_rx_mode = set_multicast_list, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig index 468ffe3d1707..0e878fa6e322 100644 --- a/drivers/net/ethernet/xscale/Kconfig +++ b/drivers/net/ethernet/xscale/Kconfig @@ -29,9 +29,9 @@ config IXP4XX_ETH on IXP4xx processor. config PTP_1588_CLOCK_IXP46X - tristate "Intel IXP46x as PTP clock" + bool "Intel IXP46x as PTP clock" depends on IXP4XX_ETH - depends on PTP_1588_CLOCK + depends on PTP_1588_CLOCK=y || PTP_1588_CLOCK=IXP4XX_ETH default y help This driver adds support for using the IXP46X as a PTP diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile index 607f91b1e878..e935f2a2979f 100644 --- a/drivers/net/ethernet/xscale/Makefile +++ b/drivers/net/ethernet/xscale/Makefile @@ -3,5 +3,9 @@ # Makefile for the Intel XScale IXP device drivers. 
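The stmmac Kconfig hunk earlier and the xscale Kconfig hunk just above attack the same linking problem from the Kconfig side: a built-in Ethernet driver must never call into a modular PTP core, hence the explicit "PTP_1588_CLOCK=y || PTP_1588_CLOCK=IXP4XX_ETH" dependency in place of a weaker imply. The C-side companion idiom is an IS_ENABLED() guard with a static inline fallback, which is exactly what the ixp46x_ts.h hunk below does for ixp46x_ptp_find(). A generic sketch, where CONFIG_FOO_PTP and foo_ptp_attach() are hypothetical:

#include <linux/errno.h>
#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_FOO_PTP)	/* true for both =y and =m */
int foo_ptp_attach(int *phc_index);
#else
/* Compiled when the clock driver is unreachable: callers get a
 * clean "no PHC" answer instead of a link failure.
 */
static inline int foo_ptp_attach(int *phc_index)
{
	*phc_index = -1;
	return -ENODEV;
}
#endif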
# +# Keep this link order to avoid deferred probing +ifdef CONFIG_PTP_1588_CLOCK_IXP46X +obj-$(CONFIG_IXP4XX_ETH) += ptp_ixp46x.o +endif + obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o -obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o diff --git a/drivers/net/ethernet/xscale/ixp46x_ts.h b/drivers/net/ethernet/xscale/ixp46x_ts.h index d792130e27b0..ee9b93ded20a 100644 --- a/drivers/net/ethernet/xscale/ixp46x_ts.h +++ b/drivers/net/ethernet/xscale/ixp46x_ts.h @@ -62,7 +62,16 @@ struct ixp46x_ts_regs { #define TX_SNAPSHOT_LOCKED (1<<0) #define RX_SNAPSHOT_LOCKED (1<<1) -/* The ptp_ixp46x module will set this variable */ -extern int ixp46x_phc_index; +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK_IXP46X) +int ixp46x_ptp_find(struct ixp46x_ts_regs *__iomem *regs, int *phc_index); +#else +static inline int ixp46x_ptp_find(struct ixp46x_ts_regs *__iomem *regs, int *phc_index) +{ + *regs = NULL; + *phc_index = -1; + + return -ENODEV; +} +#endif #endif diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 7ae754eadf22..931494cc1c39 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -37,7 +37,6 @@ #include <linux/module.h> #include <linux/soc/ixp4xx/npe.h> #include <linux/soc/ixp4xx/qmgr.h> -#include <mach/hardware.h> #include <linux/soc/ixp4xx/cpu.h> #include "ixp46x_ts.h" @@ -169,13 +168,15 @@ struct eth_regs { struct port { struct eth_regs __iomem *regs; + struct ixp46x_ts_regs __iomem *timesync_regs; + int phc_index; struct npe *npe; struct net_device *netdev; struct napi_struct napi; struct eth_plat_info *plat; buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; struct desc *desc_tab; /* coherent */ - u32 desc_tab_phys; + dma_addr_t desc_tab_phys; int id; /* logical port ID */ int speed, duplex; u8 firmware[4]; @@ -295,7 +296,7 @@ static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb) ch = PORT2CHANNEL(port); - regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + regs = port->timesync_regs; val = __raw_readl(®s->channel[ch].ch_event); @@ -340,7 +341,7 @@ static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb) ch = PORT2CHANNEL(port); - regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + regs = port->timesync_regs; /* * This really stinks, but we have to poll for the Tx time stamp. 
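The ixp46x_ts.h change above replaces an exported variable with a lookup function and pairs the declaration with a compiled-out fallback, a standard kernel idiom: the real prototype is visible when the provider is built (=y or =m, which is what IS_ENABLED() tests), and a static inline stub stands in otherwise, so callers need no #ifdefs of their own. A generic restatement with hypothetical foo_* names:

#include <linux/errno.h>

struct foo_regs;                        /* provider-private register block */

#if IS_ENABLED(CONFIG_FOO_PTP)
int foo_ptp_find(struct foo_regs *__iomem *regs, int *phc_index);
#else
static inline int foo_ptp_find(struct foo_regs *__iomem *regs, int *phc_index)
{
        *regs = NULL;
        *phc_index = -1;

        return -ENODEV; /* provider absent: callers degrade gracefully */
}
#endif

Note that the real ixp46x_ptp_find() (in the ptp_ixp46x.c hunk below) returns -EPROBE_DEFER until the clock has probed, which is why hwtstamp_set() simply propagates its return value.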
@@ -375,6 +376,7 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) struct hwtstamp_config cfg; struct ixp46x_ts_regs *regs; struct port *port = netdev_priv(netdev); + int ret; int ch; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) @@ -383,8 +385,12 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) if (cfg.flags) /* reserved for future extensions */ return -EINVAL; + ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index); + if (ret) + return ret; + ch = PORT2CHANNEL(port); - regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + regs = port->timesync_regs; if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; @@ -850,14 +856,14 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev) bytes = len; mem = skb->data; #else - offset = (int)skb->data & 3; /* keep 32-bit alignment */ + offset = (uintptr_t)skb->data & 3; /* keep 32-bit alignment */ bytes = ALIGN(offset + len, 4); if (!(mem = kmalloc(bytes, GFP_ATOMIC))) { dev_kfree_skb(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } - memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4); + memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4); #endif phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); @@ -988,25 +994,27 @@ static void ixp4xx_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, "internal", sizeof(info->bus_info)); } -int ixp46x_phc_index = -1; -EXPORT_SYMBOL_GPL(ixp46x_phc_index); - static int ixp4xx_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { - if (!cpu_is_ixp46x()) { + struct port *port = netdev_priv(dev); + + if (port->phc_index < 0) + ixp46x_ptp_find(&port->timesync_regs, &port->phc_index); + + info->phc_index = port->phc_index; + + if (info->phc_index < 0) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; - info->phc_index = -1; return 0; } info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = ixp46x_phc_index; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -1357,7 +1365,7 @@ static const struct net_device_ops ixp4xx_netdev_ops = { .ndo_stop = eth_close, .ndo_start_xmit = eth_xmit, .ndo_set_rx_mode = eth_set_mcast_list, - .ndo_do_ioctl = eth_ioctl, + .ndo_eth_ioctl = eth_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -1481,6 +1489,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) port = netdev_priv(ndev); port->netdev = ndev; port->id = plat->npe; + port->phc_index = -1; /* Get the port resource and remap */ port->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c index a6fb88fd42f7..ecece21315c3 100644 --- a/drivers/net/ethernet/xscale/ptp_ixp46x.c +++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c @@ -5,14 +5,16 @@ * Copyright (C) 2010 OMICRON electronics GmbH */ #include <linux/device.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> #include <linux/err.h> -#include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/ptp_clock_kernel.h> +#include <linux/platform_device.h> #include <linux/soc/ixp4xx/cpu.h> #include <linux/module.h> #include <mach/ixp4xx-regs.h> @@ -21,10 +23,6 @@ #define DRIVER "ptp_ixp46x" 
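The eth_xmit hunk above is a correctness fix worth generalizing: casting a pointer through (int) truncates it on 64-bit targets, so alignment arithmetic must go through uintptr_t, an unsigned integer type defined to be wide enough to round-trip a pointer. For example:

#include <linux/types.h>

/* Byte offset of p within a 32-bit word; correct on 32- and 64-bit. */
static inline unsigned int word_misalignment(const void *p)
{
        return (uintptr_t)p & 3;
}

/* The same pointer rounded down to its enclosing word boundary,
 * mirroring the (uintptr_t)skb->data & ~3 expression above. */
static inline const void *word_align_down(const void *p)
{
        return (const void *)((uintptr_t)p & ~(uintptr_t)3);
}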
#define N_EXT_TS 2 -#define MASTER_GPIO 8 -#define MASTER_IRQ 25 -#define SLAVE_GPIO 7 -#define SLAVE_IRQ 24 struct ixp_clock { struct ixp46x_ts_regs *regs; @@ -32,9 +30,11 @@ struct ixp_clock { struct ptp_clock_info caps; int exts0_enabled; int exts1_enabled; + int slave_irq; + int master_irq; }; -DEFINE_SPINLOCK(register_lock); +static DEFINE_SPINLOCK(register_lock); /* * Register access functions @@ -243,53 +243,38 @@ static const struct ptp_clock_info ptp_ixp_caps = { static struct ixp_clock ixp_clock; -static int setup_interrupt(int gpio) +int ixp46x_ptp_find(struct ixp46x_ts_regs *__iomem *regs, int *phc_index) { - int irq; - int err; - - err = gpio_request(gpio, "ixp4-ptp"); - if (err) - return err; - - err = gpio_direction_input(gpio); - if (err) - return err; - - irq = gpio_to_irq(gpio); - if (irq < 0) - return irq; + *regs = ixp_clock.regs; + *phc_index = ptp_clock_index(ixp_clock.ptp_clock); - err = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING); - if (err) { - pr_err("cannot set trigger type for irq %d\n", irq); - return err; - } - - err = request_irq(irq, isr, 0, DRIVER, &ixp_clock); - if (err) { - pr_err("request_irq failed for irq %d\n", irq); - return err; - } + if (!ixp_clock.ptp_clock) + return -EPROBE_DEFER; - return irq; + return 0; } +EXPORT_SYMBOL_GPL(ixp46x_ptp_find); -static void __exit ptp_ixp_exit(void) +/* Called from the registered devm action */ +static void ptp_ixp_unregister_action(void *d) { - free_irq(MASTER_IRQ, &ixp_clock); - free_irq(SLAVE_IRQ, &ixp_clock); - ixp46x_phc_index = -1; - ptp_clock_unregister(ixp_clock.ptp_clock); + struct ptp_clock *ptp_clock = d; + + ptp_clock_unregister(ptp_clock); + ixp_clock.ptp_clock = NULL; } -static int __init ptp_ixp_init(void) +static int ptp_ixp_probe(struct platform_device *pdev) { - if (!cpu_is_ixp46x()) - return -ENODEV; + struct device *dev = &pdev->dev; + int ret; - ixp_clock.regs = - (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + ixp_clock.regs = devm_platform_ioremap_resource(pdev, 0); + ixp_clock.master_irq = platform_get_irq(pdev, 0); + ixp_clock.slave_irq = platform_get_irq(pdev, 1); + if (IS_ERR(ixp_clock.regs) || + !ixp_clock.master_irq || !ixp_clock.slave_irq) + return -ENXIO; ixp_clock.caps = ptp_ixp_caps; @@ -298,32 +283,51 @@ static int __init ptp_ixp_init(void) if (IS_ERR(ixp_clock.ptp_clock)) return PTR_ERR(ixp_clock.ptp_clock); - ixp46x_phc_index = ptp_clock_index(ixp_clock.ptp_clock); + ret = devm_add_action_or_reset(dev, ptp_ixp_unregister_action, + ixp_clock.ptp_clock); + if (ret) { + dev_err(dev, "failed to install clock removal handler\n"); + return ret; + } __raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend); __raw_writel(1, &ixp_clock.regs->trgt_lo); __raw_writel(0, &ixp_clock.regs->trgt_hi); __raw_writel(TTIPEND, &ixp_clock.regs->event); - if (MASTER_IRQ != setup_interrupt(MASTER_GPIO)) { - pr_err("failed to setup gpio %d as irq\n", MASTER_GPIO); - goto no_master; - } - if (SLAVE_IRQ != setup_interrupt(SLAVE_GPIO)) { - pr_err("failed to setup gpio %d as irq\n", SLAVE_GPIO); - goto no_slave; - } + ret = devm_request_irq(dev, ixp_clock.master_irq, isr, + 0, DRIVER, &ixp_clock); + if (ret) + return dev_err_probe(dev, ret, + "request_irq failed for irq %d\n", + ixp_clock.master_irq); + + ret = devm_request_irq(dev, ixp_clock.slave_irq, isr, + 0, DRIVER, &ixp_clock); + if (ret) + return dev_err_probe(dev, ret, + "request_irq failed for irq %d\n", + ixp_clock.slave_irq); return 0; -no_slave: - free_irq(MASTER_IRQ, &ixp_clock); -no_master: - 
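The rewritten probe path here leans on devm_add_action_or_reset(): instead of error-unwind labels and a remove() callback, a cleanup action is registered right after the resource is created, and the device core runs it on probe failure or unbind, in reverse registration order. The general shape, with hypothetical foo_* names standing in for the real resource:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical resource type and constructors, for illustration only. */
struct foo_clock;
struct foo_clock *foo_clock_register(struct device *dev);
void foo_clock_unregister(struct foo_clock *clock);

static void foo_unregister_action(void *data)
{
        foo_clock_unregister(data);
}

static int foo_probe(struct platform_device *pdev)
{
        struct foo_clock *clock;

        clock = foo_clock_register(&pdev->dev);
        if (IS_ERR(clock))
                return PTR_ERR(clock);

        /* If registering the action itself fails, it is run
         * ("or_reset") immediately, so the clock is never leaked. */
        return devm_add_action_or_reset(&pdev->dev, foo_unregister_action,
                                        clock);
}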
ptp_clock_unregister(ixp_clock.ptp_clock); - return -ENODEV; } -module_init(ptp_ixp_init); -module_exit(ptp_ixp_exit); +static const struct of_device_id ptp_ixp_match[] = { + { + .compatible = "intel,ixp46x-ptp-timer", + }, + { }, +}; + +static struct platform_driver ptp_ixp_driver = { + .driver = { + .name = "ptp-ixp46x", + .of_match_table = ptp_ixp_match, + .suppress_bind_attrs = true, + }, + .probe = ptp_ixp_probe, +}; +module_platform_driver(ptp_ixp_driver); MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>"); MODULE_DESCRIPTION("PTP clock using the IXP46X timer"); diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c index 69c29a2ef95d..c5cb421f9890 100644 --- a/drivers/net/fddi/skfp/skfddi.c +++ b/drivers/net/fddi/skfp/skfddi.c @@ -70,6 +70,7 @@ static const char * const boot_msg = /* Include files */ #include <linux/capability.h> +#include <linux/compat.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> @@ -103,7 +104,8 @@ static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev); static void skfp_ctl_set_multicast_list(struct net_device *dev); static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev); static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr); -static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd); static netdev_tx_t skfp_send_pkt(struct sk_buff *skb, struct net_device *dev); static void send_queued_packets(struct s_smc *smc); @@ -164,7 +166,7 @@ static const struct net_device_ops skfp_netdev_ops = { .ndo_get_stats = skfp_ctl_get_stats, .ndo_set_rx_mode = skfp_ctl_set_multicast_list, .ndo_set_mac_address = skfp_ctl_set_mac_address, - .ndo_do_ioctl = skfp_ioctl, + .ndo_siocdevprivate = skfp_siocdevprivate, }; /* @@ -932,9 +934,9 @@ static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr) /* - * ============== - * = skfp_ioctl = - * ============== + * ======================= + * = skfp_siocdevprivate = + * ======================= * * Overview: * @@ -954,16 +956,19 @@ static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr) */ -static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd) { struct s_smc *smc = netdev_priv(dev); skfddi_priv *lp = &smc->os; struct s_skfp_ioctl ioc; int status = 0; - if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl))) + if (copy_from_user(&ioc, data, sizeof(struct s_skfp_ioctl))) return -EFAULT; + if (in_compat_syscall()) + return -EOPNOTSUPP; + switch (ioc.cmd) { case SKFP_GET_STATS: /* Get the driver statistics */ ioc.len = sizeof(lp->MacStat); @@ -1169,8 +1174,8 @@ static void send_queued_packets(struct s_smc *smc) txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue); - dma_address = pci_map_single(&bp->pdev, skb->data, - skb->len, PCI_DMA_TODEVICE); + dma_address = dma_map_single(&(&bp->pdev)->dev, skb->data, + skb->len, DMA_TO_DEVICE); if (frame_status & LAN_TX) { txd->txd_os.skb = skb; // save skb txd->txd_os.dma_addr = dma_address; // save dma mapping @@ -1179,8 +1184,8 @@ static void send_queued_packets(struct s_smc *smc) frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF); if (!(frame_status & LAN_TX)) { // local only frame - pci_unmap_single(&bp->pdev, dma_address, - skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&(&bp->pdev)->dev, 
dma_address, + skb->len, DMA_TO_DEVICE); dev_kfree_skb_irq(skb); } spin_unlock_irqrestore(&bp->DriverLock, Flags); @@ -1462,8 +1467,9 @@ void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag) if (r->rxd_os.skb && r->rxd_os.dma_addr) { int MaxFrameSize = bp->MaxFrameSize; - pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr, - MaxFrameSize, PCI_DMA_FROMDEVICE); + dma_unmap_single(&(&bp->pdev)->dev, + r->rxd_os.dma_addr, MaxFrameSize, + DMA_FROM_DEVICE); r->rxd_os.dma_addr = 0; } } @@ -1498,8 +1504,8 @@ void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd) txd->txd_os.skb = NULL; // release the DMA mapping - pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr, - skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&(&smc->os.pdev)->dev, txd->txd_os.dma_addr, + skb->len, DMA_TO_DEVICE); txd->txd_os.dma_addr = 0; smc->os.MacStat.gen.tx_packets++; // Count transmitted packets. @@ -1702,10 +1708,9 @@ void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd, skb_reserve(skb, 3); skb_put(skb, MaxFrameSize); v_addr = skb->data; - b_addr = pci_map_single(&smc->os.pdev, - v_addr, - MaxFrameSize, - PCI_DMA_FROMDEVICE); + b_addr = dma_map_single(&(&smc->os.pdev)->dev, + v_addr, MaxFrameSize, + DMA_FROM_DEVICE); rxd->rxd_os.dma_addr = b_addr; } else { // no skb available, use local buffer @@ -1718,10 +1723,8 @@ void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd, // we use skb from old rxd rxd->rxd_os.skb = skb; v_addr = skb->data; - b_addr = pci_map_single(&smc->os.pdev, - v_addr, - MaxFrameSize, - PCI_DMA_FROMDEVICE); + b_addr = dma_map_single(&(&smc->os.pdev)->dev, v_addr, + MaxFrameSize, DMA_FROM_DEVICE); rxd->rxd_os.dma_addr = b_addr; } hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize, @@ -1773,10 +1776,8 @@ void mac_drv_fill_rxd(struct s_smc *smc) skb_reserve(skb, 3); skb_put(skb, MaxFrameSize); v_addr = skb->data; - b_addr = pci_map_single(&smc->os.pdev, - v_addr, - MaxFrameSize, - PCI_DMA_FROMDEVICE); + b_addr = dma_map_single(&(&smc->os.pdev)->dev, v_addr, + MaxFrameSize, DMA_FROM_DEVICE); rxd->rxd_os.dma_addr = b_addr; } else { // no skb available, use local buffer @@ -1833,8 +1834,9 @@ void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd, skfddi_priv *bp = &smc->os; int MaxFrameSize = bp->MaxFrameSize; - pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr, - MaxFrameSize, PCI_DMA_FROMDEVICE); + dma_unmap_single(&(&bp->pdev)->dev, + rxd->rxd_os.dma_addr, MaxFrameSize, + DMA_FROM_DEVICE); dev_kfree_skb(skb); rxd->rxd_os.skb = NULL; diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 4435a1195194..775dcf4ebde5 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -1005,7 +1005,8 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr) /* --------------------------------------------------------------------- */ -static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) { struct baycom_state *bc = netdev_priv(dev); struct hdlcdrv_ioctl hi; @@ -1013,7 +1014,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (cmd != SIOCDEVPRIVATE) return -ENOIOCTLCMD; - if (copy_from_user(&hi, ifr->ifr_data, sizeof(hi))) + if (copy_from_user(&hi, data, sizeof(hi))) return -EFAULT; switch (hi.cmd) { default: @@ -1104,7 +1105,7 @@ static int baycom_ioctl(struct 
net_device *dev, struct ifreq *ifr, int cmd) return HDLCDRV_PARMASK_IOBASE; } - if (copy_to_user(ifr->ifr_data, &hi, sizeof(hi))) + if (copy_to_user(data, &hi, sizeof(hi))) return -EFAULT; return 0; } @@ -1114,7 +1115,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static const struct net_device_ops baycom_netdev_ops = { .ndo_open = epp_open, .ndo_stop = epp_close, - .ndo_do_ioctl = baycom_ioctl, + .ndo_siocdevprivate = baycom_siocdevprivate, .ndo_start_xmit = baycom_send_packet, .ndo_set_mac_address = baycom_set_mac_address, }; diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c index 6a3dc7b3f28a..fd7da5bb1fa5 100644 --- a/drivers/net/hamradio/baycom_par.c +++ b/drivers/net/hamradio/baycom_par.c @@ -380,7 +380,7 @@ static int par96_close(struct net_device *dev) * ===================== hdlcdrv driver interface ========================= */ -static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, +static int baycom_ioctl(struct net_device *dev, void __user *data, struct hdlcdrv_ioctl *hi, int cmd); /* --------------------------------------------------------------------- */ @@ -408,7 +408,7 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr) /* --------------------------------------------------------------------- */ -static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, +static int baycom_ioctl(struct net_device *dev, void __user *data, struct hdlcdrv_ioctl *hi, int cmd) { struct baycom_state *bc; @@ -428,7 +428,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, case HDLCDRVCTL_GETMODE: strcpy(hi->data.modename, bc->options ? "par96" : "picpar"); - if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl))) + if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; @@ -440,7 +440,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, case HDLCDRVCTL_MODELIST: strcpy(hi->data.modename, "par96,picpar"); - if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl))) + if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; @@ -449,7 +449,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, } - if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi))) + if (copy_from_user(&bi, data, sizeof(bi))) return -EFAULT; switch (bi.cmd) { default: @@ -464,7 +464,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, #endif /* BAYCOM_DEBUG */ } - if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi))) + if (copy_to_user(data, &bi, sizeof(bi))) return -EFAULT; return 0; diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index 04bb409707fc..646f605e358f 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c @@ -462,7 +462,7 @@ static int ser12_close(struct net_device *dev) /* --------------------------------------------------------------------- */ -static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, +static int baycom_ioctl(struct net_device *dev, void __user *data, struct hdlcdrv_ioctl *hi, int cmd); /* --------------------------------------------------------------------- */ @@ -497,7 +497,7 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr) /* --------------------------------------------------------------------- */ -static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, +static int baycom_ioctl(struct net_device *dev, void __user *data, 
struct hdlcdrv_ioctl *hi, int cmd) { struct baycom_state *bc; @@ -519,7 +519,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, sprintf(hi->data.modename, "ser%u", bc->baud / 100); if (bc->opt_dcd <= 0) strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : "+"); - if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl))) + if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; @@ -531,7 +531,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, case HDLCDRVCTL_MODELIST: strcpy(hi->data.modename, "ser12,ser3,ser24"); - if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl))) + if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; @@ -540,7 +540,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, } - if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi))) + if (copy_from_user(&bi, data, sizeof(bi))) return -EFAULT; switch (bi.cmd) { default: @@ -555,7 +555,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, #endif /* BAYCOM_DEBUG */ } - if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi))) + if (copy_to_user(data, &bi, sizeof(bi))) return -EFAULT; return 0; diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c index a1acb3a47bdb..5d1ab4840753 100644 --- a/drivers/net/hamradio/baycom_ser_hdx.c +++ b/drivers/net/hamradio/baycom_ser_hdx.c @@ -521,7 +521,7 @@ static int ser12_close(struct net_device *dev) /* --------------------------------------------------------------------- */ -static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, +static int baycom_ioctl(struct net_device *dev, void __user *data, struct hdlcdrv_ioctl *hi, int cmd); /* --------------------------------------------------------------------- */ @@ -551,7 +551,7 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr) /* --------------------------------------------------------------------- */ -static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, +static int baycom_ioctl(struct net_device *dev, void __user *data, struct hdlcdrv_ioctl *hi, int cmd) { struct baycom_state *bc; @@ -573,7 +573,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, strcpy(hi->data.modename, "ser12"); if (bc->opt_dcd <= 0) strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : (bc->opt_dcd == -2) ? 
"@" : "+"); - if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl))) + if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; @@ -585,7 +585,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, case HDLCDRVCTL_MODELIST: strcpy(hi->data.modename, "ser12"); - if (copy_to_user(ifr->ifr_data, hi, sizeof(struct hdlcdrv_ioctl))) + if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; @@ -594,7 +594,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, } - if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi))) + if (copy_from_user(&bi, data, sizeof(bi))) return -EFAULT; switch (bi.cmd) { default: @@ -609,7 +609,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, #endif /* BAYCOM_DEBUG */ } - if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi))) + if (copy_to_user(data, &bi, sizeof(bi))) return -EFAULT; return 0; diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 0e623c2e8b2d..d967b0748773 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -314,9 +314,10 @@ static int bpq_set_mac_address(struct net_device *dev, void *addr) * source ethernet address (broadcast * or multicast: accept all) */ -static int bpq_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int bpq_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) { - struct bpq_ethaddr __user *ethaddr = ifr->ifr_data; + struct bpq_ethaddr __user *ethaddr = data; struct bpqdev *bpq = netdev_priv(dev); struct bpq_req req; @@ -325,7 +326,7 @@ static int bpq_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCSBPQETHOPT: - if (copy_from_user(&req, ifr->ifr_data, sizeof(struct bpq_req))) + if (copy_from_user(&req, data, sizeof(struct bpq_req))) return -EFAULT; switch (req.cmd) { case SIOCGBPQETHPARAM: @@ -448,7 +449,7 @@ static const struct net_device_ops bpq_netdev_ops = { .ndo_stop = bpq_close, .ndo_start_xmit = bpq_xmit, .ndo_set_mac_address = bpq_set_mac_address, - .ndo_do_ioctl = bpq_ioctl, + .ndo_siocdevprivate = bpq_siocdevprivate, }; static void bpq_setup(struct net_device *dev) diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index c25c8c99c5c7..b50b7fafd8d6 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -225,7 +225,8 @@ static int read_scc_data(struct scc_priv *priv); static int scc_open(struct net_device *dev); static int scc_close(struct net_device *dev); -static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd); static int scc_send_packet(struct sk_buff *skb, struct net_device *dev); static int scc_set_mac_address(struct net_device *dev, void *sa); @@ -432,7 +433,7 @@ static const struct net_device_ops scc_netdev_ops = { .ndo_open = scc_open, .ndo_stop = scc_close, .ndo_start_xmit = scc_send_packet, - .ndo_do_ioctl = scc_ioctl, + .ndo_siocdevprivate = scc_siocdevprivate, .ndo_set_mac_address = scc_set_mac_address, }; @@ -881,15 +882,13 @@ static int scc_close(struct net_device *dev) } -static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { struct scc_priv *priv = dev->ml_priv; switch (cmd) { case SIOCGSCCPARAM: - if (copy_to_user - (ifr->ifr_data, &priv->param, - 
sizeof(struct scc_param))) + if (copy_to_user(data, &priv->param, sizeof(struct scc_param))) return -EFAULT; return 0; case SIOCSSCCPARAM: @@ -897,13 +896,12 @@ static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EPERM; if (netif_running(dev)) return -EAGAIN; - if (copy_from_user - (&priv->param, ifr->ifr_data, - sizeof(struct scc_param))) + if (copy_from_user(&priv->param, data, + sizeof(struct scc_param))) return -EFAULT; return 0; default: - return -EINVAL; + return -EOPNOTSUPP; } } diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index cbaf1cdde7cb..5805cfc83854 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -483,23 +483,25 @@ static int hdlcdrv_close(struct net_device *dev) /* --------------------------------------------------------------------- */ -static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int hdlcdrv_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) { struct hdlcdrv_state *s = netdev_priv(dev); struct hdlcdrv_ioctl bi; - if (cmd != SIOCDEVPRIVATE) { - if (s->ops && s->ops->ioctl) - return s->ops->ioctl(dev, ifr, &bi, cmd); + if (cmd != SIOCDEVPRIVATE) return -ENOIOCTLCMD; - } - if (copy_from_user(&bi, ifr->ifr_data, sizeof(bi))) + + if (in_compat_syscall()) /* to be implemented */ + return -ENOIOCTLCMD; + + if (copy_from_user(&bi, data, sizeof(bi))) return -EFAULT; switch (bi.cmd) { default: if (s->ops && s->ops->ioctl) - return s->ops->ioctl(dev, ifr, &bi, cmd); + return s->ops->ioctl(dev, data, &bi, cmd); return -ENOIOCTLCMD; case HDLCDRVCTL_GETCHANNELPAR: @@ -605,7 +607,7 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) break; } - if (copy_to_user(ifr->ifr_data, &bi, sizeof(bi))) + if (copy_to_user(data, &bi, sizeof(bi))) return -EFAULT; return 0; @@ -617,7 +619,7 @@ static const struct net_device_ops hdlcdrv_netdev = { .ndo_open = hdlcdrv_open, .ndo_stop = hdlcdrv_close, .ndo_start_xmit = hdlcdrv_send_packet, - .ndo_do_ioctl = hdlcdrv_ioctl, + .ndo_siocdevprivate = hdlcdrv_siocdevprivate, .ndo_set_mac_address = hdlcdrv_set_mac_address, }; diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 3f1edd0526a4..e0bb131a33d7 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -210,7 +210,8 @@ static int scc_net_close(struct net_device *dev); static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb); static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev); -static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +static int scc_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd); static int scc_net_set_mac_address(struct net_device *dev, void *addr); static struct net_device_stats * scc_net_get_stats(struct net_device *dev); @@ -1550,7 +1551,7 @@ static const struct net_device_ops scc_netdev_ops = { .ndo_start_xmit = scc_net_tx, .ndo_set_mac_address = scc_net_set_mac_address, .ndo_get_stats = scc_net_get_stats, - .ndo_do_ioctl = scc_net_ioctl, + .ndo_siocdevprivate = scc_net_siocdevprivate, }; /* ----> Initialize device <----- */ @@ -1703,7 +1704,8 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev) * SIOCSCCCAL - send calib. 
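The hamradio conversions running through this stretch (baycom, bpqether, dmascc, hdlcdrv, scc, yam) all follow one recipe: the handler moves from .ndo_do_ioctl to .ndo_siocdevprivate, takes the user pointer as an explicit void __user *data argument instead of digging it out of ifr->ifr_data, and, where the private structs are not compat-safe, rejects 32-bit compat callers explicitly. A condensed sketch with a hypothetical foo driver and payload:

#include <linux/compat.h>
#include <linux/netdevice.h>
#include <linux/sockios.h>
#include <linux/uaccess.h>

/* Hypothetical driver-private ioctl payload. */
struct foo_param {
        int speed;
};

static int foo_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
                              void __user *data, int cmd)
{
        struct foo_param param;

        if (cmd != SIOCDEVPRIVATE)
                return -ENOIOCTLCMD;

        /* The core converts the 32-bit ifreq layout for compat callers,
         * but not the private payload behind the pointer; bail out if
         * that payload would need its own translation. */
        if (in_compat_syscall())
                return -EOPNOTSUPP;

        if (copy_from_user(&param, data, sizeof(param)))
                return -EFAULT;

        /* ... act on param ... */

        if (copy_to_user(data, &param, sizeof(param)))
                return -EFAULT;

        return 0;
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_siocdevprivate = foo_siocdevprivate,
};

Note that dmascc also switches its default errno from -EINVAL to -EOPNOTSUPP, the conventional return for an unsupported private request.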
pattern arg: (struct scc_calibrate *) arg */ -static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int scc_net_siocdevprivate(struct net_device *dev, + struct ifreq *ifr, void __user *arg, int cmd) { struct scc_kiss_cmd kiss_cmd; struct scc_mem_config memcfg; @@ -1712,8 +1714,6 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct scc_channel *scc = (struct scc_channel *) dev->ml_priv; int chan; unsigned char device_name[IFNAMSIZ]; - void __user *arg = ifr->ifr_data; - if (!Driver_Initialized) { @@ -1722,6 +1722,9 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) int found = 1; if (!capable(CAP_SYS_RAWIO)) return -EPERM; + if (in_compat_syscall()) + return -EOPNOTSUPP; + if (!arg) return -EFAULT; if (Nchips >= SCC_MAXCHIPS) diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index d4911041596c..6ddacbdb224b 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -920,15 +920,15 @@ static int yam_close(struct net_device *dev) /* --------------------------------------------------------------------- */ -static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int yam_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { struct yam_port *yp = netdev_priv(dev); struct yamdrv_ioctl_cfg yi; struct yamdrv_ioctl_mcs *ym; int ioctl_cmd; - if (copy_from_user(&ioctl_cmd, ifr->ifr_data, sizeof(int))) - return -EFAULT; + if (copy_from_user(&ioctl_cmd, data, sizeof(int))) + return -EFAULT; if (yp->magic != YAM_MAGIC) return -EINVAL; @@ -947,8 +947,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCYAMSMCS: if (netif_running(dev)) return -EINVAL; /* Cannot change this parameter when up */ - ym = memdup_user(ifr->ifr_data, - sizeof(struct yamdrv_ioctl_mcs)); + ym = memdup_user(data, sizeof(struct yamdrv_ioctl_mcs)); if (IS_ERR(ym)) return PTR_ERR(ym); if (ym->cmd != SIOCYAMSMCS) @@ -965,8 +964,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCYAMSCFG: if (!capable(CAP_SYS_RAWIO)) return -EPERM; - if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg))) - return -EFAULT; + if (copy_from_user(&yi, data, sizeof(struct yamdrv_ioctl_cfg))) + return -EFAULT; if (yi.cmd != SIOCYAMSCFG) return -EINVAL; @@ -1045,8 +1044,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) yi.cfg.txtail = yp->txtail; yi.cfg.persist = yp->pers; yi.cfg.slottime = yp->slot; - if (copy_to_user(ifr->ifr_data, &yi, sizeof(struct yamdrv_ioctl_cfg))) - return -EFAULT; + if (copy_to_user(data, &yi, sizeof(struct yamdrv_ioctl_cfg))) + return -EFAULT; break; default: @@ -1074,7 +1073,7 @@ static const struct net_device_ops yam_netdev_ops = { .ndo_open = yam_open, .ndo_stop = yam_close, .ndo_start_xmit = yam_send_packet, - .ndo_do_ioctl = yam_ioctl, + .ndo_siocdevprivate = yam_siocdevprivate, .ndo_set_mac_address = yam_set_mac_address, }; diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 22010384c4a3..7661dbb31162 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -63,7 +63,7 @@ static const char version[] = static const struct net_device_ops rr_netdev_ops = { .ndo_open = rr_open, .ndo_stop = rr_close, - .ndo_do_ioctl = rr_ioctl, + .ndo_siocdevprivate = rr_siocdevprivate, .ndo_start_xmit = rr_start_xmit, .ndo_set_mac_address = hippi_mac_addr, }; @@ -1568,7 +1568,8 @@ out: } -static int 
rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int rr_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd) { struct rr_private *rrpriv; unsigned char *image, *oldimage; @@ -1603,7 +1604,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) error = -EFAULT; goto gf_out; } - error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES); + error = copy_to_user(data, image, EEPROM_BYTES); if (error) error = -EFAULT; gf_out: @@ -1615,7 +1616,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return -EPERM; } - image = memdup_user(rq->ifr_data, EEPROM_BYTES); + image = memdup_user(data, EEPROM_BYTES); if (IS_ERR(image)) return PTR_ERR(image); @@ -1658,7 +1659,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return error; case SIOCRRID: - return put_user(0x52523032, (int __user *)rq->ifr_data); + return put_user(0x52523032, (int __user *)data); default: return error; } diff --git a/drivers/net/hippi/rrunner.h b/drivers/net/hippi/rrunner.h index 87533784604f..55377614e752 100644 --- a/drivers/net/hippi/rrunner.h +++ b/drivers/net/hippi/rrunner.h @@ -835,7 +835,8 @@ static int rr_open(struct net_device *dev); static netdev_tx_t rr_start_xmit(struct sk_buff *skb, struct net_device *dev); static int rr_close(struct net_device *dev); -static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int rr_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd); static unsigned int rr_read_eeprom(struct rr_private *rrpriv, unsigned long offset, unsigned char *buf, diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile index 506f8d5cd4ee..bdfb2430ab2c 100644 --- a/drivers/net/ipa/Makefile +++ b/drivers/net/ipa/Makefile @@ -1,9 +1,6 @@ -# Un-comment the next line if you want to validate configuration data -#ccflags-y += -DIPA_VALIDATE - obj-$(CONFIG_QCOM_IPA) += ipa.o -ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \ +ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \ ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \ ipa_gsi.o ipa_smp2p.o ipa_uc.o \ ipa_endpoint.o ipa_cmd.o ipa_modem.o \ diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 427c68b2ad8f..a2fcdb1abdb9 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -198,77 +198,6 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id) gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id)); } -/* Turn off all GSI interrupts initially; there is no gsi_irq_teardown() */ -static void gsi_irq_setup(struct gsi *gsi) -{ - /* Disable all interrupt types */ - gsi_irq_type_update(gsi, 0); - - /* Clear all type-specific interrupt masks */ - iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); - iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); - iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); - iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); - - /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */ - if (gsi->version > IPA_VERSION_3_1) { - u32 offset; - - /* These registers are in the non-adjusted address range */ - offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET; - iowrite32(0, gsi->virt_raw + offset); - offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET; - iowrite32(0, gsi->virt_raw + offset); - } - - iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); -} - -/* Get # supported channel and event rings; there is no gsi_ring_teardown() */ -static int gsi_ring_setup(struct 
gsi *gsi) -{ - struct device *dev = gsi->dev; - u32 count; - u32 val; - - if (gsi->version < IPA_VERSION_3_5_1) { - /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */ - gsi->channel_count = GSI_CHANNEL_COUNT_MAX; - gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX; - - return 0; - } - - val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET); - - count = u32_get_bits(val, NUM_CH_PER_EE_FMASK); - if (!count) { - dev_err(dev, "GSI reports zero channels supported\n"); - return -EINVAL; - } - if (count > GSI_CHANNEL_COUNT_MAX) { - dev_warn(dev, "limiting to %u channels; hardware supports %u\n", - GSI_CHANNEL_COUNT_MAX, count); - count = GSI_CHANNEL_COUNT_MAX; - } - gsi->channel_count = count; - - count = u32_get_bits(val, NUM_EV_PER_EE_FMASK); - if (!count) { - dev_err(dev, "GSI reports zero event rings supported\n"); - return -EINVAL; - } - if (count > GSI_EVT_RING_COUNT_MAX) { - dev_warn(dev, - "limiting to %u event rings; hardware supports %u\n", - GSI_EVT_RING_COUNT_MAX, count); - count = GSI_EVT_RING_COUNT_MAX; - } - gsi->evt_ring_count = count; - - return 0; -} - /* Event ring commands are performed one at a time. Their completion * is signaled by the event ring control GSI interrupt type, which is * only enabled when we issue an event ring command. Only the event @@ -920,12 +849,13 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) /* All done! */ } -static int __gsi_channel_start(struct gsi_channel *channel, bool start) +static int __gsi_channel_start(struct gsi_channel *channel, bool resume) { struct gsi *gsi = channel->gsi; int ret; - if (!start) + /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */ + if (resume && gsi->version < IPA_VERSION_4_0) return 0; mutex_lock(&gsi->mutex); @@ -947,7 +877,7 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id) napi_enable(&channel->napi); gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id); - ret = __gsi_channel_start(channel, true); + ret = __gsi_channel_start(channel, false); if (ret) { gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id); napi_disable(&channel->napi); @@ -971,7 +901,7 @@ static int gsi_channel_stop_retry(struct gsi_channel *channel) return ret; } -static int __gsi_channel_stop(struct gsi_channel *channel, bool stop) +static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend) { struct gsi *gsi = channel->gsi; int ret; @@ -979,7 +909,8 @@ static int __gsi_channel_stop(struct gsi_channel *channel, bool stop) /* Wait for any underway transactions to complete before stopping. 
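Two things happen in the __gsi_channel_start()/__gsi_channel_stop() hunks: the setup helpers move (they reappear, extended, further down), and the boolean parameter flips meaning from mechanism (start/stop) to intent (resume/suspend), so the "GSI only implements suspend on IPA v4.0+" policy lives in exactly one place instead of at every call site. A hypothetical condensation of that refactor, not the driver's real types:

enum hw_version { HW_V3_5, HW_V4_0 };

struct chan {
        enum hw_version version;
};

static int chan_do_stop(struct chan *ch)
{
        /* issue the real stop command to the hardware */
        return 0;
}

/* Before, every caller computed the policy itself:
 *      chan_stop(ch, ch->version >= HW_V4_0);
 * After, callers state intent and the helper owns the policy: */
static int chan_suspend(struct chan *ch)
{
        /* Pre-4.0 hardware retains channel state across suspend: no-op. */
        if (ch->version < HW_V4_0)
                return 0;

        return chan_do_stop(ch);
}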
*/ gsi_channel_trans_quiesce(channel); - if (!stop) + /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */ + if (suspend && gsi->version < IPA_VERSION_4_0) return 0; mutex_lock(&gsi->mutex); @@ -997,7 +928,7 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id) struct gsi_channel *channel = &gsi->channel[channel_id]; int ret; - ret = __gsi_channel_stop(channel, true); + ret = __gsi_channel_stop(channel, false); if (ret) return ret; @@ -1026,13 +957,13 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell) mutex_unlock(&gsi->mutex); } -/* Stop a STARTED channel for suspend (using stop if requested) */ -int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop) +/* Stop a started channel for suspend */ +int gsi_channel_suspend(struct gsi *gsi, u32 channel_id) { struct gsi_channel *channel = &gsi->channel[channel_id]; int ret; - ret = __gsi_channel_stop(channel, stop); + ret = __gsi_channel_stop(channel, true); if (ret) return ret; @@ -1042,12 +973,24 @@ int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop) return 0; } -/* Resume a suspended channel (starting will be requested if STOPPED) */ -int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start) +/* Resume a suspended channel (starting if stopped) */ +int gsi_channel_resume(struct gsi *gsi, u32 channel_id) { struct gsi_channel *channel = &gsi->channel[channel_id]; - return __gsi_channel_start(channel, start); + return __gsi_channel_start(channel, true); +} + +/* Prevent all GSI interrupts while suspended */ +void gsi_suspend(struct gsi *gsi) +{ + disable_irq(gsi->irq); +} + +/* Allow all GSI interrupts again when resuming */ +void gsi_resume(struct gsi *gsi) +{ + enable_irq(gsi->irq); } /** @@ -1372,33 +1315,20 @@ static irqreturn_t gsi_isr(int irq, void *dev_id) return IRQ_HANDLED; } +/* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */ static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev) { - struct device *dev = &pdev->dev; - unsigned int irq; int ret; ret = platform_get_irq_byname(pdev, "gsi"); if (ret <= 0) return ret ? 
: -EINVAL; - irq = ret; - - ret = request_irq(irq, gsi_isr, 0, "gsi", gsi); - if (ret) { - dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret); - return ret; - } - gsi->irq = irq; + gsi->irq = ret; return 0; } -static void gsi_irq_exit(struct gsi *gsi) -{ - free_irq(gsi->irq, gsi); -} - /* Return the transaction associated with a transfer completion event */ static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel, struct gsi_event *event) @@ -1876,6 +1806,93 @@ static void gsi_channel_teardown(struct gsi *gsi) gsi_irq_disable(gsi); } +/* Turn off all GSI interrupts initially */ +static int gsi_irq_setup(struct gsi *gsi) +{ + int ret; + + /* Writing 1 indicates IRQ interrupts; 0 would be MSI */ + iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET); + + /* Disable all interrupt types */ + gsi_irq_type_update(gsi, 0); + + /* Clear all type-specific interrupt masks */ + iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); + iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); + iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); + iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); + + /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */ + if (gsi->version > IPA_VERSION_3_1) { + u32 offset; + + /* These registers are in the non-adjusted address range */ + offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET; + iowrite32(0, gsi->virt_raw + offset); + offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET; + iowrite32(0, gsi->virt_raw + offset); + } + + iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); + + ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi); + if (ret) + dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret); + + return ret; +} + +static void gsi_irq_teardown(struct gsi *gsi) +{ + free_irq(gsi->irq, gsi); +} + +/* Get # supported channel and event rings; there is no gsi_ring_teardown() */ +static int gsi_ring_setup(struct gsi *gsi) +{ + struct device *dev = gsi->dev; + u32 count; + u32 val; + + if (gsi->version < IPA_VERSION_3_5_1) { + /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */ + gsi->channel_count = GSI_CHANNEL_COUNT_MAX; + gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX; + + return 0; + } + + val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET); + + count = u32_get_bits(val, NUM_CH_PER_EE_FMASK); + if (!count) { + dev_err(dev, "GSI reports zero channels supported\n"); + return -EINVAL; + } + if (count > GSI_CHANNEL_COUNT_MAX) { + dev_warn(dev, "limiting to %u channels; hardware supports %u\n", + GSI_CHANNEL_COUNT_MAX, count); + count = GSI_CHANNEL_COUNT_MAX; + } + gsi->channel_count = count; + + count = u32_get_bits(val, NUM_EV_PER_EE_FMASK); + if (!count) { + dev_err(dev, "GSI reports zero event rings supported\n"); + return -EINVAL; + } + if (count > GSI_EVT_RING_COUNT_MAX) { + dev_warn(dev, + "limiting to %u event rings; hardware supports %u\n", + GSI_EVT_RING_COUNT_MAX, count); + count = GSI_EVT_RING_COUNT_MAX; + } + gsi->evt_ring_count = count; + + return 0; +} + /* Setup function for GSI. 
GSI firmware must be loaded and initialized */ int gsi_setup(struct gsi *gsi) { @@ -1889,25 +1906,34 @@ int gsi_setup(struct gsi *gsi) return -EIO; } - gsi_irq_setup(gsi); /* No matching teardown required */ + ret = gsi_irq_setup(gsi); + if (ret) + return ret; ret = gsi_ring_setup(gsi); /* No matching teardown required */ if (ret) - return ret; + goto err_irq_teardown; /* Initialize the error log */ iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET); - /* Writing 1 indicates IRQ interrupts; 0 would be MSI */ - iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET); + ret = gsi_channel_setup(gsi); + if (ret) + goto err_irq_teardown; - return gsi_channel_setup(gsi); + return 0; + +err_irq_teardown: + gsi_irq_teardown(gsi); + + return ret; } /* Inverse of gsi_setup() */ void gsi_teardown(struct gsi *gsi) { gsi_channel_teardown(gsi); + gsi_irq_teardown(gsi); } /* Initialize a channel's event ring */ @@ -1964,7 +1990,6 @@ static void gsi_evt_ring_init(struct gsi *gsi) static bool gsi_channel_data_valid(struct gsi *gsi, const struct ipa_gsi_endpoint_data *data) { -#ifdef IPA_VALIDATION u32 channel_id = data->channel_id; struct device *dev = gsi->dev; @@ -2010,7 +2035,6 @@ static bool gsi_channel_data_valid(struct gsi *gsi, channel_id, data->channel.event_count); return false; } -#endif /* IPA_VALIDATION */ return true; } @@ -2206,20 +2230,18 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, init_completion(&gsi->completion); - ret = gsi_irq_init(gsi, pdev); + ret = gsi_irq_init(gsi, pdev); /* No matching exit required */ if (ret) goto err_iounmap; ret = gsi_channel_init(gsi, count, data); if (ret) - goto err_irq_exit; + goto err_iounmap; mutex_init(&gsi->mutex); return 0; -err_irq_exit: - gsi_irq_exit(gsi); err_iounmap: iounmap(gsi->virt_raw); @@ -2231,7 +2253,6 @@ void gsi_exit(struct gsi *gsi) { mutex_destroy(&gsi->mutex); gsi_channel_exit(gsi); - gsi_irq_exit(gsi); iounmap(gsi->virt_raw); } diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h index 81cd7b07f6e1..88b80dc3db79 100644 --- a/drivers/net/ipa/gsi.h +++ b/drivers/net/ipa/gsi.h @@ -232,8 +232,35 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id); */ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell); -int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop); -int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start); +/** + * gsi_suspend() - Prepare the GSI subsystem for suspend + * @gsi: GSI pointer + */ +void gsi_suspend(struct gsi *gsi); + +/** + * gsi_resume() - Resume the GSI subsystem following suspend + * @gsi: GSI pointer + */ +void gsi_resume(struct gsi *gsi); + +/** + * gsi_channel_suspend() - Suspend a GSI channel + * @gsi: GSI pointer + * @channel_id: Channel to suspend + * + * For IPA v4.0+, suspend is implemented by stopping the channel. + */ +int gsi_channel_suspend(struct gsi *gsi, u32 channel_id); + +/** + * gsi_channel_resume() - Resume a suspended GSI channel + * @gsi: GSI pointer + * @channel_id: Channel to resume + * + * For IPA v4.0+, the stopped channel is started again. 
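The new gsi_suspend()/gsi_resume() pair documented above is just disable_irq()/enable_irq() around the GSI interrupt, and it relies on a guarantee worth naming: disable_irq() not only masks the line but also waits for any handler already running to finish, so afterwards the ISR cannot touch hardware that is about to lose power. The conventional system-PM pairing, for a hypothetical foo driver:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm.h>

struct foo {
        int irq;
};

static int foo_suspend(struct device *dev)
{
        struct foo *foo = dev_get_drvdata(dev);

        disable_irq(foo->irq);  /* masks the line, waits for handlers */

        return 0;
}

static int foo_resume(struct device *dev)
{
        struct foo *foo = dev_get_drvdata(dev);

        enable_irq(foo->irq);   /* disable/enable calls nest; re-arm here */

        return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);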
+ */ +int gsi_channel_resume(struct gsi *gsi, u32 channel_id); /** * gsi_init() - Initialize the GSI subsystem diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index 8c795a6a8598..1544564bc283 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -90,14 +90,12 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count, { void *virt; -#ifdef IPA_VALIDATE if (!size) return -EINVAL; if (count < max_alloc) return -EINVAL; if (!max_alloc) return -EINVAL; -#endif /* IPA_VALIDATE */ /* By allocating a few extra entries in our pool (one less * than the maximum number that will be requested in a @@ -140,14 +138,12 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool, dma_addr_t addr; void *virt; -#ifdef IPA_VALIDATE if (!size) return -EINVAL; if (count < max_alloc) return -EINVAL; if (!max_alloc) return -EINVAL; -#endif /* IPA_VALIDATE */ /* Don't let allocations cross a power-of-two boundary */ size = __roundup_pow_of_two(size); @@ -188,8 +184,8 @@ static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count) { u32 offset; - /* assert(count > 0); */ - /* assert(count <= pool->max_alloc); */ + WARN_ON(!count); + WARN_ON(count > pool->max_alloc); /* Allocate from beginning if wrap would occur */ if (count > pool->count - pool->free) @@ -225,9 +221,10 @@ void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element) { void *end = pool->base + pool->count * pool->size; - /* assert(element >= pool->base); */ - /* assert(element < end); */ - /* assert(pool->max_alloc == 1); */ + WARN_ON(element < pool->base); + WARN_ON(element >= end); + WARN_ON(pool->max_alloc != 1); + element += pool->size; return element < end ? element : pool->base; @@ -332,7 +329,8 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id, struct gsi_trans_info *trans_info; struct gsi_trans *trans; - /* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */ + if (WARN_ON(tre_count > gsi_channel_trans_tre_max(gsi, channel_id))) + return NULL; trans_info = &channel->trans_info; @@ -408,7 +406,7 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size, u32 which = trans->used++; struct scatterlist *sg; - /* assert(which < trans->tre_count); */ + WARN_ON(which >= trans->tre_count); /* Commands are quite different from data transfer requests. * Their payloads come from a pool whose memory is allocated @@ -441,8 +439,10 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size, struct scatterlist *sg = &trans->sgl[0]; int ret; - /* assert(trans->tre_count == 1); */ - /* assert(!trans->used); */ + if (WARN_ON(trans->tre_count != 1)) + return -EINVAL; + if (WARN_ON(trans->used)) + return -EINVAL; sg_set_page(sg, page, size, offset); ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction); @@ -461,8 +461,10 @@ int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb) u32 used; int ret; - /* assert(trans->tre_count == 1); */ - /* assert(!trans->used); */ + if (WARN_ON(trans->tre_count != 1)) + return -EINVAL; + if (WARN_ON(trans->used)) + return -EINVAL; /* skb->len will not be 0 (checked early) */ ret = skb_to_sgvec(skb, sg, 0, skb->len); @@ -550,7 +552,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db) u32 avail; u32 i; - /* assert(trans->used > 0); */ + WARN_ON(!trans->used); /* Consume the entries. If we cross the end of the ring while * filling them we'll switch to the beginning to finish. 
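The gsi_trans.c hunks below replace comment-only /* assert(...) */ annotations with checks that actually execute, and the conversion draws a useful line: where the caller can recover, if (WARN_ON(cond)) return -EINVAL; logs a backtrace and fails the operation; where the condition is pure invariant documentation, a bare WARN_ON(cond) logs and carries on. In sketch form:

#include <linux/bug.h>
#include <linux/errno.h>

struct pool {
        unsigned int count;
        unsigned int max_alloc;
};

static int pool_reserve(struct pool *pool, unsigned int count)
{
        /* was: assert(count <= pool->max_alloc); recoverable, so fail */
        if (WARN_ON(count > pool->max_alloc))
                return -EINVAL;

        /* pure invariant: warn with a stack trace, then continue */
        WARN_ON(!pool->count);

        return 0;
}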
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h index 744406832a77..9fc880eb7e3a 100644 --- a/drivers/net/ipa/ipa.h +++ b/drivers/net/ipa/ipa.h @@ -23,34 +23,24 @@ struct icc_path; struct net_device; struct platform_device; -struct ipa_clock; +struct ipa_power; struct ipa_smp2p; struct ipa_interrupt; /** - * enum ipa_flag - IPA state flags - * @IPA_FLAG_RESUMED: Whether resume from suspend has been signaled - * @IPA_FLAG_COUNT: Number of defined IPA flags - */ -enum ipa_flag { - IPA_FLAG_RESUMED, - IPA_FLAG_COUNT, /* Last; not a flag */ -}; - -/** * struct ipa - IPA information * @gsi: Embedded GSI structure - * @flags: Boolean state flags * @version: IPA hardware version * @pdev: Platform device * @completion: Used to signal pipeline clear transfer complete * @nb: Notifier block used for remoteproc SSR * @notifier: Remoteproc SSR notifier * @smp2p: SMP2P information - * @clock: IPA clocking information + * @power: IPA power information * @table_addr: DMA address of filter/route table content * @table_virt: Virtual address of filter/route table content * @interrupt: IPA Interrupt information + * @uc_powered: true if power is active by proxy for microcontroller * @uc_loaded: true after microcontroller has reported it's ready * @reg_addr: DMA address used for IPA register access * @reg_virt: Virtual address used for IPA register access @@ -82,19 +72,19 @@ enum ipa_flag { */ struct ipa { struct gsi gsi; - DECLARE_BITMAP(flags, IPA_FLAG_COUNT); enum ipa_version version; struct platform_device *pdev; struct completion completion; struct notifier_block nb; void *notifier; struct ipa_smp2p *smp2p; - struct ipa_clock *clock; + struct ipa_power *power; dma_addr_t table_addr; __le64 *table_virt; struct ipa_interrupt *interrupt; + bool uc_powered; bool uc_loaded; dma_addr_t reg_addr; @@ -144,11 +134,11 @@ struct ipa { * * Activities performed at the init stage can be done without requiring * any access to IPA hardware. Activities performed at the config stage - * require the IPA clock to be running, because they involve access - * to IPA registers. The setup stage is performed only after the GSI - * hardware is ready (more on this below). The setup stage allows - * the AP to perform more complex initialization by issuing "immediate - * commands" using a special interface to the IPA. + * require IPA power, because they involve access to IPA registers. + * The setup stage is performed only after the GSI hardware is ready + * (more on this below). The setup stage allows the AP to perform + * more complex initialization by issuing "immediate commands" using + * a special interface to the IPA. * * This function, @ipa_setup(), starts the setup stage. * diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c deleted file mode 100644 index 69ef6ea41e61..000000000000 --- a/drivers/net/ipa/ipa_clock.c +++ /dev/null @@ -1,331 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2021 Linaro Ltd. - */ - -#include <linux/refcount.h> -#include <linux/mutex.h> -#include <linux/clk.h> -#include <linux/device.h> -#include <linux/interconnect.h> - -#include "ipa.h" -#include "ipa_clock.h" -#include "ipa_modem.h" -#include "ipa_data.h" - -/** - * DOC: IPA Clocking - * - * The "IPA Clock" manages both the IPA core clock and the interconnects - * (buses) the IPA depends on as a single logical entity. A reference count - * is incremented by "get" operations and decremented by "put" operations. 
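The deleted clock code that follows is worth reading closely, because its reference-count discipline carries over to the ipa_power replacement: a lock-free fast path takes a reference only while the count is already non-zero, and the 0-to-1 transition is re-checked under a mutex before the expensive enable. Stripped to a skeleton with hypothetical foo_power names:

#include <linux/mutex.h>
#include <linux/refcount.h>

struct foo_power {
        refcount_t count;
        struct mutex mutex;     /* serializes enable/disable */
};

static void foo_power_enable(struct foo_power *pwr)
{
        /* clocks, interconnect bandwidth, ... (hypothetical) */
}

/* Fast path: succeeds only while the count is already non-zero. */
static bool foo_power_get_additional(struct foo_power *pwr)
{
        return refcount_inc_not_zero(&pwr->count);
}

static void foo_power_get(struct foo_power *pwr)
{
        if (foo_power_get_additional(pwr))
                return;

        mutex_lock(&pwr->mutex);

        /* Someone may have raced us to the 0->1 transition. */
        if (!foo_power_get_additional(pwr)) {
                foo_power_enable(pwr);
                refcount_set(&pwr->count, 1);
        }

        mutex_unlock(&pwr->mutex);
}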
- * Transitions of that count from 0 to 1 result in the clock and interconnects - * being enabled, and transitions of the count from 1 to 0 cause them to be - * disabled. We currently operate the core clock at a fixed clock rate, and - * all buses at a fixed average and peak bandwidth. As more advanced IPA - * features are enabled, we can make better use of clock and bus scaling. - * - * An IPA clock reference must be held for any access to IPA hardware. - */ - -/** - * struct ipa_interconnect - IPA interconnect information - * @path: Interconnect path - * @average_bandwidth: Average interconnect bandwidth (KB/second) - * @peak_bandwidth: Peak interconnect bandwidth (KB/second) - */ -struct ipa_interconnect { - struct icc_path *path; - u32 average_bandwidth; - u32 peak_bandwidth; -}; - -/** - * struct ipa_clock - IPA clocking information - * @count: Clocking reference count - * @mutex: Protects clock enable/disable - * @core: IPA core clock - * @interconnect_count: Number of elements in interconnect[] - * @interconnect: Interconnect array - */ -struct ipa_clock { - refcount_t count; - struct mutex mutex; /* protects clock enable/disable */ - struct clk *core; - u32 interconnect_count; - struct ipa_interconnect *interconnect; -}; - -static int ipa_interconnect_init_one(struct device *dev, - struct ipa_interconnect *interconnect, - const struct ipa_interconnect_data *data) -{ - struct icc_path *path; - - path = of_icc_get(dev, data->name); - if (IS_ERR(path)) { - int ret = PTR_ERR(path); - - dev_err_probe(dev, ret, "error getting %s interconnect\n", - data->name); - - return ret; - } - - interconnect->path = path; - interconnect->average_bandwidth = data->average_bandwidth; - interconnect->peak_bandwidth = data->peak_bandwidth; - - return 0; -} - -static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect) -{ - icc_put(interconnect->path); - memset(interconnect, 0, sizeof(*interconnect)); -} - -/* Initialize interconnects required for IPA operation */ -static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev, - const struct ipa_interconnect_data *data) -{ - struct ipa_interconnect *interconnect; - u32 count; - int ret; - - count = clock->interconnect_count; - interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL); - if (!interconnect) - return -ENOMEM; - clock->interconnect = interconnect; - - while (count--) { - ret = ipa_interconnect_init_one(dev, interconnect, data++); - if (ret) - goto out_unwind; - interconnect++; - } - - return 0; - -out_unwind: - while (interconnect-- > clock->interconnect) - ipa_interconnect_exit_one(interconnect); - kfree(clock->interconnect); - clock->interconnect = NULL; - - return ret; -} - -/* Inverse of ipa_interconnect_init() */ -static void ipa_interconnect_exit(struct ipa_clock *clock) -{ - struct ipa_interconnect *interconnect; - - interconnect = clock->interconnect + clock->interconnect_count; - while (interconnect-- > clock->interconnect) - ipa_interconnect_exit_one(interconnect); - kfree(clock->interconnect); - clock->interconnect = NULL; -} - -/* Currently we only use one bandwidth level, so just "enable" interconnects */ -static int ipa_interconnect_enable(struct ipa *ipa) -{ - struct ipa_interconnect *interconnect; - struct ipa_clock *clock = ipa->clock; - int ret; - u32 i; - - interconnect = clock->interconnect; - for (i = 0; i < clock->interconnect_count; i++) { - ret = icc_set_bw(interconnect->path, - interconnect->average_bandwidth, - interconnect->peak_bandwidth); - if (ret) - goto out_unwind; - 
interconnect++; - } - - return 0; - -out_unwind: - while (interconnect-- > clock->interconnect) - (void)icc_set_bw(interconnect->path, 0, 0); - - return ret; -} - -/* To disable an interconnect, we just its bandwidth to 0 */ -static void ipa_interconnect_disable(struct ipa *ipa) -{ - struct ipa_interconnect *interconnect; - struct ipa_clock *clock = ipa->clock; - int result = 0; - u32 count; - int ret; - - count = clock->interconnect_count; - interconnect = clock->interconnect + count; - while (count--) { - interconnect--; - ret = icc_set_bw(interconnect->path, 0, 0); - if (ret && !result) - result = ret; - } - - if (result) - dev_err(&ipa->pdev->dev, - "error %d disabling IPA interconnects\n", ret); -} - -/* Turn on IPA clocks, including interconnects */ -static int ipa_clock_enable(struct ipa *ipa) -{ - int ret; - - ret = ipa_interconnect_enable(ipa); - if (ret) - return ret; - - ret = clk_prepare_enable(ipa->clock->core); - if (ret) - ipa_interconnect_disable(ipa); - - return ret; -} - -/* Inverse of ipa_clock_enable() */ -static void ipa_clock_disable(struct ipa *ipa) -{ - clk_disable_unprepare(ipa->clock->core); - ipa_interconnect_disable(ipa); -} - -/* Get an IPA clock reference, but only if the reference count is - * already non-zero. Returns true if the additional reference was - * added successfully, or false otherwise. - */ -bool ipa_clock_get_additional(struct ipa *ipa) -{ - return refcount_inc_not_zero(&ipa->clock->count); -} - -/* Get an IPA clock reference. If the reference count is non-zero, it is - * incremented and return is immediate. Otherwise it is checked again - * under protection of the mutex, and if appropriate the IPA clock - * is enabled. - * - * Incrementing the reference count is intentionally deferred until - * after the clock is running and endpoints are resumed. - */ -void ipa_clock_get(struct ipa *ipa) -{ - struct ipa_clock *clock = ipa->clock; - int ret; - - /* If the clock is running, just bump the reference count */ - if (ipa_clock_get_additional(ipa)) - return; - - /* Otherwise get the mutex and check again */ - mutex_lock(&clock->mutex); - - /* A reference might have been added before we got the mutex. */ - if (ipa_clock_get_additional(ipa)) - goto out_mutex_unlock; - - ret = ipa_clock_enable(ipa); - if (ret) { - dev_err(&ipa->pdev->dev, "error %d enabling IPA clock\n", ret); - goto out_mutex_unlock; - } - - refcount_set(&clock->count, 1); - -out_mutex_unlock: - mutex_unlock(&clock->mutex); -} - -/* Attempt to remove an IPA clock reference. If this represents the - * last reference, disable the IPA clock under protection of the mutex. - */ -void ipa_clock_put(struct ipa *ipa) -{ - struct ipa_clock *clock = ipa->clock; - - /* If this is not the last reference there's nothing more to do */ - if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex)) - return; - - ipa_clock_disable(ipa); - - mutex_unlock(&clock->mutex); -} - -/* Return the current IPA core clock rate */ -u32 ipa_clock_rate(struct ipa *ipa) -{ - return ipa->clock ? 
(u32)clk_get_rate(ipa->clock->core) : 0; -} - -/* Initialize IPA clocking */ -struct ipa_clock * -ipa_clock_init(struct device *dev, const struct ipa_clock_data *data) -{ - struct ipa_clock *clock; - struct clk *clk; - int ret; - - clk = clk_get(dev, "core"); - if (IS_ERR(clk)) { - dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n"); - - return ERR_CAST(clk); - } - - ret = clk_set_rate(clk, data->core_clock_rate); - if (ret) { - dev_err(dev, "error %d setting core clock rate to %u\n", - ret, data->core_clock_rate); - goto err_clk_put; - } - - clock = kzalloc(sizeof(*clock), GFP_KERNEL); - if (!clock) { - ret = -ENOMEM; - goto err_clk_put; - } - clock->core = clk; - clock->interconnect_count = data->interconnect_count; - - ret = ipa_interconnect_init(clock, dev, data->interconnect_data); - if (ret) - goto err_kfree; - - mutex_init(&clock->mutex); - refcount_set(&clock->count, 0); - - return clock; - -err_kfree: - kfree(clock); -err_clk_put: - clk_put(clk); - - return ERR_PTR(ret); -} - -/* Inverse of ipa_clock_init() */ -void ipa_clock_exit(struct ipa_clock *clock) -{ - struct clk *clk = clock->core; - - WARN_ON(refcount_read(&clock->count) != 0); - mutex_destroy(&clock->mutex); - ipa_interconnect_exit(clock); - kfree(clock); - clk_put(clk); -} diff --git a/drivers/net/ipa/ipa_clock.h b/drivers/net/ipa/ipa_clock.h deleted file mode 100644 index 1fe634760e59..000000000000 --- a/drivers/net/ipa/ipa_clock.h +++ /dev/null @@ -1,64 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. - */ -#ifndef _IPA_CLOCK_H_ -#define _IPA_CLOCK_H_ - -struct device; - -struct ipa; -struct ipa_clock_data; - -/** - * ipa_clock_rate() - Return the current IPA core clock rate - * @ipa: IPA structure - * - * Return: The current clock rate (in Hz), or 0. - */ -u32 ipa_clock_rate(struct ipa *ipa); - -/** - * ipa_clock_init() - Initialize IPA clocking - * @dev: IPA device - * @data: Clock configuration data - * - * Return: A pointer to an ipa_clock structure, or a pointer-coded error - */ -struct ipa_clock *ipa_clock_init(struct device *dev, - const struct ipa_clock_data *data); - -/** - * ipa_clock_exit() - Inverse of ipa_clock_init() - * @clock: IPA clock pointer - */ -void ipa_clock_exit(struct ipa_clock *clock); - -/** - * ipa_clock_get() - Get an IPA clock reference - * @ipa: IPA pointer - * - * This call blocks if this is the first reference. - */ -void ipa_clock_get(struct ipa *ipa); - -/** - * ipa_clock_get_additional() - Get an IPA clock reference if not first - * @ipa: IPA pointer - * - * This returns immediately, and only takes a reference if not the first - */ -bool ipa_clock_get_additional(struct ipa *ipa); - -/** - * ipa_clock_put() - Drop an IPA clock reference - * @ipa: IPA pointer - * - * This drops a clock reference. If the last reference is being dropped, - * the clock is stopped and RX endpoints are suspended. This call will - * not block unless the last reference is dropped. 
- */ -void ipa_clock_put(struct ipa *ipa); - -#endif /* _IPA_CLOCK_H_ */ diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index af44ca41189e..cff51731195a 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -159,35 +159,49 @@ static void ipa_cmd_validate_build(void) BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK)); #undef TABLE_COUNT_MAX #undef TABLE_SIZE -} -#ifdef IPA_VALIDATE + /* Hashed and non-hashed fields are assumed to be the same size */ + BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) != + field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK)); + BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) != + field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK)); + + /* Valid endpoint numbers must fit in the IP packet init command */ + BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) < + IPA_ENDPOINT_MAX - 1); +} /* Validate a memory region holding a table */ -bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, - bool route, bool ipv6, bool hashed) +bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route) { + u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK); + u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK); + const char *table = route ? "route" : "filter"; struct device *dev = &ipa->pdev->dev; - u32 offset_max; - offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) - : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK); + /* Size must fit in the immediate command field that holds it */ + if (mem->size > size_max) { + dev_err(dev, "%s table region size too large\n", table); + dev_err(dev, " (0x%04x > 0x%04x)\n", + mem->size, size_max); + + return false; + } + + /* Offset must fit in the immediate command field that holds it */ if (mem->offset > offset_max || ipa->mem_offset > offset_max - mem->offset) { - dev_err(dev, "IPv%c %s%s table region offset too large\n", - ipv6 ? '6' : '4', hashed ? "hashed " : "", - route ? "route" : "filter"); + dev_err(dev, "%s table region offset too large\n", table); dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", ipa->mem_offset, mem->offset, offset_max); return false; } + /* Entire memory range must fit within IPA-local memory */ if (mem->offset > ipa->mem_size || mem->size > ipa->mem_size - mem->offset) { - dev_err(dev, "IPv%c %s%s table region out of range\n", - ipv6 ? '6' : '4', hashed ? "hashed " : "", - route ? 
"route" : "filter"); + dev_err(dev, "%s table region out of range\n", table); dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", mem->offset, mem->size, ipa->mem_size); @@ -331,7 +345,6 @@ bool ipa_cmd_data_valid(struct ipa *ipa) return true; } -#endif /* IPA_VALIDATE */ int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max) { @@ -522,9 +535,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id) union ipa_cmd_payload *cmd_payload; dma_addr_t payload_addr; - /* assert(endpoint_id < - field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */ - cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr); payload = &cmd_payload->ip_packet_init; @@ -548,8 +558,9 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size, u16 flags; /* size and offset must fit in 16 bit fields */ - /* assert(size > 0 && size <= U16_MAX); */ - /* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */ + WARN_ON(!size); + WARN_ON(size > U16_MAX); + WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset); offset += ipa->mem_offset; @@ -588,8 +599,6 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans) union ipa_cmd_payload *cmd_payload; dma_addr_t payload_addr; - /* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */ - cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr); payload = &cmd_payload->ip_packet_tag_status; diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index b99262281f41..69cd085d427d 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -57,20 +57,16 @@ struct ipa_cmd_info { enum dma_data_direction direction; }; -#ifdef IPA_VALIDATE - /** * ipa_cmd_table_valid() - Validate a memory region holding a table * @ipa: - IPA pointer * @mem: - IPA memory region descriptor * @route: - Whether the region holds a route or filter table - * @ipv6: - Whether the table is for IPv6 or IPv4 - * @hashed: - Whether the table is hashed or non-hashed * * Return: true if region is valid, false otherwise */ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, - bool route, bool ipv6, bool hashed); + bool route); /** * ipa_cmd_data_valid() - Validate command-realted configuration is valid @@ -80,22 +76,6 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, */ bool ipa_cmd_data_valid(struct ipa *ipa); -#else /* !IPA_VALIDATE */ - -static inline bool ipa_cmd_table_valid(struct ipa *ipa, - const struct ipa_mem *mem, bool route, - bool ipv6, bool hashed) -{ - return true; -} - -static inline bool ipa_cmd_data_valid(struct ipa *ipa) -{ - return true; -} - -#endif /* !IPA_VALIDATE */ - /** * ipa_cmd_pool_init() - initialize command channel pools * @channel: AP->IPA command TX GSI channel pointer diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/ipa_data-v3.1.c index 4c28189462a7..06ddb85f39b2 100644 --- a/drivers/net/ipa/ipa_data-v3.1.c +++ b/drivers/net/ipa/ipa_data-v3.1.c @@ -513,7 +513,7 @@ static const struct ipa_interconnect_data ipa_interconnect_data[] = { }; /* Clock and interconnect configuration data for an SoC having IPA v3.1 */ -static const struct ipa_clock_data ipa_clock_data = { +static const struct ipa_power_data ipa_power_data = { .core_clock_rate = 16 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -529,5 +529,5 @@ const struct ipa_data ipa_data_v3_1 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, - 
.clock_data = &ipa_clock_data, + .power_data = &ipa_power_data, }; diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/ipa_data-v3.5.1.c index af536ef8c120..760c22bbdf70 100644 --- a/drivers/net/ipa/ipa_data-v3.5.1.c +++ b/drivers/net/ipa/ipa_data-v3.5.1.c @@ -394,7 +394,7 @@ static const struct ipa_interconnect_data ipa_interconnect_data[] = { }; /* Clock and interconnect configuration data for an SoC having IPA v3.5.1 */ -static const struct ipa_clock_data ipa_clock_data = { +static const struct ipa_power_data ipa_power_data = { .core_clock_rate = 75 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -414,5 +414,5 @@ const struct ipa_data ipa_data_v3_5_1 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, - .clock_data = &ipa_clock_data, + .power_data = &ipa_power_data, }; diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c index 9353efbd504f..fea91451a0c3 100644 --- a/drivers/net/ipa/ipa_data-v4.11.c +++ b/drivers/net/ipa/ipa_data-v4.11.c @@ -105,6 +105,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .filter_support = true, .config = { .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .checksum = true, .qmap = true, .status_enable = true, .tx = { @@ -128,6 +129,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .config = { .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .checksum = true, .qmap = true, .aggregation = true, .rx = { @@ -368,24 +370,19 @@ static const struct ipa_mem_data ipa_mem_data = { static const struct ipa_interconnect_data ipa_interconnect_data[] = { { .name = "memory", - .peak_bandwidth = 465000, /* 465 MBps */ - .average_bandwidth = 80000, /* 80 MBps */ - }, - /* Average rate is unused for the next two interconnects */ - { - .name = "imem", - .peak_bandwidth = 68570, /* 68.57 MBps */ - .average_bandwidth = 80000, /* 80 MBps (unused?) 
*/ + .peak_bandwidth = 600000, /* 600 MBps */ + .average_bandwidth = 150000, /* 150 MBps */ }, + /* Average rate is unused for the next interconnect */ { .name = "config", - .peak_bandwidth = 30000, /* 30 MBps */ + .peak_bandwidth = 74000, /* 74 MBps */ .average_bandwidth = 0, /* unused */ }, }; /* Clock and interconnect configuration data for an SoC having IPA v4.11 */ -static const struct ipa_clock_data ipa_clock_data = { +static const struct ipa_power_data ipa_power_data = { .core_clock_rate = 60 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -400,5 +397,5 @@ const struct ipa_data ipa_data_v4_11 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, - .clock_data = &ipa_clock_data, + .power_data = &ipa_power_data, }; diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/ipa_data-v4.2.c index 3b09b7baa95f..2a231e79d5e1 100644 --- a/drivers/net/ipa/ipa_data-v4.2.c +++ b/drivers/net/ipa/ipa_data-v4.2.c @@ -360,7 +360,7 @@ static const struct ipa_interconnect_data ipa_interconnect_data[] = { }; /* Clock and interconnect configuration data for an SoC having IPA v4.2 */ -static const struct ipa_clock_data ipa_clock_data = { +static const struct ipa_power_data ipa_power_data = { .core_clock_rate = 100 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -376,5 +376,5 @@ const struct ipa_data ipa_data_v4_2 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, - .clock_data = &ipa_clock_data, + .power_data = &ipa_power_data, }; diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c index a99b6478fa3a..e62ab9c3ac67 100644 --- a/drivers/net/ipa/ipa_data-v4.5.c +++ b/drivers/net/ipa/ipa_data-v4.5.c @@ -114,6 +114,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .filter_support = true, .config = { .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .checksum = true, .qmap = true, .status_enable = true, .tx = { @@ -137,6 +138,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .config = { .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .checksum = true, .qmap = true, .aggregation = true, .rx = { @@ -441,7 +443,7 @@ static const struct ipa_interconnect_data ipa_interconnect_data[] = { }; /* Clock and interconnect configuration data for an SoC having IPA v4.5 */ -static const struct ipa_clock_data ipa_clock_data = { +static const struct ipa_power_data ipa_power_data = { .core_clock_rate = 150 * 1000 * 1000, /* Hz (150? 60?) 
*/ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -456,5 +458,5 @@ const struct ipa_data ipa_data_v4_5 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, - .clock_data = &ipa_clock_data, + .power_data = &ipa_power_data, }; diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c index 798d43e1eb13..2421b5abb5d4 100644 --- a/drivers/net/ipa/ipa_data-v4.9.c +++ b/drivers/net/ipa/ipa_data-v4.9.c @@ -106,6 +106,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .filter_support = true, .config = { .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .checksum = true, .qmap = true, .status_enable = true, .tx = { @@ -129,6 +130,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .config = { .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .checksum = true, .qmap = true, .aggregation = true, .rx = { @@ -416,18 +418,13 @@ static const struct ipa_mem_data ipa_mem_data = { /* Interconnect rates are in 1000 byte/second units */ static const struct ipa_interconnect_data ipa_interconnect_data[] = { { - .name = "ipa_to_llcc", + .name = "memory", .peak_bandwidth = 600000, /* 600 MBps */ .average_bandwidth = 150000, /* 150 MBps */ }, - { - .name = "llcc_to_ebi1", - .peak_bandwidth = 1804000, /* 1.804 GBps */ - .average_bandwidth = 150000, /* 150 MBps */ - }, /* Average rate is unused for the next interconnect */ { - .name = "appss_to_ipa", + .name = "config", .peak_bandwidth = 74000, /* 74 MBps */ .average_bandwidth = 0, /* unused */ }, @@ -435,7 +432,7 @@ static const struct ipa_interconnect_data ipa_interconnect_data[] = { }; /* Clock and interconnect configuration data for an SoC having IPA v4.9 */ -static const struct ipa_clock_data ipa_clock_data = { +static const struct ipa_power_data ipa_power_data = { .core_clock_rate = 60 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -450,5 +447,5 @@ const struct ipa_data ipa_data_v4_9 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, - .clock_data = &ipa_clock_data, + .power_data = &ipa_power_data, }; diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index 5bc244c8f94e..6d329e9ce5d2 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -19,7 +19,7 @@ * IPA and GSI resources to use for a given platform. This data is supplied * via the Device Tree match table, associated with a particular compatible * string. The data defines information about how resources, endpoints and - * channels, memory, clocking and so on are allocated and used for the + * channels, memory, power and so on are allocated and used for the * platform. * * Resources are data structures used internally by the IPA hardware. 
The @@ -265,12 +265,12 @@ struct ipa_interconnect_data { }; /** - * struct ipa_clock_data - description of IPA clock and interconnect rates + * struct ipa_power_data - description of IPA power configuration data * @core_clock_rate: Core clock rate (Hz) * @interconnect_count: Number of entries in the interconnect_data array * @interconnect_data: IPA interconnect configuration data */ -struct ipa_clock_data { +struct ipa_power_data { u32 core_clock_rate; u32 interconnect_count; /* # entries in interconnect_data[] */ const struct ipa_interconnect_data *interconnect_data; @@ -286,7 +286,7 @@ struct ipa_clock_data { * @endpoint_data: IPA endpoint/GSI channel data * @resource_data: IPA resource configuration data * @mem_data: IPA memory region data - * @clock_data: IPA clock and interconnect data + * @power_data: IPA power data */ struct ipa_data { enum ipa_version version; @@ -297,7 +297,7 @@ struct ipa_data { const struct ipa_gsi_endpoint_data *endpoint_data; const struct ipa_resource_data *resource_data; const struct ipa_mem_data *mem_data; - const struct ipa_clock_data *clock_data; + const struct ipa_power_data *power_data; }; extern const struct ipa_data ipa_data_v3_1; diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index ab02669bae4e..5528d97110d5 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -21,7 +21,7 @@ #include "ipa_modem.h" #include "ipa_table.h" #include "ipa_gsi.h" -#include "ipa_clock.h" +#include "ipa_power.h" #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) @@ -250,17 +250,18 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) /* Suspend is not supported for IPA v4.0+. Delay doesn't work * correctly on IPA v4.2. - * - * if (endpoint->toward_ipa) - * assert(ipa->version != IPA_VERSION_4.2); - * else - * assert(ipa->version < IPA_VERSION_4_0); */ + if (endpoint->toward_ipa) + WARN_ON(ipa->version == IPA_VERSION_4_2); + else + WARN_ON(ipa->version >= IPA_VERSION_4_0); + mask = endpoint->toward_ipa ? 
ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; val = ioread32(ipa->reg_virt + offset); - /* Don't bother if it's already in the requested state */ state = !!(val & mask); + + /* Don't bother if it's already in the requested state */ if (suspend_delay != state) { val ^= mask; iowrite32(val, ipa->reg_virt + offset); @@ -273,7 +274,7 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) static void ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) { - /* assert(endpoint->toward_ipa); */ + WARN_ON(!endpoint->toward_ipa); /* Delay mode doesn't work properly for IPA v4.2 */ if (endpoint->ipa->version != IPA_VERSION_4_2) @@ -287,7 +288,8 @@ static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) u32 offset; u32 val; - /* assert(mask & ipa->available); */ + WARN_ON(!(mask & ipa->available)); + offset = ipa_reg_state_aggr_active_offset(ipa->version); val = ioread32(ipa->reg_virt + offset); @@ -299,7 +301,8 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) u32 mask = BIT(endpoint->endpoint_id); struct ipa *ipa = endpoint->ipa; - /* assert(mask & ipa->available); */ + WARN_ON(!(mask & ipa->available)); + iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); } @@ -338,7 +341,7 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) if (endpoint->ipa->version >= IPA_VERSION_4_0) return enable; /* For IPA v4.0+, no change made */ - /* assert(!endpoint->toward_ipa); */ + WARN_ON(endpoint->toward_ipa); suspended = ipa_endpoint_init_ctrl(endpoint, enable); @@ -807,7 +810,7 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds) return hol_block_timer_qtime_val(ipa, microseconds); /* Use 64 bit arithmetic to avoid overflow... */ - rate = ipa_clock_rate(ipa); + rate = ipa_core_clock_rate(ipa); ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC); /* ...but we still need to fit into a 32-bit register */ WARN_ON(ticks > U32_MAX); @@ -1156,7 +1159,8 @@ static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint, if (!endpoint->netdev) return false; - /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */ + WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD)); + skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE); if (skb) { /* Reserve the headroom and account for the data */ @@ -1583,7 +1587,6 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) { struct device *dev = &endpoint->ipa->pdev->dev; struct gsi *gsi = &endpoint->ipa->gsi; - bool stop_channel; int ret; if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) @@ -1594,11 +1597,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) (void)ipa_endpoint_program_suspend(endpoint, true); } - /* Starting with IPA v4.0, endpoints are suspended by stopping the - * underlying GSI channel rather than using endpoint suspend mode. 
- */ - stop_channel = endpoint->ipa->version >= IPA_VERSION_4_0; - ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel); + ret = gsi_channel_suspend(gsi, endpoint->channel_id); if (ret) dev_err(dev, "error %d suspending channel %u\n", ret, endpoint->channel_id); @@ -1608,7 +1607,6 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) { struct device *dev = &endpoint->ipa->pdev->dev; struct gsi *gsi = &endpoint->ipa->gsi; - bool start_channel; int ret; if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) @@ -1617,11 +1615,7 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) if (!endpoint->toward_ipa) (void)ipa_endpoint_program_suspend(endpoint, false); - /* Starting with IPA v4.0, the underlying GSI channel must be - * restarted for resume. - */ - start_channel = endpoint->ipa->version >= IPA_VERSION_4_0; - ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel); + ret = gsi_channel_resume(gsi, endpoint->channel_id); if (ret) dev_err(dev, "error %d resuming channel %u\n", ret, endpoint->channel_id); diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c index c46df0b7c4e5..b35170a93b0f 100644 --- a/drivers/net/ipa/ipa_interrupt.c +++ b/drivers/net/ipa/ipa_interrupt.c @@ -21,9 +21,9 @@ #include <linux/types.h> #include <linux/interrupt.h> +#include <linux/pm_runtime.h> #include "ipa.h" -#include "ipa_clock.h" #include "ipa_reg.h" #include "ipa_endpoint.h" #include "ipa_interrupt.h" @@ -74,21 +74,30 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id) iowrite32(mask, ipa->reg_virt + offset); } -/* Process all IPA interrupt types that have been signaled */ -static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt) +/* IPA IRQ handler is threaded */ +static irqreturn_t ipa_isr_thread(int irq, void *dev_id) { + struct ipa_interrupt *interrupt = dev_id; struct ipa *ipa = interrupt->ipa; u32 enabled = interrupt->enabled; + struct device *dev; + u32 pending; u32 offset; u32 mask; + int ret; + + dev = &ipa->pdev->dev; + ret = pm_runtime_get_sync(dev); + if (WARN_ON(ret < 0)) + goto out_power_put; /* The status register indicates which conditions are present, * including conditions whose interrupt is not enabled. Handle * only the enabled ones. 
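 * (Conditions whose interrupt is not enabled are cleared just after the loop, below.)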
*/ offset = ipa_reg_irq_stts_offset(ipa->version); - mask = ioread32(ipa->reg_virt + offset); - while ((mask &= enabled)) { + pending = ioread32(ipa->reg_virt + offset); + while ((mask = pending & enabled)) { do { u32 irq_id = __ffs(mask); @@ -96,43 +105,19 @@ static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt) ipa_interrupt_process(interrupt, irq_id); } while (mask); - mask = ioread32(ipa->reg_virt + offset); + pending = ioread32(ipa->reg_virt + offset); } -} - -/* Threaded part of the IPA IRQ handler */ -static irqreturn_t ipa_isr_thread(int irq, void *dev_id) -{ - struct ipa_interrupt *interrupt = dev_id; - - ipa_clock_get(interrupt->ipa); - - ipa_interrupt_process_all(interrupt); - - ipa_clock_put(interrupt->ipa); - - return IRQ_HANDLED; -} - -/* Hard part (i.e., "real" IRQ handler) of the IRQ handler */ -static irqreturn_t ipa_isr(int irq, void *dev_id) -{ - struct ipa_interrupt *interrupt = dev_id; - struct ipa *ipa = interrupt->ipa; - u32 offset; - u32 mask; - offset = ipa_reg_irq_stts_offset(ipa->version); - mask = ioread32(ipa->reg_virt + offset); - if (mask & interrupt->enabled) - return IRQ_WAKE_THREAD; - - /* Nothing in the mask was supposed to cause an interrupt */ - offset = ipa_reg_irq_clr_offset(ipa->version); - iowrite32(mask, ipa->reg_virt + offset); - - dev_err(&ipa->pdev->dev, "%s: unexpected interrupt, mask 0x%08x\n", - __func__, mask); + /* If any disabled interrupts are pending, clear them */ + if (pending) { + dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n", + pending); + offset = ipa_reg_irq_clr_offset(ipa->version); + iowrite32(pending, ipa->reg_virt + offset); + } +out_power_put: + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); return IRQ_HANDLED; } @@ -146,7 +131,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt, u32 offset; u32 val; - /* assert(mask & ipa->available); */ + WARN_ON(!(mask & ipa->available)); /* IPA version 3.0 does not support TX_SUSPEND interrupt control */ if (ipa->version == IPA_VERSION_3_0) @@ -206,7 +191,8 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt, struct ipa *ipa = interrupt->ipa; u32 offset; - /* assert(ipa_irq < IPA_IRQ_COUNT); */ + WARN_ON(ipa_irq >= IPA_IRQ_COUNT); + interrupt->handler[ipa_irq] = handler; /* Update the IPA interrupt mask to enable it */ @@ -222,7 +208,8 @@ ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq) struct ipa *ipa = interrupt->ipa; u32 offset; - /* assert(ipa_irq < IPA_IRQ_COUNT); */ + WARN_ON(ipa_irq >= IPA_IRQ_COUNT); + /* Update the IPA interrupt mask to disable it */ interrupt->enabled &= ~BIT(ipa_irq); offset = ipa_reg_irq_en_offset(ipa->version); @@ -231,8 +218,8 @@ ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq) interrupt->handler[ipa_irq] = NULL; } -/* Set up the IPA interrupt framework */ -struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa) +/* Configure the IPA interrupt framework */ +struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa) { struct device *dev = &ipa->pdev->dev; struct ipa_interrupt *interrupt; @@ -258,7 +245,7 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa) offset = ipa_reg_irq_en_offset(ipa->version); iowrite32(0, ipa->reg_virt + offset); - ret = request_threaded_irq(irq, ipa_isr, ipa_isr_thread, IRQF_ONESHOT, + ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT, "ipa", interrupt); if (ret) { dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret); @@ -281,8 +268,8 @@ err_kfree: return 
ERR_PTR(ret); } -/* Tear down the IPA interrupt framework */ -void ipa_interrupt_teardown(struct ipa_interrupt *interrupt) +/* Inverse of ipa_interrupt_config() */ +void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt) { struct device *dev = &interrupt->ipa->pdev->dev; int ret; diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h index d5c486a6800d..231390cea52a 100644 --- a/drivers/net/ipa/ipa_interrupt.h +++ b/drivers/net/ipa/ipa_interrupt.h @@ -86,17 +86,17 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt); void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt); /** - * ipa_interrupt_setup() - Set up the IPA interrupt framework + * ipa_interrupt_config() - Configure the IPA interrupt framework * @ipa: IPA pointer * * Return: Pointer to IPA SMP2P info, or a pointer-coded error */ -struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa); +struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa); /** - * ipa_interrupt_teardown() - Tear down the IPA interrupt framework + * ipa_interrupt_deconfig() - Inverse of ipa_interrupt_config() * @interrupt: IPA interrupt structure */ -void ipa_interrupt_teardown(struct ipa_interrupt *interrupt); +void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt); #endif /* _IPA_INTERRUPT_H_ */ diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 9810c61a0320..cdfa98a76e1f 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -15,11 +15,12 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_address.h> +#include <linux/pm_runtime.h> #include <linux/qcom_scm.h> #include <linux/soc/qcom/mdt_loader.h> #include "ipa.h" -#include "ipa_clock.h" +#include "ipa_power.h" #include "ipa_data.h" #include "ipa_endpoint.h" #include "ipa_resource.h" @@ -80,29 +81,6 @@ #define IPA_XO_CLOCK_DIVIDER 192 /* 1 is subtracted where used */ /** - * ipa_suspend_handler() - Handle the suspend IPA interrupt - * @ipa: IPA pointer - * @irq_id: IPA interrupt type (unused) - * - * If an RX endpoint is in suspend state, and the IPA has a packet - * destined for that endpoint, the IPA generates a SUSPEND interrupt - * to inform the AP that it should resume the endpoint. If we get - * one of these interrupts we just resume everything. - */ -static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id) -{ - /* Just report the event, and let system resume handle the rest. - * More than one endpoint could signal this; if so, ignore - * all but the first. - */ - if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags)) - pm_wakeup_dev_event(&ipa->pdev->dev, 0, true); - - /* Acknowledge/clear the suspend interrupt on all endpoints */ - ipa_interrupt_suspend_clear_all(ipa->interrupt); -} - -/** * ipa_setup() - Set up IPA hardware * @ipa: IPA pointer * @@ -124,19 +102,9 @@ int ipa_setup(struct ipa *ipa) if (ret) return ret; - ipa->interrupt = ipa_interrupt_setup(ipa); - if (IS_ERR(ipa->interrupt)) { - ret = PTR_ERR(ipa->interrupt); - goto err_gsi_teardown; - } - ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND, - ipa_suspend_handler); - - ipa_uc_setup(ipa); - - ret = device_init_wakeup(dev, true); + ret = ipa_power_setup(ipa); if (ret) - goto err_uc_teardown; + goto err_gsi_teardown; ipa_endpoint_setup(ipa); @@ -167,7 +135,7 @@ int ipa_setup(struct ipa *ipa) ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id); /* We're all set. 
Now prepare for communication with the modem */ - ret = ipa_modem_setup(ipa); + ret = ipa_qmi_setup(ipa); if (ret) goto err_default_route_clear; @@ -184,11 +152,7 @@ err_command_disable: ipa_endpoint_disable_one(command_endpoint); err_endpoint_teardown: ipa_endpoint_teardown(ipa); - (void)device_init_wakeup(dev, false); -err_uc_teardown: - ipa_uc_teardown(ipa); - ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); - ipa_interrupt_teardown(ipa->interrupt); + ipa_power_teardown(ipa); err_gsi_teardown: gsi_teardown(&ipa->gsi); @@ -204,17 +168,17 @@ static void ipa_teardown(struct ipa *ipa) struct ipa_endpoint *exception_endpoint; struct ipa_endpoint *command_endpoint; - ipa_modem_teardown(ipa); + /* We're going to tear everything down, as if setup never completed */ + ipa->setup_complete = false; + + ipa_qmi_teardown(ipa); ipa_endpoint_default_route_clear(ipa); exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]; ipa_endpoint_disable_one(exception_endpoint); command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; ipa_endpoint_disable_one(command_endpoint); ipa_endpoint_teardown(ipa); - (void)device_init_wakeup(&ipa->pdev->dev, false); - ipa_uc_teardown(ipa); - ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); - ipa_interrupt_teardown(ipa->interrupt); + ipa_power_teardown(ipa); gsi_teardown(&ipa->gsi); } @@ -253,9 +217,6 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data) const struct ipa_qsb_data *data1; u32 val; - /* assert(data->qsb_count > 0); */ - /* assert(data->qsb_count < 3); */ - /* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */ data0 = &data->qsb_data[IPA_QSB_MASTER_DDR]; if (data->qsb_count > 1) @@ -289,12 +250,11 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data) /* Compute the value to use in the COUNTER_CFG register AGGR_GRANULARITY * field to represent the given number of microseconds. The value is one * less than the number of timer ticks in the requested period. 0 is not - * a valid granularity value. + * a valid granularity value (so for example @usec must be at least 16 for + * a TIMER_FREQUENCY of 32000). */ -static u32 ipa_aggr_granularity_val(u32 usec) +static __always_inline u32 ipa_aggr_granularity_val(u32 usec) { - /* assert(usec != 0); */ - return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1; } @@ -366,8 +326,8 @@ static void ipa_idle_indication_cfg(struct ipa *ipa, * @ipa: IPA pointer * * Configures when the IPA signals it is idle to the global clock - * controller, which can respond by scalling down the clock to - * save power. + * controller, which can respond by scaling down the clock to save + * power. */ static void ipa_hardware_dcd_config(struct ipa *ipa) { @@ -457,48 +417,54 @@ static void ipa_hardware_deconfig(struct ipa *ipa) * @ipa: IPA pointer * @data: IPA configuration data * - * Perform initialization requiring IPA clock to be enabled. + * Perform initialization requiring IPA power to be enabled. */ static int ipa_config(struct ipa *ipa, const struct ipa_data *data) { int ret; - /* Get a clock reference to allow initialization. This reference - * is held after initialization completes, and won't get dropped - * unless/until a system suspend request arrives. 
- */ - ipa_clock_get(ipa); - ipa_hardware_config(ipa, data); - ret = ipa_endpoint_config(ipa); + ret = ipa_mem_config(ipa); if (ret) goto err_hardware_deconfig; - ret = ipa_mem_config(ipa); + ipa->interrupt = ipa_interrupt_config(ipa); + if (IS_ERR(ipa->interrupt)) { + ret = PTR_ERR(ipa->interrupt); + ipa->interrupt = NULL; + goto err_mem_deconfig; + } + + ipa_uc_config(ipa); + + ret = ipa_endpoint_config(ipa); if (ret) - goto err_endpoint_deconfig; + goto err_uc_deconfig; ipa_table_config(ipa); /* No deconfig required */ /* Assign resource limitation to each group; no deconfig required */ ret = ipa_resource_config(ipa, data->resource_data); if (ret) - goto err_mem_deconfig; + goto err_endpoint_deconfig; ret = ipa_modem_config(ipa); if (ret) - goto err_mem_deconfig; + goto err_endpoint_deconfig; return 0; -err_mem_deconfig: - ipa_mem_deconfig(ipa); err_endpoint_deconfig: ipa_endpoint_deconfig(ipa); +err_uc_deconfig: + ipa_uc_deconfig(ipa); + ipa_interrupt_deconfig(ipa->interrupt); + ipa->interrupt = NULL; +err_mem_deconfig: + ipa_mem_deconfig(ipa); err_hardware_deconfig: ipa_hardware_deconfig(ipa); - ipa_clock_put(ipa); return ret; } @@ -510,10 +476,12 @@ err_hardware_deconfig: static void ipa_deconfig(struct ipa *ipa) { ipa_modem_deconfig(ipa); - ipa_mem_deconfig(ipa); ipa_endpoint_deconfig(ipa); + ipa_uc_deconfig(ipa); + ipa_interrupt_deconfig(ipa->interrupt); + ipa->interrupt = NULL; + ipa_mem_deconfig(ipa); ipa_hardware_deconfig(ipa); - ipa_clock_put(ipa); } static int ipa_firmware_load(struct device *dev) @@ -612,7 +580,6 @@ MODULE_DEVICE_TABLE(of, ipa_match); * */ static void ipa_validate_build(void) { -#ifdef IPA_VALIDATE /* At one time we assumed a 64-bit build, allowing some do_div() * calls to be replaced by simple division or modulo operations. * We currently only perform divide and modulo operations on u32, @@ -646,7 +613,6 @@ static void ipa_validate_build(void) BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY)); BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) > field_max(AGGR_GRANULARITY_FMASK)); -#endif /* IPA_VALIDATE */ } static bool ipa_version_valid(enum ipa_version version) @@ -681,7 +647,7 @@ static bool ipa_version_valid(enum ipa_version version) * in several stages: * - The "init" stage involves activities that can be initialized without * access to the IPA hardware. - * - The "config" stage requires the IPA clock to be active so IPA registers + * - The "config" stage requires IPA power to be active so IPA registers * can be accessed, but does not require the use of IPA immediate commands. * - The "setup" stage uses IPA immediate commands, and so requires the GSI * layer to be initialized. @@ -697,14 +663,14 @@ static int ipa_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct ipa_data *data; - struct ipa_clock *clock; + struct ipa_power *power; bool modem_init; struct ipa *ipa; int ret; ipa_validate_build(); - /* Get configuration data early; needed for clock initialization */ + /* Get configuration data early; needed for power initialization */ data = of_device_get_match_data(dev); if (!data) { dev_err(dev, "matched hardware not supported\n"); @@ -725,20 +691,20 @@ static int ipa_probe(struct platform_device *pdev) /* The clock and interconnects might not be ready when we're * probed, so might return -EPROBE_DEFER. 
*/ - clock = ipa_clock_init(dev, data->clock_data); - if (IS_ERR(clock)) - return PTR_ERR(clock); + power = ipa_power_init(dev, data->power_data); + if (IS_ERR(power)) + return PTR_ERR(power); /* No more EPROBE_DEFER. Allocate and initialize the IPA structure */ ipa = kzalloc(sizeof(*ipa), GFP_KERNEL); if (!ipa) { ret = -ENOMEM; - goto err_clock_exit; + goto err_power_exit; } ipa->pdev = pdev; dev_set_drvdata(dev, ipa); - ipa->clock = clock; + ipa->power = power; ipa->version = data->version; init_completion(&ipa->completion); @@ -771,18 +737,23 @@ static int ipa_probe(struct platform_device *pdev) if (ret) goto err_table_exit; + /* Power needs to be active for config and setup */ + ret = pm_runtime_get_sync(dev); + if (WARN_ON(ret < 0)) + goto err_power_put; + ret = ipa_config(ipa, data); if (ret) - goto err_modem_exit; + goto err_power_put; dev_info(dev, "IPA driver initialized"); /* If the modem is doing early initialization, it will trigger a - * call to ipa_setup() call when it has finished. In that case - * we're done here. + * call to ipa_setup() when it has finished. In that case we're + * done here. */ if (modem_init) - return 0; + goto done; /* Otherwise we need to load the firmware and have Trust Zone validate * and install it. If that succeeds we can proceed with setup. @@ -794,12 +765,16 @@ static int ipa_probe(struct platform_device *pdev) ret = ipa_setup(ipa); if (ret) goto err_deconfig; +done: + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); return 0; err_deconfig: ipa_deconfig(ipa); -err_modem_exit: +err_power_put: + pm_runtime_put_noidle(dev); ipa_modem_exit(ipa); err_table_exit: ipa_table_exit(ipa); @@ -813,8 +788,8 @@ err_reg_exit: ipa_reg_exit(ipa); err_kfree_ipa: kfree(ipa); -err_clock_exit: - ipa_clock_exit(clock); +err_power_exit: + ipa_power_exit(power); return ret; } @@ -822,9 +797,14 @@ err_clock_exit: static int ipa_remove(struct platform_device *pdev) { struct ipa *ipa = dev_get_drvdata(&pdev->dev); - struct ipa_clock *clock = ipa->clock; + struct ipa_power *power = ipa->power; + struct device *dev = &pdev->dev; int ret; + ret = pm_runtime_get_sync(dev); + if (WARN_ON(ret < 0)) + goto out_power_put; + if (ipa->setup_complete) { ret = ipa_modem_stop(ipa); /* If starting or stopping is in progress, try once more */ @@ -839,6 +819,8 @@ static int ipa_remove(struct platform_device *pdev) } ipa_deconfig(ipa); +out_power_put: + pm_runtime_put_noidle(dev); ipa_modem_exit(ipa); ipa_table_exit(ipa); ipa_endpoint_exit(ipa); @@ -846,7 +828,7 @@ static int ipa_remove(struct platform_device *pdev) ipa_mem_exit(ipa); ipa_reg_exit(ipa); kfree(ipa); - ipa_clock_exit(clock); + ipa_power_exit(power); return 0; } @@ -860,62 +842,6 @@ static void ipa_shutdown(struct platform_device *pdev) dev_err(&pdev->dev, "shutdown: remove returned %d\n", ret); } -/** - * ipa_suspend() - Power management system suspend callback - * @dev: IPA device structure - * - * Return: Always returns zero - * - * Called by the PM framework when a system suspend operation is invoked. - * Suspends endpoints and releases the clock reference held to keep - * the IPA clock running until this point. - */ -static int ipa_suspend(struct device *dev) -{ - struct ipa *ipa = dev_get_drvdata(dev); - - /* When a suspended RX endpoint has a packet ready to receive, we - * get an IPA SUSPEND interrupt. We trigger a system resume in - * that case, but only on the first such interrupt since suspend. 
- */ - __clear_bit(IPA_FLAG_RESUMED, ipa->flags); - - ipa_endpoint_suspend(ipa); - - ipa_clock_put(ipa); - - return 0; -} - -/** - * ipa_resume() - Power management system resume callback - * @dev: IPA device structure - * - * Return: Always returns 0 - * - * Called by the PM framework when a system resume operation is invoked. - * Takes an IPA clock reference to keep the clock running until suspend, - * and resumes endpoints. - */ -static int ipa_resume(struct device *dev) -{ - struct ipa *ipa = dev_get_drvdata(dev); - - /* This clock reference will keep the IPA out of suspend - * until we get a power management suspend request. - */ - ipa_clock_get(ipa); - - ipa_endpoint_resume(ipa); - - return 0; -} - -static const struct dev_pm_ops ipa_pm_ops = { - .suspend = ipa_suspend, - .resume = ipa_resume, -}; - static const struct attribute_group *ipa_attribute_groups[] = { &ipa_attribute_group, &ipa_feature_attribute_group, diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index af9aedbde717..ad116bcc0580 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -9,6 +9,7 @@ #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/if_rmnet.h> +#include <linux/pm_runtime.h> #include <linux/remoteproc/qcom_rproc.h> #include "ipa.h" @@ -19,6 +20,8 @@ #include "ipa_modem.h" #include "ipa_smp2p.h" #include "ipa_qmi.h" +#include "ipa_uc.h" +#include "ipa_power.h" #define IPA_NETDEV_NAME "rmnet_ipa%d" #define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */ @@ -31,9 +34,14 @@ enum ipa_modem_state { IPA_MODEM_STATE_STOPPING, }; -/** struct ipa_priv - IPA network device private data */ +/** + * struct ipa_priv - IPA network device private data + * @ipa: IPA pointer + * @work: Work structure used to wake the modem netdev TX queue + */ struct ipa_priv { struct ipa *ipa; + struct work_struct work; }; /** ipa_open() - Opens the modem network interface */ @@ -41,21 +49,33 @@ static int ipa_open(struct net_device *netdev) { struct ipa_priv *priv = netdev_priv(netdev); struct ipa *ipa = priv->ipa; + struct device *dev; int ret; + dev = &ipa->pdev->dev; + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto err_power_put; + ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]); if (ret) - return ret; + goto err_power_put; + ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]); if (ret) goto err_disable_tx; netif_start_queue(netdev); + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); + return 0; err_disable_tx: ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]); +err_power_put: + pm_runtime_put_noidle(dev); return ret; } @@ -65,11 +85,21 @@ static int ipa_stop(struct net_device *netdev) { struct ipa_priv *priv = netdev_priv(netdev); struct ipa *ipa = priv->ipa; + struct device *dev; + int ret; + + dev = &ipa->pdev->dev; + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto out_power_put; netif_stop_queue(netdev); ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]); ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]); +out_power_put: + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); return 0; } @@ -82,13 +112,15 @@ static int ipa_stop(struct net_device *netdev) * NETDEV_TX_OK: Success * NETDEV_TX_BUSY: Error while transmitting the skb. 
Try again later */ -static int ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t +ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct net_device_stats *stats = &netdev->stats; struct ipa_priv *priv = netdev_priv(netdev); struct ipa_endpoint *endpoint; struct ipa *ipa = priv->ipa; u32 skb_len = skb->len; + struct device *dev; int ret; if (!skb_len) @@ -98,7 +130,35 @@ static int ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev) if (endpoint->data->qmap && skb->protocol != htons(ETH_P_MAP)) goto err_drop_skb; + /* The hardware must be powered for us to transmit */ + dev = &ipa->pdev->dev; + ret = pm_runtime_get(dev); + if (ret < 1) { + /* If a resume won't happen, just drop the packet */ + if (ret < 0 && ret != -EINPROGRESS) { + ipa_power_modem_queue_active(ipa); + pm_runtime_put_noidle(dev); + goto err_drop_skb; + } + + /* No power (yet). Stop the network stack from transmitting + * until we're resumed; ipa_modem_resume() arranges for the + * TX queue to be started again. + */ + ipa_power_modem_queue_stop(ipa); + + pm_runtime_put_noidle(dev); + + return NETDEV_TX_BUSY; + } + + ipa_power_modem_queue_active(ipa); + ret = ipa_endpoint_skb_tx(endpoint, skb); + + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); + if (ret) { if (ret != -E2BIG) return NETDEV_TX_BUSY; @@ -169,12 +229,31 @@ void ipa_modem_suspend(struct net_device *netdev) struct ipa_priv *priv = netdev_priv(netdev); struct ipa *ipa = priv->ipa; - netif_stop_queue(netdev); + if (!(netdev->flags & IFF_UP)) + return; ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]); ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]); } +/** + * ipa_modem_wake_queue_work() - enable modem netdev queue + * @work: Work structure + * + * Re-enable transmit on the modem network device. This is called + * in (power management) work queue context, scheduled when resuming + * the modem. We can't enable the queue directly in ipa_modem_resume() + * because transmits restart the instant the queue is awakened; but the + * device power state won't be ACTIVE until *after* ipa_modem_resume() + * returns. 
+ */ +static void ipa_modem_wake_queue_work(struct work_struct *work) +{ + struct ipa_priv *priv = container_of(work, struct ipa_priv, work); + + ipa_power_modem_queue_wake(priv->ipa); +} + /** ipa_modem_resume() - resume callback for runtime_pm * @dev: pointer to device * @@ -185,10 +264,14 @@ void ipa_modem_resume(struct net_device *netdev) struct ipa_priv *priv = netdev_priv(netdev); struct ipa *ipa = priv->ipa; + if (!(netdev->flags & IFF_UP)) + return; + ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]); ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]); - netif_wake_queue(netdev); + /* Arrange for the TX queue to be restarted */ + (void)queue_pm_work(&priv->work); } int ipa_modem_start(struct ipa *ipa) @@ -216,13 +299,16 @@ int ipa_modem_start(struct ipa *ipa) SET_NETDEV_DEV(netdev, &ipa->pdev->dev); priv = netdev_priv(netdev); priv->ipa = ipa; + INIT_WORK(&priv->work, ipa_modem_wake_queue_work); + ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev; + ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev; + ipa->modem_netdev = netdev; ret = register_netdev(netdev); - if (!ret) { - ipa->modem_netdev = netdev; - ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev; - ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev; - } else { + if (ret) { + ipa->modem_netdev = NULL; + ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL; + ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL; free_netdev(netdev); } @@ -256,13 +342,18 @@ int ipa_modem_stop(struct ipa *ipa) /* Prevent the modem from triggering a call to ipa_setup() */ ipa_smp2p_disable(ipa); - /* Stop the queue and disable the endpoints if it's open */ + /* Clean up the netdev and endpoints if it was started */ if (netdev) { - (void)ipa_stop(netdev); + struct ipa_priv *priv = netdev_priv(netdev); + + cancel_work_sync(&priv->work); + /* If it was opened, stop it first */ + if (netdev->flags & IFF_UP) + (void)ipa_stop(netdev); + unregister_netdev(netdev); + ipa->modem_netdev = NULL; ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL; ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL; - ipa->modem_netdev = NULL; - unregister_netdev(netdev); free_netdev(netdev); } @@ -278,6 +369,12 @@ static void ipa_modem_crashed(struct ipa *ipa) struct device *dev = &ipa->pdev->dev; int ret; + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "error %d getting power to handle crash\n", ret); + goto out_power_put; + } + ipa_endpoint_modem_pause_all(ipa, true); ipa_endpoint_modem_hol_block_clear_all(ipa); @@ -302,6 +399,10 @@ static void ipa_modem_crashed(struct ipa *ipa) ret = ipa_mem_zero_modem(ipa); if (ret) dev_err(dev, "error %d zeroing modem memory regions\n", ret); + +out_power_put: + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); } static int ipa_modem_notify(struct notifier_block *nb, unsigned long action, @@ -314,6 +415,7 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action, switch (action) { case QCOM_SSR_BEFORE_POWERUP: dev_info(dev, "received modem starting event\n"); + ipa_uc_power(ipa); ipa_smp2p_notify_reset(ipa); break; @@ -377,13 +479,3 @@ void ipa_modem_deconfig(struct ipa *ipa) ipa->notifier = NULL; memset(&ipa->nb, 0, sizeof(ipa->nb)); } - -int ipa_modem_setup(struct ipa *ipa) -{ - return ipa_qmi_setup(ipa); -} - -void ipa_modem_teardown(struct ipa *ipa) -{ - ipa_qmi_teardown(ipa); -} diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h index 2de3e216d1d4..5e6e3d234454 100644 --- 
a/drivers/net/ipa/ipa_modem.h +++ b/drivers/net/ipa/ipa_modem.h @@ -7,7 +7,6 @@ #define _IPA_MODEM_H_ struct ipa; -struct ipa_endpoint; struct net_device; struct sk_buff; @@ -25,7 +24,4 @@ void ipa_modem_exit(struct ipa *ipa); int ipa_modem_config(struct ipa *ipa); void ipa_modem_deconfig(struct ipa *ipa); -int ipa_modem_setup(struct ipa *ipa); -void ipa_modem_teardown(struct ipa *ipa); - #endif /* _IPA_MODEM_H_ */ diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c new file mode 100644 index 000000000000..b1c6c0fcb654 --- /dev/null +++ b/drivers/net/ipa/ipa_power.c @@ -0,0 +1,473 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2018-2021 Linaro Ltd. + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/interconnect.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/bitops.h> + +#include "ipa.h" +#include "ipa_power.h" +#include "ipa_endpoint.h" +#include "ipa_modem.h" +#include "ipa_data.h" + +/** + * DOC: IPA Power Management + * + * The IPA hardware is enabled when the IPA core clock and all the + * interconnects (buses) it depends on are enabled. Runtime power + * management is used to determine whether the core clock and + * interconnects are enabled, and to suspend them automatically when + * they are not in use. + * + * The core clock currently runs at a fixed clock rate when enabled, + * and all interconnects use a fixed average and peak bandwidth. + */ + +#define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */ + +/** + * struct ipa_interconnect - IPA interconnect information + * @path: Interconnect path + * @average_bandwidth: Average interconnect bandwidth (KB/second) + * @peak_bandwidth: Peak interconnect bandwidth (KB/second) + */ +struct ipa_interconnect { + struct icc_path *path; + u32 average_bandwidth; + u32 peak_bandwidth; +}; + +/** + * enum ipa_power_flag - IPA power flags + * @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled + * @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended + * @IPA_POWER_FLAG_STOPPED: Modem TX is disabled by ipa_start_xmit() + * @IPA_POWER_FLAG_STARTED: Modem TX was enabled by ipa_runtime_resume() + * @IPA_POWER_FLAG_COUNT: Number of defined power flags + */ +enum ipa_power_flag { + IPA_POWER_FLAG_RESUMED, + IPA_POWER_FLAG_SYSTEM, + IPA_POWER_FLAG_STOPPED, + IPA_POWER_FLAG_STARTED, + IPA_POWER_FLAG_COUNT, /* Last; not a flag */ +}; + +/** + * struct ipa_power - IPA power management information + * @dev: IPA device pointer + * @core: IPA core clock + * @spinlock: Protects modem TX queue enable/disable + * @flags: Boolean state flags + * @interconnect_count: Number of elements in interconnect[] + * @interconnect: Interconnect array + */ +struct ipa_power { + struct device *dev; + struct clk *core; + spinlock_t spinlock; /* used with STOPPED/STARTED power flags */ + DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT); + u32 interconnect_count; + struct ipa_interconnect *interconnect; +}; + +static int ipa_interconnect_init_one(struct device *dev, + struct ipa_interconnect *interconnect, + const struct ipa_interconnect_data *data) +{ + struct icc_path *path; + + path = of_icc_get(dev, data->name); + if (IS_ERR(path)) { + int ret = PTR_ERR(path); + + dev_err_probe(dev, ret, "error getting %s interconnect\n", + data->name); + + return ret; + } + + interconnect->path = path; + interconnect->average_bandwidth = data->average_bandwidth; + interconnect->peak_bandwidth = data->peak_bandwidth; + 
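/* These bandwidths only get recorded here; they take effect when ipa_interconnect_enable() passes them to icc_set_bw() as power is brought up */ +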
return 0; +} + +static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect) +{ + icc_put(interconnect->path); + memset(interconnect, 0, sizeof(*interconnect)); +} + +/* Initialize interconnects required for IPA operation */ +static int ipa_interconnect_init(struct ipa_power *power, struct device *dev, + const struct ipa_interconnect_data *data) +{ + struct ipa_interconnect *interconnect; + u32 count; + int ret; + + count = power->interconnect_count; + interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL); + if (!interconnect) + return -ENOMEM; + power->interconnect = interconnect; + + while (count--) { + ret = ipa_interconnect_init_one(dev, interconnect, data++); + if (ret) + goto out_unwind; + interconnect++; + } + + return 0; + +out_unwind: + while (interconnect-- > power->interconnect) + ipa_interconnect_exit_one(interconnect); + kfree(power->interconnect); + power->interconnect = NULL; + + return ret; +} + +/* Inverse of ipa_interconnect_init() */ +static void ipa_interconnect_exit(struct ipa_power *power) +{ + struct ipa_interconnect *interconnect; + + interconnect = power->interconnect + power->interconnect_count; + while (interconnect-- > power->interconnect) + ipa_interconnect_exit_one(interconnect); + kfree(power->interconnect); + power->interconnect = NULL; +} + +/* Currently we only use one bandwidth level, so just "enable" interconnects */ +static int ipa_interconnect_enable(struct ipa *ipa) +{ + struct ipa_interconnect *interconnect; + struct ipa_power *power = ipa->power; + int ret; + u32 i; + + interconnect = power->interconnect; + for (i = 0; i < power->interconnect_count; i++) { + ret = icc_set_bw(interconnect->path, + interconnect->average_bandwidth, + interconnect->peak_bandwidth); + if (ret) { + dev_err(&ipa->pdev->dev, + "error %d enabling %s interconnect\n", + ret, icc_get_name(interconnect->path)); + goto out_unwind; + } + interconnect++; + } + + return 0; + +out_unwind: + while (interconnect-- > power->interconnect) + (void)icc_set_bw(interconnect->path, 0, 0); + + return ret; +} + +/* To disable an interconnect, we just set its bandwidth to 0 */ +static int ipa_interconnect_disable(struct ipa *ipa) +{ + struct ipa_interconnect *interconnect; + struct ipa_power *power = ipa->power; + struct device *dev = &ipa->pdev->dev; + int result = 0; + u32 count; + int ret; + + count = power->interconnect_count; + interconnect = power->interconnect + count; + while (count--) { + interconnect--; + ret = icc_set_bw(interconnect->path, 0, 0); + if (ret) { + dev_err(dev, "error %d disabling %s interconnect\n", + ret, icc_get_name(interconnect->path)); + /* Try to disable all; record only the first error */ + if (!result) + result = ret; + } + } + + return result; +} + +/* Enable IPA power, enabling interconnects and the core clock */ +static int ipa_power_enable(struct ipa *ipa) +{ + int ret; + + ret = ipa_interconnect_enable(ipa); + if (ret) + return ret; + + ret = clk_prepare_enable(ipa->power->core); + if (ret) { + dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret); + (void)ipa_interconnect_disable(ipa); + } + + return ret; +} + +/* Inverse of ipa_power_enable() */ +static int ipa_power_disable(struct ipa *ipa) +{ + clk_disable_unprepare(ipa->power->core); + + return ipa_interconnect_disable(ipa); +} + +static int ipa_runtime_suspend(struct device *dev) +{ + struct ipa *ipa = dev_get_drvdata(dev); + + /* Endpoints aren't usable until setup is complete */ + if (ipa->setup_complete) { + __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags); + 
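/* Quiesce endpoints first, then the GSI layer, so nothing is in flight when the clock and interconnects are shut off below */ +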
ipa_endpoint_suspend(ipa); + gsi_suspend(&ipa->gsi); + } + + return ipa_power_disable(ipa); +} + +static int ipa_runtime_resume(struct device *dev) +{ + struct ipa *ipa = dev_get_drvdata(dev); + int ret; + + ret = ipa_power_enable(ipa); + if (WARN_ON(ret < 0)) + return ret; + + /* Endpoints aren't usable until setup is complete */ + if (ipa->setup_complete) { + gsi_resume(&ipa->gsi); + ipa_endpoint_resume(ipa); + } + + return 0; +} + +static int ipa_suspend(struct device *dev) +{ + struct ipa *ipa = dev_get_drvdata(dev); + + __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); + + return pm_runtime_force_suspend(dev); +} + +static int ipa_resume(struct device *dev) +{ + struct ipa *ipa = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_force_resume(dev); + + __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); + + return ret; +} + +/* Return the current IPA core clock rate */ +u32 ipa_core_clock_rate(struct ipa *ipa) +{ + return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0; +} + +/** + * ipa_suspend_handler() - Handle the suspend IPA interrupt + * @ipa: IPA pointer + * @irq_id: IPA interrupt type (unused) + * + * If an RX endpoint is suspended, and the IPA has a packet destined for + * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP + * that it should resume the endpoint. If we get one of these interrupts + * we just wake up the system. + */ +static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id) +{ + /* To handle an IPA interrupt we will have resumed the hardware + * just to handle the interrupt, so we're done. If we are in a + * system suspend, trigger a system resume. + */ + if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags)) + if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags)) + pm_wakeup_dev_event(&ipa->pdev->dev, 0, true); + + /* Acknowledge/clear the suspend interrupt on all endpoints */ + ipa_interrupt_suspend_clear_all(ipa->interrupt); +} + +/* The next few functions coordinate stopping and starting the modem + * network device transmit queue. + * + * Transmit can be running concurrent with power resume, and there's a + * chance the resume completes before the transmit path stops the queue, + * leaving the queue in a stopped state. The next two functions are used + * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit() + * to conditionally stop the TX queue; and ipa_power_modem_queue_wake() + * is used on the power resume path to conditionally restart it. + * + * Two flags and a spinlock are used. If the queue is stopped, the STOPPED + * power flag is set. And if the queue is started, the STARTED flag is set. + * The queue is only started on resume if the STOPPED flag is set. And the + * queue is only started in ipa_start_xmit() if the STARTED flag is *not* + * set. As a result, the queue remains operational if the two activities + * happen concurrently regardless of the order they complete. The spinlock + * ensures the flag and TX queue operations are done atomically. + * + * The first function stops the modem netdev transmit queue, but only if + * the STARTED flag is *not* set. That flag is cleared if it was set. + * If the queue is stopped, the STOPPED flag is set. This is called only + * from the transmit path, ipa_start_xmit().
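The comment above describes how the transmit path is expected to use these helpers. A condensed sketch of that usage (simplified and hypothetical, not the driver's actual ipa_start_xmit(); example_ndev_to_ipa() is an assumed accessor):

#include <linux/netdevice.h>
#include <linux/pm_runtime.h>

struct ipa *example_ndev_to_ipa(struct net_device *ndev);	/* hypothetical */

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *ndev)
{
	struct ipa *ipa = example_ndev_to_ipa(ndev);
	struct device *dev = &ipa->pdev->dev;
	int ret;

	/* Power must be active before the hardware is touched */
	ret = pm_runtime_get(dev);
	if (ret < 1) {
		/* Not powered yet; stop the queue unless a concurrent
		 * resume already restarted it (STARTED flag set).  The
		 * resume path restarts the queue again via
		 * ipa_power_modem_queue_wake().
		 */
		ipa_power_modem_queue_stop(ipa);
		pm_runtime_put_noidle(dev);
		return NETDEV_TX_BUSY;
	}
	ipa_power_modem_queue_active(ipa);	/* queue is known running */

	/* ... hand the skb to the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	(void)pm_runtime_put_autosuspend(dev);

	return NETDEV_TX_OK;
}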
+ */ +void ipa_power_modem_queue_stop(struct ipa *ipa) +{ + struct ipa_power *power = ipa->power; + unsigned long flags; + + spin_lock_irqsave(&power->spinlock, flags); + + if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) { + netif_stop_queue(ipa->modem_netdev); + __set_bit(IPA_POWER_FLAG_STOPPED, power->flags); + } + + spin_unlock_irqrestore(&power->spinlock, flags); +} + +/* This function starts the modem netdev transmit queue, but only if the + * STOPPED flag is set. That flag is cleared if it was set. If the queue + * was restarted, the STARTED flag is set; this allows ipa_start_xmit() + * to skip stopping the queue in the event of a race. + */ +void ipa_power_modem_queue_wake(struct ipa *ipa) +{ + struct ipa_power *power = ipa->power; + unsigned long flags; + + spin_lock_irqsave(&power->spinlock, flags); + + if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) { + __set_bit(IPA_POWER_FLAG_STARTED, power->flags); + netif_wake_queue(ipa->modem_netdev); + } + + spin_unlock_irqrestore(&power->spinlock, flags); +} + +/* This function clears the STARTED flag once the TX queue is operating */ +void ipa_power_modem_queue_active(struct ipa *ipa) +{ + clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags); +} + +int ipa_power_setup(struct ipa *ipa) +{ + int ret; + + ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND, + ipa_suspend_handler); + + ret = device_init_wakeup(&ipa->pdev->dev, true); + if (ret) + ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); + + return ret; +} + +void ipa_power_teardown(struct ipa *ipa) +{ + (void)device_init_wakeup(&ipa->pdev->dev, false); + ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); +} + +/* Initialize IPA power management */ +struct ipa_power * +ipa_power_init(struct device *dev, const struct ipa_power_data *data) +{ + struct ipa_power *power; + struct clk *clk; + int ret; + + clk = clk_get(dev, "core"); + if (IS_ERR(clk)) { + dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n"); + + return ERR_CAST(clk); + } + + ret = clk_set_rate(clk, data->core_clock_rate); + if (ret) { + dev_err(dev, "error %d setting core clock rate to %u\n", + ret, data->core_clock_rate); + goto err_clk_put; + } + + power = kzalloc(sizeof(*power), GFP_KERNEL); + if (!power) { + ret = -ENOMEM; + goto err_clk_put; + } + power->dev = dev; + power->core = clk; + spin_lock_init(&power->spinlock); + power->interconnect_count = data->interconnect_count; + + ret = ipa_interconnect_init(power, dev, data->interconnect_data); + if (ret) + goto err_kfree; + + pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(dev); + pm_runtime_enable(dev); + + return power; + +err_kfree: + kfree(power); +err_clk_put: + clk_put(clk); + + return ERR_PTR(ret); +} + +/* Inverse of ipa_power_init() */ +void ipa_power_exit(struct ipa_power *power) +{ + struct device *dev = power->dev; + struct clk *clk = power->core; + + pm_runtime_disable(dev); + pm_runtime_dont_use_autosuspend(dev); + ipa_interconnect_exit(power); + kfree(power); + clk_put(clk); +} + +const struct dev_pm_ops ipa_pm_ops = { + .suspend = ipa_suspend, + .resume = ipa_resume, + .runtime_suspend = ipa_runtime_suspend, + .runtime_resume = ipa_runtime_resume, +}; diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h new file mode 100644 index 000000000000..2151805d7fbb --- /dev/null +++ b/drivers/net/ipa/ipa_power.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
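The ipa_pm_ops block above routes system sleep through the runtime PM callbacks by way of pm_runtime_force_suspend()/pm_runtime_force_resume(). For a driver whose runtime callbacks fully quiesce the device, the wiring has this general shape (a sketch with hypothetical names, not this driver's code):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	/* Quiesce the hardware here */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	/* Bring the hardware back up here */
	return 0;
}

static int example_suspend(struct device *dev)
{
	/* Reuses the runtime path for system sleep, suspending the
	 * device even if runtime references are still held.
	 */
	return pm_runtime_force_suspend(dev);
}

static int example_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops example_pm_ops = {
	.suspend = example_suspend,
	.resume = example_resume,
	.runtime_suspend = example_runtime_suspend,
	.runtime_resume = example_runtime_resume,
};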
+ * Copyright (C) 2018-2020 Linaro Ltd. + */ +#ifndef _IPA_POWER_H_ +#define _IPA_POWER_H_ + +struct device; + +struct ipa; +struct ipa_power_data; + +/* IPA device power management function block */ +extern const struct dev_pm_ops ipa_pm_ops; + +/** + * ipa_core_clock_rate() - Return the current IPA core clock rate + * @ipa: IPA structure + * + * Return: The current clock rate (in Hz), or 0. + */ +u32 ipa_core_clock_rate(struct ipa *ipa); + +/** + * ipa_power_modem_queue_stop() - Possibly stop the modem netdev TX queue + * @ipa: IPA pointer + */ +void ipa_power_modem_queue_stop(struct ipa *ipa); + +/** + * ipa_power_modem_queue_wake() - Possibly wake the modem netdev TX queue + * @ipa: IPA pointer + */ +void ipa_power_modem_queue_wake(struct ipa *ipa); + +/** + * ipa_power_modem_queue_active() - Report modem netdev TX queue active + * @ipa: IPA pointer + */ +void ipa_power_modem_queue_active(struct ipa *ipa); + +/** + * ipa_power_setup() - Set up IPA power management + * @ipa: IPA pointer + * + * Return: 0 if successful, or a negative error code + */ +int ipa_power_setup(struct ipa *ipa); + +/** + * ipa_power_teardown() - Inverse of ipa_power_setup() + * @ipa: IPA pointer + */ +void ipa_power_teardown(struct ipa *ipa); + +/** + * ipa_power_init() - Initialize IPA power management + * @dev: IPA device + * @data: Power configuration data + * + * Return: A pointer to an ipa_power structure, or a pointer-coded error + */ +struct ipa_power *ipa_power_init(struct device *dev, + const struct ipa_power_data *data); + +/** + * ipa_power_exit() - Inverse of ipa_power_init() + * @power: IPA power pointer + */ +void ipa_power_exit(struct ipa_power *power); + +#endif /* _IPA_POWER_H_ */ diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index 4661105ce7ab..90f3aec55b36 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -467,10 +467,7 @@ static const struct qmi_ops ipa_client_ops = { .new_server = ipa_client_new_server, }; -/* This is called by ipa_setup(). We can be informed via remoteproc that - * the modem has shut down, in which case this function will be called - * again to prepare for it coming back up again. - */ +/* Set up for QMI message exchange */ int ipa_qmi_setup(struct ipa *ipa) { struct ipa_qmi *ipa_qmi = &ipa->qmi; @@ -526,6 +523,7 @@ err_server_handle_release: return ret; } +/* Tear down IPA QMI handles */ void ipa_qmi_teardown(struct ipa *ipa) { cancel_work_sync(&ipa->qmi.init_driver_work); diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h index b6f2055d35a6..856ef629ccc8 100644 --- a/drivers/net/ipa/ipa_qmi.h +++ b/drivers/net/ipa/ipa_qmi.h @@ -39,7 +39,26 @@ struct ipa_qmi { bool indication_sent; }; +/** + * ipa_qmi_setup() - Set up for QMI message exchange + * @ipa: IPA pointer + * + * This is called at the end of ipa_setup(), to prepare for the exchange + * of QMI messages that perform a "handshake" between the AP and modem. + * When the modem QMI server announces its presence, an AP request message + * supplies operating parameters to the modem, and the modem + * acknowledges receipt of those parameters. The modem will not touch the + * IPA hardware until this handshake is complete. + * + * If the modem crashes (or shuts down), a new handshake begins when the + * modem's QMI server is started again.
+ */ int ipa_qmi_setup(struct ipa *ipa); + +/** + * ipa_qmi_teardown() - Tear down IPA QMI handles + * @ipa: IPA pointer + */ void ipa_qmi_teardown(struct ipa *ipa); #endif /* !_IPA_QMI_H_ */ diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h index b89dec5865a5..a5b355384d4a 100644 --- a/drivers/net/ipa/ipa_reg.h +++ b/drivers/net/ipa/ipa_reg.h @@ -99,7 +99,7 @@ struct ipa; static inline u32 arbitration_lock_disable_encoded(enum ipa_version version, u32 mask) { - /* assert(version >= IPA_VERSION_4_0); */ + WARN_ON(version < IPA_VERSION_4_0); if (version < IPA_VERSION_4_9) return u32_encode_bits(mask, GENMASK(20, 17)); @@ -116,7 +116,7 @@ static inline u32 full_flush_rsc_closure_en_encoded(enum ipa_version version, { u32 val = enable ? 1 : 0; - /* assert(version >= IPA_VERSION_4_5); */ + WARN_ON(version < IPA_VERSION_4_5); if (version == IPA_VERSION_4_5 || version == IPA_VERSION_4_7) return u32_encode_bits(val, GENMASK(21, 21)); @@ -409,7 +409,7 @@ static inline u32 ipa_header_size_encoded(enum ipa_version version, val = u32_encode_bits(size, HDR_LEN_FMASK); if (version < IPA_VERSION_4_5) { - /* ipa_assert(header_size == size); */ + WARN_ON(header_size != size); return val; } @@ -429,7 +429,7 @@ static inline u32 ipa_metadata_offset_encoded(enum ipa_version version, val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK); if (version < IPA_VERSION_4_5) { - /* ipa_assert(offset == off); */ + WARN_ON(offset != off); return val; } @@ -812,7 +812,7 @@ ipa_reg_irq_suspend_info_offset(enum ipa_version version) static inline u32 ipa_reg_irq_suspend_en_ee_n_offset(enum ipa_version version, u32 ee) { - /* assert(version != IPA_VERSION_3_0); */ + WARN_ON(version == IPA_VERSION_3_0); if (version < IPA_VERSION_4_9) return 0x00003034 + 0x1000 * ee; @@ -830,7 +830,7 @@ ipa_reg_irq_suspend_en_offset(enum ipa_version version) static inline u32 ipa_reg_irq_suspend_clr_ee_n_offset(enum ipa_version version, u32 ee) { - /* assert(version != IPA_VERSION_3_0); */ + WARN_ON(version == IPA_VERSION_3_0); if (version < IPA_VERSION_4_9) return 0x00003038 + 0x1000 * ee; diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c index 3b2dc216d3a6..e3da95d69409 100644 --- a/drivers/net/ipa/ipa_resource.c +++ b/drivers/net/ipa/ipa_resource.c @@ -29,7 +29,6 @@ static bool ipa_resource_limits_valid(struct ipa *ipa, const struct ipa_resource_data *data) { -#ifdef IPA_VALIDATION u32 group_count; u32 i; u32 j; @@ -65,7 +64,7 @@ static bool ipa_resource_limits_valid(struct ipa *ipa, if (resource->limits[j].min || resource->limits[j].max) return false; } -#endif /* !IPA_VALIDATION */ + return true; } diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c index 93270e50b6b3..df7639c39d71 100644 --- a/drivers/net/ipa/ipa_smp2p.c +++ b/drivers/net/ipa/ipa_smp2p.c @@ -9,13 +9,13 @@ #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/panic_notifier.h> +#include <linux/pm_runtime.h> #include <linux/soc/qcom/smem.h> #include <linux/soc/qcom/smem_state.h> #include "ipa_smp2p.h" #include "ipa.h" #include "ipa_uc.h" -#include "ipa_clock.h" /** * DOC: IPA SMP2P communication with the modem @@ -23,19 +23,19 @@ * SMP2P is a primitive communication mechanism available between the AP and * the modem. The IPA driver uses this for two purposes: to enable the modem * to state that the GSI hardware is ready to use; and to communicate the - * state of the IPA clock in the event of a crash. + * state of IPA power in the event of a crash. 
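The ipa_reg.h hunks above turn commented-out asserts into WARN_ON() calls: the precondition is now actually checked at run time, reported with a backtrace (and a kernel taint), and execution continues rather than halting. The idiom in isolation (a sketch; the enum and version constants are assumed from ipa_version.h):

#include <linux/bug.h>
#include <linux/bitfield.h>
#include <linux/bits.h>

static inline u32 example_encoded(enum ipa_version version, u32 mask)
{
	/* Caller contract, formerly stated only in a comment; WARN_ON()
	 * logs a one-line splat with a backtrace but does not stop the
	 * system, so a best-effort value is still returned below.
	 */
	WARN_ON(version < IPA_VERSION_4_0);

	if (version < IPA_VERSION_4_9)
		return u32_encode_bits(mask, GENMASK(20, 17));

	return u32_encode_bits(mask, GENMASK(24, 21));
}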
* * GSI needs to have early initialization completed before it can be used. * This initialization is done either by Trust Zone or by the modem. In the * latter case, the modem uses an SMP2P interrupt to tell the AP IPA driver * when the GSI is ready to use. * - * The modem is also able to inquire about the current state of the IPA - * clock by trigging another SMP2P interrupt to the AP. We communicate - * whether the clock is enabled using two SMP2P state bits--one to - * indicate the clock state (on or off), and a second to indicate the - * clock state bit is valid. The modem will poll the valid bit until it - * is set, and at that time records whether the AP has the IPA clock enabled. + * The modem is also able to inquire about the current state of IPA + * power by triggering another SMP2P interrupt to the AP. We communicate + * whether power is enabled using two SMP2P state bits--one to indicate + * the power state (on or off), and a second to indicate the power state + * bit is valid. The modem will poll the valid bit until it is set, and + * at that time records whether the AP has IPA power enabled. * * Finally, if the AP kernel panics, we update the SMP2P state bits even if * we never receive an interrupt from the modem requesting this. @@ -45,14 +45,14 @@ * struct ipa_smp2p - IPA SMP2P information * @ipa: IPA pointer * @valid_state: SMEM state indicating enabled state is valid - * @enabled_state: SMEM state to indicate clock is enabled + * @enabled_state: SMEM state to indicate power is enabled * @valid_bit: Valid bit in 32-bit SMEM state mask * @enabled_bit: Enabled bit in 32-bit SMEM state mask - * @clock_query_irq: IPA interrupt triggered by modem for clock query + * @clock_query_irq: IPA interrupt triggered by modem for power query * @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready - * @clock_on: Whether IPA clock is on - * @notified: Whether modem has been notified of clock state + * @power_on: Whether IPA power is on + * @notified: Whether modem has been notified of power state * @disabled: Whether setup ready interrupt handling is disabled * @mutex: Mutex protecting ready-interrupt/shutdown interlock * @panic_notifier: Panic notifier structure @@ -65,7 +65,7 @@ struct ipa_smp2p { u32 enabled_bit; u32 clock_query_irq; u32 setup_ready_irq; - bool clock_on; + bool power_on; bool notified; bool disabled; struct mutex mutex; @@ -73,28 +73,30 @@ }; /** - * ipa_smp2p_notify() - use SMP2P to tell modem about IPA clock state + * ipa_smp2p_notify() - use SMP2P to tell modem about IPA power state * @smp2p: SMP2P information * * This is called either when the modem has requested it (by triggering - * the modem clock query IPA interrupt) or whenever the AP is shutting down + * the modem power query IPA interrupt) or whenever the AP is shutting down * (via a panic notifier). It sets the two SMP2P state bits--one saying - * whether the IPA clock is running, and the other indicating the first bit + * whether the IPA power is on, and the other indicating the first bit * is valid.
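ipa_smp2p_notify() (below) implements the two-bit scheme this comment describes: the data bit is written first, and the valid bit is only set afterwards, so the peer can never observe a valid flag covering a stale data bit. The scheme in isolation (a sketch with hypothetical parameters):

#include <linux/bits.h>
#include <linux/soc/qcom/smem_state.h>

/* Publish an on/off state to the remote side using one data bit and
 * one valid bit.  Ordering matters: data first, then valid.
 */
static void example_smp2p_publish(struct qcom_smem_state *enabled_state,
				  struct qcom_smem_state *valid_state,
				  u32 enabled_bit, u32 valid_bit,
				  bool power_on)
{
	u32 mask;

	/* Signal whether power is enabled */
	mask = BIT(enabled_bit);
	qcom_smem_state_update_bits(enabled_state, mask, power_on ? mask : 0);

	/* Only now mark the data bit as valid */
	mask = BIT(valid_bit);
	qcom_smem_state_update_bits(valid_state, mask, mask);
}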
*/ static void ipa_smp2p_notify(struct ipa_smp2p *smp2p) { + struct device *dev; u32 value; u32 mask; if (smp2p->notified) return; - smp2p->clock_on = ipa_clock_get_additional(smp2p->ipa); + dev = &smp2p->ipa->pdev->dev; + smp2p->power_on = pm_runtime_get_if_active(dev, true) > 0; - /* Signal whether the clock is enabled */ + /* Signal whether the IPA power is enabled */ mask = BIT(smp2p->enabled_bit); - value = smp2p->clock_on ? mask : 0; + value = smp2p->power_on ? mask : 0; qcom_smem_state_update_bits(smp2p->enabled_state, mask, value); /* Now indicate that the enabled flag is valid */ @@ -124,7 +126,7 @@ static int ipa_smp2p_panic_notifier(struct notifier_block *nb, ipa_smp2p_notify(smp2p); - if (smp2p->clock_on) + if (smp2p->power_on) ipa_uc_panic_notifier(smp2p->ipa); return NOTIFY_DONE; @@ -150,19 +152,31 @@ static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p) static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id) { struct ipa_smp2p *smp2p = dev_id; + struct device *dev; + int ret; mutex_lock(&smp2p->mutex); - if (!smp2p->disabled) { - int ret; + if (smp2p->disabled) + goto out_mutex_unlock; + smp2p->disabled = true; /* If any others arrive, ignore them */ - ret = ipa_setup(smp2p->ipa); - if (ret) - dev_err(&smp2p->ipa->pdev->dev, - "error %d from ipa_setup()\n", ret); - smp2p->disabled = true; + /* Power needs to be active for setup */ + dev = &smp2p->ipa->pdev->dev; + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "error %d getting power for setup\n", ret); + goto out_power_put; } + /* An error here won't cause driver shutdown, so warn if one occurs */ + ret = ipa_setup(smp2p->ipa); + WARN(ret != 0, "error %d from ipa_setup()\n", ret); + +out_power_put: + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); +out_mutex_unlock: mutex_unlock(&smp2p->mutex); return IRQ_HANDLED; @@ -195,14 +209,17 @@ static void ipa_smp2p_irq_exit(struct ipa_smp2p *smp2p, u32 irq) free_irq(irq, smp2p); } -/* Drop the clock reference if it was taken in ipa_smp2p_notify() */ -static void ipa_smp2p_clock_release(struct ipa *ipa) +/* Drop the power reference if it was taken in ipa_smp2p_notify() */ +static void ipa_smp2p_power_release(struct ipa *ipa) { - if (!ipa->smp2p->clock_on) + struct device *dev = &ipa->pdev->dev; + + if (!ipa->smp2p->power_on) return; - ipa_clock_put(ipa); - ipa->smp2p->clock_on = false; + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); + ipa->smp2p->power_on = false; } /* Initialize the IPA SMP2P subsystem */ @@ -236,7 +253,7 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init) smp2p->ipa = ipa; - /* These fields are needed by the clock query interrupt + /* These fields are needed by the power query interrupt * handler, so initialize them now. 
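Two runtime-PM idioms appear in the hunk above. The notify path samples the power state without ever triggering a resume: pm_runtime_get_if_active() takes a reference only if the device is already active, and returns a positive value in that case. The setup-ready ISR instead holds power across its work with pm_runtime_get_sync() and drops it via autosuspend. A sketch of the query side (hypothetical helpers; the two-argument pm_runtime_get_if_active() matches the kernel version this patch targets):

#include <linux/pm_runtime.h>

/* Returns true (and holds a reference) only if the device is already
 * powered; never wakes the device up.
 */
static bool example_power_is_on(struct device *dev, bool *took_ref)
{
	*took_ref = pm_runtime_get_if_active(dev, true) > 0;

	return *took_ref;
}

/* Drop the reference later, but only if one was actually taken */
static void example_power_ref_release(struct device *dev, bool took_ref)
{
	if (!took_ref)
		return;

	pm_runtime_mark_last_busy(dev);
	(void)pm_runtime_put_autosuspend(dev);
}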
*/ mutex_init(&smp2p->mutex); @@ -289,8 +306,8 @@ void ipa_smp2p_exit(struct ipa *ipa) ipa_smp2p_irq_exit(smp2p, smp2p->setup_ready_irq); ipa_smp2p_panic_notifier_unregister(smp2p); ipa_smp2p_irq_exit(smp2p, smp2p->clock_query_irq); - /* We won't get notified any more; drop clock reference (if any) */ - ipa_smp2p_clock_release(ipa); + /* We won't get notified any more; drop power reference (if any) */ + ipa_smp2p_power_release(ipa); ipa->smp2p = NULL; mutex_destroy(&smp2p->mutex); kfree(smp2p); @@ -319,13 +336,13 @@ void ipa_smp2p_notify_reset(struct ipa *ipa) if (!smp2p->notified) return; - ipa_smp2p_clock_release(ipa); + ipa_smp2p_power_release(ipa); - /* Reset the clock enabled valid flag */ + /* Reset the power enabled valid flag */ mask = BIT(smp2p->valid_bit); qcom_smem_state_update_bits(smp2p->valid_state, mask, 0); - /* Mark the clock disabled for good measure... */ + /* Mark the power disabled for good measure... */ mask = BIT(smp2p->enabled_bit); qcom_smem_state_update_bits(smp2p->enabled_state, mask, 0); diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h index 20319438a841..99a956789638 100644 --- a/drivers/net/ipa/ipa_smp2p.h +++ b/drivers/net/ipa/ipa_smp2p.h @@ -39,7 +39,7 @@ void ipa_smp2p_disable(struct ipa *ipa); * ipa_smp2p_notify_reset() - Reset modem notification state * @ipa: IPA pointer * - * If the modem crashes it queries the IPA clock state. In cleaning + * If the modem crashes it queries the IPA power state. In cleaning * up after such a crash this is used to reset some state maintained * for managing this notification. */ diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index c617a9156f26..2324e1b93e37 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -120,8 +120,6 @@ */ #define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32)) -#ifdef IPA_VALIDATE - /* Check things that can be validated at build time. 
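ipa_table_validate_build() (whose body the hunk below elides) is now compiled unconditionally, which costs nothing: BUILD_BUG_ON() fails the build when its condition is true and generates no code otherwise. Hypothetical examples of the kind of checks such a function can carry:

#include <linux/build_bug.h>
#include <linux/types.h>

static void example_validate_build(void)
{
	/* Table entries hold DMA addresses in 64-bit slots */
	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64));

	/* The zero rule occupies exactly one table entry */
	BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64));
}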
*/ static void ipa_table_validate_build(void) { @@ -161,7 +159,7 @@ ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route) else size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64); - if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed)) + if (!ipa_cmd_table_valid(ipa, mem, route)) return false; /* mem->size >= size is sufficient, but we'll demand more */ @@ -169,7 +167,7 @@ ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route) return true; /* Hashed table regions can be zero size if hashing is not supported */ - if (hashed && !mem->size) + if (ipa_table_hash_support(ipa) && !mem->size) return true; dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n", @@ -183,14 +181,22 @@ bool ipa_table_valid(struct ipa *ipa) { bool valid; - valid = ipa_table_valid_one(IPA_MEM_V4_FILTER, false); - valid = valid && ipa_table_valid_one(IPA_MEM_V4_FILTER_HASHED, false); - valid = valid && ipa_table_valid_one(IPA_MEM_V6_FILTER, false); - valid = valid && ipa_table_valid_one(IPA_MEM_V6_FILTER_HASHED, false); - valid = valid && ipa_table_valid_one(IPA_MEM_V4_ROUTE, true); - valid = valid && ipa_table_valid_one(IPA_MEM_V4_ROUTE_HASHED, true); - valid = valid && ipa_table_valid_one(IPA_MEM_V6_ROUTE, true); - valid = valid && ipa_table_valid_one(IPA_MEM_V6_ROUTE_HASHED, true); + valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false); + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false); + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true); + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true); + + if (!ipa_table_hash_support(ipa)) + return valid; + + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED, + false); + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED, + false); + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED, + true); + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED, + true); return valid; } @@ -217,14 +223,6 @@ bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map) return true; } -#else /* !IPA_VALIDATE */ -static void ipa_table_validate_build(void) - -{ -} - -#endif /* !IPA_VALIDATE */ - /* Zero entry count means no table, so just return a 0 address */ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count) { @@ -233,7 +231,7 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count) if (!count) return 0; -/* assert(count <= max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); */ + WARN_ON(count > max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); /* Skip over the zero rule and possibly the filter mask */ skip = filter_mask ? 
2 : 1; diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h index 1e2be9fce2f8..b6a9a0d79d68 100644 --- a/drivers/net/ipa/ipa_table.h +++ b/drivers/net/ipa/ipa_table.h @@ -16,8 +16,6 @@ struct ipa; /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */ #define IPA_ROUTE_COUNT_MAX 15 -#ifdef IPA_VALIDATE - /** * ipa_table_valid() - Validate route and filter table memory regions * @ipa: IPA pointer @@ -35,20 +33,6 @@ bool ipa_table_valid(struct ipa *ipa); */ bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask); -#else /* !IPA_VALIDATE */ - -static inline bool ipa_table_valid(struct ipa *ipa) -{ - return true; -} - -static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask) -{ - return true; -} - -#endif /* !IPA_VALIDATE */ - /** * ipa_table_hash_support() - Return true if hashed tables are supported * @ipa: IPA pointer diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c index fd9219863234..856e55a080a7 100644 --- a/drivers/net/ipa/ipa_uc.c +++ b/drivers/net/ipa/ipa_uc.c @@ -7,9 +7,9 @@ #include <linux/types.h> #include <linux/io.h> #include <linux/delay.h> +#include <linux/pm_runtime.h> #include "ipa.h" -#include "ipa_clock.h" #include "ipa_uc.h" /** @@ -131,7 +131,7 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id) if (shared->event == IPA_UC_EVENT_ERROR) dev_err(dev, "microcontroller error event\n"); else if (shared->event != IPA_UC_EVENT_LOG_INFO) - dev_err(dev, "unsupported microcontroller event %hhu\n", + dev_err(dev, "unsupported microcontroller event %u\n", shared->event); /* The LOG_INFO event can be safely ignored */ }
- */ - ipa_clock_get(ipa); - + ipa->uc_powered = false; ipa->uc_loaded = false; ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, ipa_uc_event_handler); ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_1, ipa_uc_response_hdlr); } -/* Inverse of ipa_uc_setup() */ -void ipa_uc_teardown(struct ipa *ipa) +/* Inverse of ipa_uc_config() */ +void ipa_uc_deconfig(struct ipa *ipa) { + struct device *dev = &ipa->pdev->dev; + ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1); ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0); - if (!ipa->uc_loaded) - ipa_clock_put(ipa); + if (!ipa->uc_powered) + return; + + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); +} + +/* Take a proxy power reference for the microcontroller */ +void ipa_uc_power(struct ipa *ipa) +{ + static bool already; + struct device *dev; + int ret; + + if (already) + return; + already = true; /* Only do this on first boot */ + + /* This power reference is dropped in ipa_uc_response_hdlr() above */ + dev = &ipa->pdev->dev; + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + dev_err(dev, "error %d getting proxy power\n", ret); + } else { + ipa->uc_powered = true; + } } /* Send a command to the microcontroller */ diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h index e8510899a3f0..23847f934d64 100644 --- a/drivers/net/ipa/ipa_uc.h +++ b/drivers/net/ipa/ipa_uc.h @@ -9,16 +9,30 @@ struct ipa; /** - * ipa_uc_setup() - set up the IPA microcontroller subsystem + * ipa_uc_config() - Configure the IPA microcontroller subsystem * @ipa: IPA pointer */ -void ipa_uc_setup(struct ipa *ipa); +void ipa_uc_config(struct ipa *ipa); /** - * ipa_uc_teardown() - inverse of ipa_uc_setup() + * ipa_uc_deconfig() - Inverse of ipa_uc_config() * @ipa: IPA pointer */ -void ipa_uc_teardown(struct ipa *ipa); +void ipa_uc_deconfig(struct ipa *ipa); + +/** + * ipa_uc_power() - Take a proxy power reference for the microcontroller + * @ipa: IPA pointer + * + * The first time the modem boots, it loads firmware for and starts the + * IPA-resident microcontroller. The microcontroller signals that it + * has completed its initialization by sending an INIT_COMPLETED response + * message to the AP. The AP must ensure the IPA is powered until + * it receives this message, and to do so we take a "proxy" power + * reference on its behalf here. Once we receive the INIT_COMPLETED + * message (in ipa_uc_response_hdlr()) we drop this power reference.
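ipa_uc_power() above illustrates a subtlety of pm_runtime_get_sync(): the usage counter is incremented even when the call fails, so the error path must drop the reference with pm_runtime_put_noidle(). The same shape in isolation (sketch, hypothetical name):

#include <linux/pm_runtime.h>

static int example_power_get(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* The reference was taken despite the failure; release
		 * it without triggering another resume attempt.
		 */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	return 0;
}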
+ */ +void ipa_uc_power(struct ipa *ipa); /** * ipa_uc_panic_notifier() diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index a707502a0c0f..c0b21a5580d5 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -732,6 +732,7 @@ static int ipvlan_device_event(struct notifier_block *unused, port = ipvlan_port_get_rtnl(dev); switch (event) { + case NETDEV_UP: case NETDEV_CHANGE: list_for_each_entry(ipvlan, &port->ipvlans, pnode) netif_stacked_transfer_operstate(ipvlan->phy_dev, diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 80de9768ecd4..35f46ad040b0 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -829,7 +829,7 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int macvlan_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct net_device *real_dev = macvlan_dev_real_dev(dev); const struct net_device_ops *ops = real_dev->netdev_ops; @@ -845,8 +845,8 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) break; fallthrough; case SIOCGHWTSTAMP: - if (netif_device_present(real_dev) && ops->ndo_do_ioctl) - err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd); + if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) + err = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd); break; } @@ -1151,7 +1151,7 @@ static const struct net_device_ops macvlan_netdev_ops = { .ndo_stop = macvlan_stop, .ndo_start_xmit = macvlan_start_xmit, .ndo_change_mtu = macvlan_change_mtu, - .ndo_do_ioctl = macvlan_do_ioctl, + .ndo_eth_ioctl = macvlan_eth_ioctl, .ndo_fix_features = macvlan_fix_features, .ndo_change_rx_flags = macvlan_change_rx_flags, .ndo_set_mac_address = macvlan_set_mac_address, diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig new file mode 100644 index 000000000000..d8f966cedc89 --- /dev/null +++ b/drivers/net/mctp/Kconfig @@ -0,0 +1,8 @@ + +if MCTP + +menu "MCTP Device Drivers" + +endmenu + +endif diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/drivers/net/mctp/Makefile diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig index 99a6c13a11af..6da1fcb25847 100644 --- a/drivers/net/mdio/Kconfig +++ b/drivers/net/mdio/Kconfig @@ -169,9 +169,10 @@ config MDIO_OCTEON config MDIO_IPQ4019 tristate "Qualcomm IPQ4019 MDIO interface support" depends on HAS_IOMEM && OF_MDIO + depends on COMMON_CLK help This driver supports the MDIO interface found in Qualcomm - IPQ40xx series Soc-s. + IPQ40xx, IPQ60xx, IPQ807x and IPQ50xx series SoCs.
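In the mdio-ipq4019 changes below, the clock is requested with devm_clk_get_optional(), which returns NULL (not an error) when no such clock is described in the device tree, and the clk API accepts a NULL clock as a no-op. The reset path therefore needs no "is there a clock?" branching. The pattern in isolation (sketch):

#include <linux/clk.h>

static int example_bus_reset(struct clk *mdio_clk, unsigned long rate)
{
	int ret;

	ret = clk_set_rate(mdio_clk, rate);	/* NULL clk: returns 0 */
	if (ret)
		return ret;

	return clk_prepare_enable(mdio_clk);	/* NULL clk: returns 0 */
}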
config MDIO_IPQ8064 tristate "Qualcomm IPQ8064 MDIO interface support" diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index 9cd71d896963..0d7d3e15d2f0 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -11,6 +11,7 @@ #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/clk.h> #define MDIO_MODE_REG 0x40 #define MDIO_ADDR_REG 0x44 @@ -31,8 +32,15 @@ #define IPQ4019_MDIO_TIMEOUT 10000 #define IPQ4019_MDIO_SLEEP 10 +/* MDIO clock source frequency is fixed to 100M */ +#define IPQ_MDIO_CLK_RATE 100000000 + +#define IPQ_PHY_SET_DELAY_US 100000 + struct ipq4019_mdio_data { void __iomem *membase; + void __iomem *eth_ldo_rdy; + struct clk *mdio_clk; }; static int ipq4019_mdio_wait_busy(struct mii_bus *bus) @@ -171,6 +179,30 @@ static int ipq4019_mdio_write(struct mii_bus *bus, int mii_id, int regnum, return 0; } +static int ipq_mdio_reset(struct mii_bus *bus) +{ + struct ipq4019_mdio_data *priv = bus->priv; + u32 val; + int ret; + + /* Indicate to the CMN_PLL block that the ethernet LDO is ready, if + * platform resource 1 is specified in the device tree. + */ + if (priv->eth_ldo_rdy) { + val = readl(priv->eth_ldo_rdy); + val |= BIT(0); + writel(val, priv->eth_ldo_rdy); + fsleep(IPQ_PHY_SET_DELAY_US); + } + + /* Configure MDIO clock source frequency if clock is specified in the device tree */ + ret = clk_set_rate(priv->mdio_clk, IPQ_MDIO_CLK_RATE); + if (ret) + return ret; + + return clk_prepare_enable(priv->mdio_clk); +} + static int ipq4019_mdio_probe(struct platform_device *pdev) { struct ipq4019_mdio_data *priv; @@ -187,9 +219,17 @@ static int ipq4019_mdio_probe(struct platform_device *pdev) if (IS_ERR(priv->membase)) return PTR_ERR(priv->membase); + priv->mdio_clk = devm_clk_get_optional(&pdev->dev, "gcc_mdio_ahb_clk"); + if (IS_ERR(priv->mdio_clk)) + return PTR_ERR(priv->mdio_clk); + + /* The platform resource is provided on the chipset IPQ5018 */ + priv->eth_ldo_rdy = devm_platform_ioremap_resource(pdev, 1); + bus->name = "ipq4019_mdio"; bus->read = ipq4019_mdio_read; bus->write = ipq4019_mdio_write; + bus->reset = ipq_mdio_reset; bus->parent = &pdev->dev; snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id); @@ -215,6 +255,7 @@ static int ipq4019_mdio_remove(struct platform_device *pdev) static const struct of_device_id ipq4019_mdio_dt_ids[] = { { .compatible = "qcom,ipq4019-mdio" }, + { .compatible = "qcom,ipq5018-mdio" }, { } }; MODULE_DEVICE_TABLE(of, ipq4019_mdio_dt_ids); diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index 2d67e12c8262..1ee592d3eae4 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -134,7 +134,6 @@ static int mscc_miim_reset(struct mii_bus *bus) static int mscc_miim_probe(struct platform_device *pdev) { - struct resource *res; struct mii_bus *bus; struct mscc_miim_dev *dev; int ret; @@ -157,13 +156,10 @@ static int mscc_miim_probe(struct platform_device *pdev) return PTR_ERR(dev->regs); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (res) { - dev->phy_regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dev->phy_regs)) { - dev_err(&pdev->dev, "Unable to map internal phy registers\n"); - return PTR_ERR(dev->phy_regs); - } + dev->phy_regs = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(dev->phy_regs)) { + dev_err(&pdev->dev, "Unable to map internal phy registers\n"); + return PTR_ERR(dev->phy_regs); } ret = of_mdiobus_register(bus,
pdev->dev.of_node); diff --git a/drivers/net/mhi/Makefile b/drivers/net/mhi/Makefile deleted file mode 100644 index f71b9f8f3c4f..000000000000 --- a/drivers/net/mhi/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -obj-$(CONFIG_MHI_NET) += mhi_net.o - -mhi_net-y := net.o proto_mbim.o diff --git a/drivers/net/mhi/mhi.h b/drivers/net/mhi/mhi.h deleted file mode 100644 index 1d0c499d27a3..000000000000 --- a/drivers/net/mhi/mhi.h +++ /dev/null @@ -1,41 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* MHI Network driver - Network over MHI bus - * - * Copyright (C) 2021 Linaro Ltd <loic.poulain@linaro.org> - */ - -struct mhi_net_stats { - u64_stats_t rx_packets; - u64_stats_t rx_bytes; - u64_stats_t rx_errors; - u64_stats_t rx_dropped; - u64_stats_t rx_length_errors; - u64_stats_t tx_packets; - u64_stats_t tx_bytes; - u64_stats_t tx_errors; - u64_stats_t tx_dropped; - struct u64_stats_sync tx_syncp; - struct u64_stats_sync rx_syncp; -}; - -struct mhi_net_dev { - struct mhi_device *mdev; - struct net_device *ndev; - struct sk_buff *skbagg_head; - struct sk_buff *skbagg_tail; - const struct mhi_net_proto *proto; - void *proto_data; - struct delayed_work rx_refill; - struct mhi_net_stats stats; - u32 rx_queue_sz; - int msg_enable; - unsigned int mru; -}; - -struct mhi_net_proto { - int (*init)(struct mhi_net_dev *mhi_netdev); - struct sk_buff * (*tx_fixup)(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb); - void (*rx)(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb); -}; - -extern const struct mhi_net_proto proto_mbim; diff --git a/drivers/net/mhi/proto_mbim.c b/drivers/net/mhi/proto_mbim.c deleted file mode 100644 index bf1ad863237d..000000000000 --- a/drivers/net/mhi/proto_mbim.c +++ /dev/null @@ -1,304 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* MHI Network driver - Network over MHI bus - * - * Copyright (C) 2021 Linaro Ltd <loic.poulain@linaro.org> - * - * This driver copy some code from cdc_ncm, which is: - * Copyright (C) ST-Ericsson 2010-2012 - * and cdc_mbim, which is: - * Copyright (c) 2012 Smith Micro Software, Inc. - * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no> - * - */ - -#include <linux/ethtool.h> -#include <linux/if_vlan.h> -#include <linux/ip.h> -#include <linux/mii.h> -#include <linux/netdevice.h> -#include <linux/wwan.h> -#include <linux/skbuff.h> -#include <linux/usb.h> -#include <linux/usb/cdc.h> -#include <linux/usb/usbnet.h> -#include <linux/usb/cdc_ncm.h> - -#include "mhi.h" - -#define MBIM_NDP16_SIGN_MASK 0x00ffffff - -/* Usual WWAN MTU */ -#define MHI_MBIM_DEFAULT_MTU 1500 - -/* 3500 allows to optimize skb allocation, the skbs will basically fit in - * one 4K page. Large MBIM packets will simply be split over several MHI - * transfers and chained by the MHI net layer (zerocopy). 
- */ -#define MHI_MBIM_DEFAULT_MRU 3500 - -struct mbim_context { - u16 rx_seq; - u16 tx_seq; -}; - -static void __mbim_length_errors_inc(struct mhi_net_dev *dev) -{ - u64_stats_update_begin(&dev->stats.rx_syncp); - u64_stats_inc(&dev->stats.rx_length_errors); - u64_stats_update_end(&dev->stats.rx_syncp); -} - -static void __mbim_errors_inc(struct mhi_net_dev *dev) -{ - u64_stats_update_begin(&dev->stats.rx_syncp); - u64_stats_inc(&dev->stats.rx_errors); - u64_stats_update_end(&dev->stats.rx_syncp); -} - -static int mbim_rx_verify_nth16(struct sk_buff *skb) -{ - struct mhi_net_dev *dev = wwan_netdev_drvpriv(skb->dev); - struct mbim_context *ctx = dev->proto_data; - struct usb_cdc_ncm_nth16 *nth16; - int len; - - if (skb->len < sizeof(struct usb_cdc_ncm_nth16) + - sizeof(struct usb_cdc_ncm_ndp16)) { - netif_dbg(dev, rx_err, dev->ndev, "frame too short\n"); - __mbim_length_errors_inc(dev); - return -EINVAL; - } - - nth16 = (struct usb_cdc_ncm_nth16 *)skb->data; - - if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) { - netif_dbg(dev, rx_err, dev->ndev, - "invalid NTH16 signature <%#010x>\n", - le32_to_cpu(nth16->dwSignature)); - __mbim_errors_inc(dev); - return -EINVAL; - } - - /* No limit on the block length, except the size of the data pkt */ - len = le16_to_cpu(nth16->wBlockLength); - if (len > skb->len) { - netif_dbg(dev, rx_err, dev->ndev, - "NTB does not fit into the skb %u/%u\n", len, - skb->len); - __mbim_length_errors_inc(dev); - return -EINVAL; - } - - if (ctx->rx_seq + 1 != le16_to_cpu(nth16->wSequence) && - (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) && - !(ctx->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) { - netif_dbg(dev, rx_err, dev->ndev, - "sequence number glitch prev=%d curr=%d\n", - ctx->rx_seq, le16_to_cpu(nth16->wSequence)); - } - ctx->rx_seq = le16_to_cpu(nth16->wSequence); - - return le16_to_cpu(nth16->wNdpIndex); -} - -static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16) -{ - struct mhi_net_dev *dev = wwan_netdev_drvpriv(skb->dev); - int ret; - - if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) { - netif_dbg(dev, rx_err, dev->ndev, "invalid DPT16 length <%u>\n", - le16_to_cpu(ndp16->wLength)); - return -EINVAL; - } - - ret = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16)) - / sizeof(struct usb_cdc_ncm_dpe16)); - ret--; /* Last entry is always a NULL terminator */ - - if (sizeof(struct usb_cdc_ncm_ndp16) + - ret * sizeof(struct usb_cdc_ncm_dpe16) > skb->len) { - netif_dbg(dev, rx_err, dev->ndev, - "Invalid nframes = %d\n", ret); - return -EINVAL; - } - - return ret; -} - -static void mbim_rx(struct mhi_net_dev *mhi_netdev, struct sk_buff *skb) -{ - struct net_device *ndev = mhi_netdev->ndev; - int ndpoffset; - - /* Check NTB header and retrieve first NDP offset */ - ndpoffset = mbim_rx_verify_nth16(skb); - if (ndpoffset < 0) { - net_err_ratelimited("%s: Incorrect NTB header\n", ndev->name); - goto error; - } - - /* Process each NDP */ - while (1) { - struct usb_cdc_ncm_ndp16 ndp16; - struct usb_cdc_ncm_dpe16 dpe16; - int nframes, n, dpeoffset; - - if (skb_copy_bits(skb, ndpoffset, &ndp16, sizeof(ndp16))) { - net_err_ratelimited("%s: Incorrect NDP offset (%u)\n", - ndev->name, ndpoffset); - __mbim_length_errors_inc(mhi_netdev); - goto error; - } - - /* Check NDP header and retrieve number of datagrams */ - nframes = mbim_rx_verify_ndp16(skb, &ndp16); - if (nframes < 0) { - net_err_ratelimited("%s: Incorrect NDP16\n", ndev->name); - __mbim_length_errors_inc(mhi_netdev); - goto error; 
- } - - /* Only IP data type supported, no DSS in MHI context */ - if ((ndp16.dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK)) - != cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) { - net_err_ratelimited("%s: Unsupported NDP type\n", ndev->name); - __mbim_errors_inc(mhi_netdev); - goto next_ndp; - } - - /* Only primary IP session 0 (0x00) supported for now */ - if (ndp16.dwSignature & ~cpu_to_le32(MBIM_NDP16_SIGN_MASK)) { - net_err_ratelimited("%s: bad packet session\n", ndev->name); - __mbim_errors_inc(mhi_netdev); - goto next_ndp; - } - - /* de-aggregate and deliver IP packets */ - dpeoffset = ndpoffset + sizeof(struct usb_cdc_ncm_ndp16); - for (n = 0; n < nframes; n++, dpeoffset += sizeof(dpe16)) { - u16 dgram_offset, dgram_len; - struct sk_buff *skbn; - - if (skb_copy_bits(skb, dpeoffset, &dpe16, sizeof(dpe16))) - break; - - dgram_offset = le16_to_cpu(dpe16.wDatagramIndex); - dgram_len = le16_to_cpu(dpe16.wDatagramLength); - - if (!dgram_offset || !dgram_len) - break; /* null terminator */ - - skbn = netdev_alloc_skb(ndev, dgram_len); - if (!skbn) - continue; - - skb_put(skbn, dgram_len); - skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len); - - switch (skbn->data[0] & 0xf0) { - case 0x40: - skbn->protocol = htons(ETH_P_IP); - break; - case 0x60: - skbn->protocol = htons(ETH_P_IPV6); - break; - default: - net_err_ratelimited("%s: unknown protocol\n", - ndev->name); - __mbim_errors_inc(mhi_netdev); - dev_kfree_skb_any(skbn); - continue; - } - - netif_rx(skbn); - } -next_ndp: - /* Other NDP to process? */ - ndpoffset = (int)le16_to_cpu(ndp16.wNextNdpIndex); - if (!ndpoffset) - break; - } - - /* free skb */ - dev_consume_skb_any(skb); - return; -error: - dev_kfree_skb_any(skb); -} - -struct mbim_tx_hdr { - struct usb_cdc_ncm_nth16 nth16; - struct usb_cdc_ncm_ndp16 ndp16; - struct usb_cdc_ncm_dpe16 dpe16[2]; -} __packed; - -static struct sk_buff *mbim_tx_fixup(struct mhi_net_dev *mhi_netdev, - struct sk_buff *skb) -{ - struct mbim_context *ctx = mhi_netdev->proto_data; - unsigned int dgram_size = skb->len; - struct usb_cdc_ncm_nth16 *nth16; - struct usb_cdc_ncm_ndp16 *ndp16; - struct mbim_tx_hdr *mbim_hdr; - - /* For now, this is a partial implementation of CDC MBIM, only one NDP - * is sent, containing the IP packet (no aggregation). 
- */ - - /* Ensure we have enough headroom for crafting MBIM header */ - if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) { - dev_kfree_skb_any(skb); - return NULL; - } - - mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr)); - - /* Fill NTB header */ - nth16 = &mbim_hdr->nth16; - nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN); - nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)); - nth16->wSequence = cpu_to_le16(ctx->tx_seq++); - nth16->wBlockLength = cpu_to_le16(skb->len); - nth16->wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)); - - /* Fill the unique NDP */ - ndp16 = &mbim_hdr->ndp16; - ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN); - ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) - + sizeof(struct usb_cdc_ncm_dpe16) * 2); - ndp16->wNextNdpIndex = 0; - - /* Datagram follows the mbim header */ - ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr)); - ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size); - - /* null termination */ - ndp16->dpe16[1].wDatagramIndex = 0; - ndp16->dpe16[1].wDatagramLength = 0; - - return skb; -} - -static int mbim_init(struct mhi_net_dev *mhi_netdev) -{ - struct net_device *ndev = mhi_netdev->ndev; - - mhi_netdev->proto_data = devm_kzalloc(&ndev->dev, - sizeof(struct mbim_context), - GFP_KERNEL); - if (!mhi_netdev->proto_data) - return -ENOMEM; - - ndev->needed_headroom = sizeof(struct mbim_tx_hdr); - ndev->mtu = MHI_MBIM_DEFAULT_MTU; - mhi_netdev->mru = MHI_MBIM_DEFAULT_MRU; - - return 0; -} - -const struct mhi_net_proto proto_mbim = { - .init = mbim_init, - .rx = mbim_rx, - .tx_fixup = mbim_tx_fixup, -}; diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi_net.c index e60e38c1f09d..d127eb6e9257 100644 --- a/drivers/net/mhi/net.c +++ b/drivers/net/mhi_net.c @@ -11,28 +11,42 @@ #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/u64_stats_sync.h> -#include <linux/wwan.h> - -#include "mhi.h" #define MHI_NET_MIN_MTU ETH_MIN_MTU #define MHI_NET_MAX_MTU 0xffff #define MHI_NET_DEFAULT_MTU 0x4000 -/* When set to false, the default netdev (link 0) is not created, and it's up - * to user to create the link (via wwan rtnetlink). 
- */ -static bool create_default_iface = true; -module_param(create_default_iface, bool, 0); +struct mhi_net_stats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_errors; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_errors; + u64_stats_t tx_dropped; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync rx_syncp; +}; + +struct mhi_net_dev { + struct mhi_device *mdev; + struct net_device *ndev; + struct sk_buff *skbagg_head; + struct sk_buff *skbagg_tail; + struct delayed_work rx_refill; + struct mhi_net_stats stats; + u32 rx_queue_sz; + int msg_enable; + unsigned int mru; +}; struct mhi_device_info { const char *netname; - const struct mhi_net_proto *proto; }; static int mhi_ndo_open(struct net_device *ndev) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); /* Feed the rx buffer pool */ schedule_delayed_work(&mhi_netdev->rx_refill, 0); @@ -47,7 +61,7 @@ static int mhi_ndo_open(struct net_device *ndev) static int mhi_ndo_stop(struct net_device *ndev) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); netif_stop_queue(ndev); netif_carrier_off(ndev); @@ -58,17 +72,10 @@ static int mhi_ndo_stop(struct net_device *ndev) static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); - const struct mhi_net_proto *proto = mhi_netdev->proto; + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); struct mhi_device *mdev = mhi_netdev->mdev; int err; - if (proto && proto->tx_fixup) { - skb = proto->tx_fixup(mhi_netdev, skb); - if (unlikely(!skb)) - goto exit_drop; - } - err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT); if (unlikely(err)) { net_err_ratelimited("%s: Failed to queue TX buf (%d)\n", @@ -93,7 +100,7 @@ exit_drop: static void mhi_ndo_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); unsigned int start; do { @@ -101,8 +108,6 @@ static void mhi_ndo_get_stats64(struct net_device *ndev, stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets); stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes); stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors); - stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped); - stats->rx_length_errors = u64_stats_read(&mhi_netdev->stats.rx_length_errors); } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start)); do { @@ -165,7 +170,6 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev, struct mhi_result *mhi_res) { struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); - const struct mhi_net_proto *proto = mhi_netdev->proto; struct sk_buff *skb = mhi_res->buf_addr; int free_desc_count; @@ -205,11 +209,6 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev, mhi_netdev->skbagg_head = NULL; } - u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); - u64_stats_inc(&mhi_netdev->stats.rx_packets); - u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len); - u64_stats_update_end(&mhi_netdev->stats.rx_syncp); - switch (skb->data[0] & 0xf0) { case 0x40: skb->protocol = htons(ETH_P_IP); @@ -222,10 +221,11 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev, break; } - if (proto && proto->rx) - proto->rx(mhi_netdev, skb); - else - netif_rx(skb); + 
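The stats handling in these mhi_net hunks uses the kernel's u64_stats machinery: writers wrap updates in u64_stats_update_begin()/u64_stats_update_end(), and readers retry until they observe a consistent snapshot (a seqcount on 32-bit systems; nearly free on 64-bit). A compact reference sketch (hypothetical struct; the _irq fetch helpers match the kernel version this patch targets):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	struct u64_stats_sync syncp;
};

/* Writer side: called per received packet */
static void example_stats_rx(struct example_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->rx_packets);
	u64_stats_add(&s->rx_bytes, len);
	u64_stats_update_end(&s->syncp);
}

/* Reader side: loop until no writer raced with the reads */
static void example_stats_read(struct example_stats *s,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = u64_stats_read(&s->rx_packets);
		*bytes = u64_stats_read(&s->rx_bytes);
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}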
u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); + u64_stats_inc(&mhi_netdev->stats.rx_packets); + u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len); + u64_stats_update_end(&mhi_netdev->stats.rx_syncp); + netif_rx(skb); } /* Refill if RX buffers queue becomes low */ @@ -248,7 +248,6 @@ static void mhi_net_ul_callback(struct mhi_device *mhi_dev, u64_stats_update_begin(&mhi_netdev->stats.tx_syncp); if (unlikely(mhi_res->transaction_status)) { - /* MHI layer stopping/resetting the UL channel */ if (mhi_res->transaction_status == -ENOTCONN) { u64_stats_update_end(&mhi_netdev->stats.tx_syncp); @@ -302,33 +301,18 @@ static void mhi_net_rx_refill_work(struct work_struct *work) schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2); } -static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id, - struct netlink_ext_ack *extack) +static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev) { - const struct mhi_device_info *info; - struct mhi_device *mhi_dev = ctxt; struct mhi_net_dev *mhi_netdev; int err; - info = (struct mhi_device_info *)mhi_dev->id->driver_data; - - /* For now we only support one link (link context 0), driver must be - * reworked to break 1:1 relationship for net MBIM and to forward setup - * call to rmnet(QMAP) otherwise. - */ - if (if_id != 0) - return -EINVAL; - - if (dev_get_drvdata(&mhi_dev->dev)) - return -EBUSY; - - mhi_netdev = wwan_netdev_drvpriv(ndev); + mhi_netdev = netdev_priv(ndev); dev_set_drvdata(&mhi_dev->dev, mhi_netdev); mhi_netdev->ndev = ndev; mhi_netdev->mdev = mhi_dev; mhi_netdev->skbagg_head = NULL; - mhi_netdev->proto = info->proto; + mhi_netdev->mru = mhi_dev->mhi_cntrl->mru; INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work); u64_stats_init(&mhi_netdev->stats.rx_syncp); @@ -342,38 +326,22 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id, /* Number of transfer descriptors determines size of the queue */ mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); - if (extack) - err = register_netdevice(ndev); - else - err = register_netdev(ndev); + err = register_netdev(ndev); if (err) - goto out_err; - - if (mhi_netdev->proto) { - err = mhi_netdev->proto->init(mhi_netdev); - if (err) - goto out_err_proto; - } + return err; return 0; -out_err_proto: - unregister_netdevice(ndev); out_err: free_netdev(ndev); return err; } -static void mhi_net_dellink(void *ctxt, struct net_device *ndev, - struct list_head *head) +static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); - struct mhi_device *mhi_dev = ctxt; + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); - if (head) - unregister_netdevice_queue(ndev, head); - else - unregister_netdev(ndev); + unregister_netdev(ndev); mhi_unprepare_from_transfer(mhi_dev); @@ -382,65 +350,34 @@ static void mhi_net_dellink(void *ctxt, struct net_device *ndev, dev_set_drvdata(&mhi_dev->dev, NULL); } -static const struct wwan_ops mhi_wwan_ops = { - .priv_size = sizeof(struct mhi_net_dev), - .setup = mhi_net_setup, - .newlink = mhi_net_newlink, - .dellink = mhi_net_dellink, -}; - static int mhi_net_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) { const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data; - struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; struct net_device *ndev; int err; - err = wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_wwan_ops, mhi_dev, - WWAN_NO_DEFAULT_LINK); - if (err) - 
return err; - - if (!create_default_iface) - return 0; - - /* Create a default interface which is used as either RMNET real-dev, - * MBIM link 0 or ip link 0) - */ ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname, NET_NAME_PREDICTABLE, mhi_net_setup); - if (!ndev) { - err = -ENOMEM; - goto err_unregister; - } + if (!ndev) + return -ENOMEM; SET_NETDEV_DEV(ndev, &mhi_dev->dev); - err = mhi_net_newlink(mhi_dev, ndev, 0, NULL); - if (err) - goto err_release; + err = mhi_net_newlink(mhi_dev, ndev); + if (err) { + free_netdev(ndev); + return err; + } return 0; - -err_release: - free_netdev(ndev); -err_unregister: - wwan_unregister_ops(&cntrl->mhi_dev->dev); - - return err; } static void mhi_net_remove(struct mhi_device *mhi_dev) { struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); - struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; - - /* WWAN core takes care of removing remaining links */ - wwan_unregister_ops(&cntrl->mhi_dev->dev); - if (create_default_iface) - mhi_net_dellink(mhi_dev, mhi_netdev->ndev, NULL); + mhi_net_dellink(mhi_dev, mhi_netdev->ndev); } static const struct mhi_device_info mhi_hwip0 = { @@ -451,18 +388,11 @@ static const struct mhi_device_info mhi_swip0 = { .netname = "mhi_swip%d", }; -static const struct mhi_device_info mhi_hwip0_mbim = { - .netname = "mhi_mbim%d", - .proto = &proto_mbim, -}; - static const struct mhi_device_id mhi_net_id_table[] = { /* Hardware accelerated data PATH (to modem IPA), protocol agnostic */ { .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 }, /* Software data PATH (to modem CPU) */ { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 }, - /* Hardware accelerated data PATH (to modem IPA), MBIM protocol */ - { .chan = "IP_HW0_MBIM", .driver_data = (kernel_ulong_t)&mhi_hwip0_mbim }, {} }; MODULE_DEVICE_TABLE(mhi, mhi_net_id_table); diff --git a/drivers/net/mii.c b/drivers/net/mii.c index 779c3a96dba7..22680f47385d 100644 --- a/drivers/net/mii.c +++ b/drivers/net/mii.c @@ -49,10 +49,8 @@ static u32 mii_get_an(struct mii_if_info *mii, u16 addr) * * The @ecmd parameter is expected to have been cleared before calling * mii_ethtool_gset(). - * - * Returns 0 for success, negative on error. 
*/ -int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) +void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) { struct net_device *dev = mii->dev; u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0; @@ -131,8 +129,6 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) mii->full_duplex = ecmd->duplex; /* ignore maxtxpkt, maxrxpkt for now */ - - return 0; } /** diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c index ccec29970d5b..62d033a1a557 100644 --- a/drivers/net/netdevsim/bus.c +++ b/drivers/net/netdevsim/bus.c @@ -183,8 +183,6 @@ new_port_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); - struct nsim_dev *nsim_dev = dev_get_drvdata(dev); - struct devlink *devlink; unsigned int port_index; int ret; @@ -195,12 +193,15 @@ new_port_store(struct device *dev, struct device_attribute *attr, if (ret) return ret; - devlink = priv_to_devlink(nsim_dev); + if (!mutex_trylock(&nsim_bus_dev->nsim_bus_reload_lock)) + return -EBUSY; + + if (nsim_bus_dev->in_reload) { + mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock); + return -EBUSY; + } - mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock); - devlink_reload_disable(devlink); ret = nsim_dev_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index); - devlink_reload_enable(devlink); mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock); return ret ? ret : count; } @@ -212,8 +213,6 @@ del_port_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); - struct nsim_dev *nsim_dev = dev_get_drvdata(dev); - struct devlink *devlink; unsigned int port_index; int ret; @@ -224,12 +223,15 @@ del_port_store(struct device *dev, struct device_attribute *attr, if (ret) return ret; - devlink = priv_to_devlink(nsim_dev); + if (!mutex_trylock(&nsim_bus_dev->nsim_bus_reload_lock)) + return -EBUSY; + + if (nsim_bus_dev->in_reload) { + mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock); + return -EBUSY; + } - mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock); - devlink_reload_disable(devlink); ret = nsim_dev_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index); - devlink_reload_enable(devlink); mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock); return ret ? 
ret : count; } @@ -262,29 +264,31 @@ static struct device_type nsim_bus_dev_type = { }; static struct nsim_bus_dev * -nsim_bus_dev_new(unsigned int id, unsigned int port_count); +nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queues); static ssize_t new_device_store(struct bus_type *bus, const char *buf, size_t count) { + unsigned int id, port_count, num_queues; struct nsim_bus_dev *nsim_bus_dev; - unsigned int port_count; - unsigned int id; int err; - err = sscanf(buf, "%u %u", &id, &port_count); + err = sscanf(buf, "%u %u %u", &id, &port_count, &num_queues); switch (err) { case 1: port_count = 1; fallthrough; case 2: + num_queues = 1; + fallthrough; + case 3: if (id > INT_MAX) { pr_err("Value of \"id\" is too big.\n"); return -EINVAL; } break; default: - pr_err("Format for adding new device is \"id port_count\" (uint uint).\n"); + pr_err("Format for adding new device is \"id port_count num_queues\" (uint uint uint).\n"); return -EINVAL; } @@ -295,7 +299,7 @@ new_device_store(struct bus_type *bus, const char *buf, size_t count) goto err; } - nsim_bus_dev = nsim_bus_dev_new(id, port_count); + nsim_bus_dev = nsim_bus_dev_new(id, port_count, num_queues); if (IS_ERR(nsim_bus_dev)) { err = PTR_ERR(nsim_bus_dev); goto err; @@ -397,7 +401,7 @@ static struct bus_type nsim_bus = { #define NSIM_BUS_DEV_MAX_VFS 4 static struct nsim_bus_dev * -nsim_bus_dev_new(unsigned int id, unsigned int port_count) +nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queues) { struct nsim_bus_dev *nsim_bus_dev; int err; @@ -413,6 +417,7 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count) nsim_bus_dev->dev.bus = &nsim_bus; nsim_bus_dev->dev.type = &nsim_bus_dev_type; nsim_bus_dev->port_count = port_count; + nsim_bus_dev->num_queues = num_queues; nsim_bus_dev->initial_net = current->nsproxy->net_ns; nsim_bus_dev->max_vfs = NSIM_BUS_DEV_MAX_VFS; mutex_init(&nsim_bus_dev->nsim_bus_reload_lock); diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 6348307bfa84..54313bd57797 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -864,16 +864,24 @@ static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change, struct netlink_ext_ack *extack) { struct nsim_dev *nsim_dev = devlink_priv(devlink); + struct nsim_bus_dev *nsim_bus_dev; + + nsim_bus_dev = nsim_dev->nsim_bus_dev; + if (!mutex_trylock(&nsim_bus_dev->nsim_bus_reload_lock)) + return -EOPNOTSUPP; if (nsim_dev->dont_allow_reload) { /* For testing purposes, user set debugfs dont_allow_reload * value to true. So forbid it.
*/ NL_SET_ERR_MSG_MOD(extack, "User setup the reload to fail for testing purposes"); + mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock); return -EINVAL; } *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); - return nsim_dev_reload_create(nsim_dev, extack); + ret = nsim_dev_reload_create(nsim_dev, extack); + mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock); + return ret; } static int nsim_dev_info_get(struct devlink *devlink, @@ -1431,10 +1448,10 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) struct devlink *devlink; int err; - devlink = devlink_alloc(&nsim_dev_devlink_ops, sizeof(*nsim_dev)); + devlink = devlink_alloc_ns(&nsim_dev_devlink_ops, sizeof(*nsim_dev), + nsim_bus_dev->initial_net, &nsim_bus_dev->dev); if (!devlink) return -ENOMEM; - devlink_net_set(devlink, nsim_bus_dev->initial_net); nsim_dev = devlink_priv(devlink); nsim_dev->nsim_bus_dev = nsim_bus_dev; nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id); @@ -1453,7 +1470,7 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) if (err) goto err_devlink_free; - err = devlink_register(devlink, &nsim_bus_dev->dev); + err = devlink_register(devlink); if (err) goto err_resources_unregister; diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c index c9ae52595a8f..b03a0513eb7e 100644 --- a/drivers/net/netdevsim/ethtool.c +++ b/drivers/net/netdevsim/ethtool.c @@ -43,7 +43,9 @@ nsim_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) } static int nsim_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct netdevsim *ns = netdev_priv(dev); @@ -52,7 +54,9 @@ static int nsim_get_coalesce(struct net_device *dev, } static int nsim_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct netdevsim *ns = netdev_priv(dev); diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 213d3e5056c8..4300261e2f9e 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -1441,7 +1441,7 @@ static u64 nsim_fib_nexthops_res_occ_get(void *priv) static void nsim_fib_set_max_all(struct nsim_fib_data *data, struct devlink *devlink) { - enum nsim_resource_id res_ids[] = { + static const enum nsim_resource_id res_ids[] = { NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES, NSIM_RESOURCE_NEXTHOPS, diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index c3aeb15843e2..50572e0f1f52 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -347,7 +347,8 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) struct netdevsim *ns; int err; - dev = alloc_netdev(sizeof(*ns), "eth%d", NET_NAME_UNKNOWN, nsim_setup); + dev = alloc_netdev_mq(sizeof(*ns), "eth%d", NET_NAME_UNKNOWN, nsim_setup, + nsim_dev->nsim_bus_dev->num_queues); if (!dev) return ERR_PTR(-ENOMEM); @@ -392,7 +393,8 @@ void nsim_destroy(struct netdevsim *ns) static int nsim_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - NL_SET_ERR_MSG_MOD(extack, "Please use: echo \"[ID] [PORT_COUNT]\" > /sys/bus/netdevsim/new_device"); + NL_SET_ERR_MSG_MOD(extack, + "Please use: echo \"[ID] [PORT_COUNT] [NUM_QUEUES]\" > /sys/bus/netdevsim/new_device"); return 
-EOPNOTSUPP; } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index ae462957dcee..793c86dc5a9c 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -352,6 +352,7 @@ struct nsim_bus_dev { struct device dev; struct list_head list; unsigned int port_count; + unsigned int num_queues; /* Number of queues for each port on this bus */ struct net *initial_net; /* Purpose of this is to carry net pointer * during the probe time only. */ @@ -361,6 +362,7 @@ struct nsim_bus_dev { struct nsim_vf_config *vfconfigs; /* Lock for devlink->reload_enabled in netdevsim module */ struct mutex nsim_bus_reload_lock; + bool in_reload; bool init; }; diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 4bd61339823c..fb0a83dc09ac 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -65,6 +65,9 @@ static const int xpcs_xlgmii_features[] = { }; static const int xpcs_sgmii_features[] = { + ETHTOOL_LINK_MODE_Pause_BIT, + ETHTOOL_LINK_MODE_Asym_Pause_BIT, + ETHTOOL_LINK_MODE_Autoneg_BIT, ETHTOOL_LINK_MODE_10baseT_Half_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT, @@ -75,6 +78,7 @@ static const int xpcs_sgmii_features[] = { }; static const int xpcs_2500basex_features[] = { + ETHTOOL_LINK_MODE_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT, ETHTOOL_LINK_MODE_2500baseX_Full_BIT, diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index c56f703ae998..902495afcb38 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -207,6 +207,12 @@ config MARVELL_88X2222_PHY Support for the Marvell 88X2222 Dual-port Multi-speed Ethernet Transceiver. +config MAXLINEAR_GPHY + tristate "Maxlinear Ethernet PHYs" + help + Support for the Maxlinear GPY115, GPY211, GPY212, GPY215, + GPY241, GPY245 PHYs. + config MEDIATEK_GE_PHY tristate "MediaTek Gigabit Ethernet PHYs" help @@ -230,6 +236,7 @@ config MICROCHIP_T1_PHY config MICROSEMI_PHY tristate "Microsemi PHYs" depends on MACSEC || MACSEC=n + depends on PTP_1588_CLOCK_OPTIONAL || !NETWORK_PHY_TIMESTAMPING select CRYPTO_LIB_AES if MACSEC help Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs @@ -247,6 +254,7 @@ config NATIONAL_PHY config NXP_C45_TJA11XX_PHY tristate "NXP C45 TJA11XX PHYs" + depends on PTP_1588_CLOCK_OPTIONAL help Enable support for NXP C45 TJA11XX PHYs. Currently supports only the TJA1103 PHY. 
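The netdevsim hunks above extend the bus's new_device attribute from "id port_count" to "id port_count num_queues"; each omitted trailing field defaults to 1 through the switch fallthrough, and num_queues is then handed to alloc_netdev_mq() for every port. Below is a minimal user-space sketch of that defaulting pattern, assuming only standard C; parse_new_device() and the sample input are illustrative names, not kernel API:

#include <stdio.h>

/* Mirrors new_device_store()'s sscanf + fallthrough defaulting:
 *   "1"     -> id=1, port_count=1, num_queues=1
 *   "1 4"   -> id=1, port_count=4, num_queues=1
 *   "1 4 8" -> id=1, port_count=4, num_queues=8
 */
static int parse_new_device(const char *buf, unsigned int *id,
			    unsigned int *port_count, unsigned int *num_queues)
{
	switch (sscanf(buf, "%u %u %u", id, port_count, num_queues)) {
	case 1:
		*port_count = 1;	/* default: a single port */
		/* fall through */
	case 2:
		*num_queues = 1;	/* default: one queue per port */
		/* fall through */
	case 3:
		return 0;
	default:
		return -1;		/* not even an id was given */
	}
}

int main(void)
{
	unsigned int id, ports, queues;

	/* corresponds to: echo "1 4 8" > /sys/bus/netdevsim/new_device */
	if (parse_new_device("1 4 8", &id, &ports, &queues) == 0)
		printf("id=%u ports=%u queues=%u\n", id, ports, queues);
	return 0;
}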
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 172bb193ae6a..b2728d00fc9a 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -64,6 +64,7 @@ obj-$(CONFIG_LXT_PHY) += lxt.o obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o obj-$(CONFIG_MARVELL_PHY) += marvell.o obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o +obj-$(CONFIG_MAXLINEAR_GPHY) += mxl-gpy.o obj-$(CONFIG_MEDIATEK_GE_PHY) += mediatek-ge.o obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 5d62b85a4024..bdac087058b2 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -532,12 +532,6 @@ static int at8031_register_regulators(struct phy_device *phydev) return 0; } -static bool at803x_match_phy_id(struct phy_device *phydev, u32 phy_id) -{ - return (phydev->phy_id & phydev->drv->phy_id_mask) - == (phy_id & phydev->drv->phy_id_mask); -} - static int at803x_parse_dt(struct phy_device *phydev) { struct device_node *node = phydev->mdio.dev.of_node; @@ -602,8 +596,8 @@ static int at803x_parse_dt(struct phy_device *phydev) * to the AR8030 so there might be a good chance it works on * the AR8030 too. */ - if (at803x_match_phy_id(phydev, ATH8030_PHY_ID) || - at803x_match_phy_id(phydev, ATH8035_PHY_ID)) { + if (phydev->drv->phy_id == ATH8030_PHY_ID || + phydev->drv->phy_id == ATH8035_PHY_ID) { priv->clk_25m_reg &= AT8035_CLK_OUT_MASK; priv->clk_25m_mask &= AT8035_CLK_OUT_MASK; } @@ -631,7 +625,7 @@ static int at803x_parse_dt(struct phy_device *phydev) /* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping * options. */ - if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) { + if (phydev->drv->phy_id == ATH8031_PHY_ID) { if (of_property_read_bool(node, "qca,keep-pll-enabled")) priv->flags |= AT803X_KEEP_PLL_ENABLED; @@ -676,7 +670,7 @@ static int at803x_probe(struct phy_device *phydev) * Switch to the copper page, as otherwise we read * the PHY capabilities from the fiber side. 
*/ - if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) { + if (phydev->drv->phy_id == ATH8031_PHY_ID) { phy_lock_mdio_bus(phydev); ret = at803x_write_page(phydev, AT803X_PAGE_COPPER); phy_unlock_mdio_bus(phydev); @@ -709,7 +703,7 @@ static int at803x_get_features(struct phy_device *phydev) if (err) return err; - if (!at803x_match_phy_id(phydev, ATH8031_PHY_ID)) + if (phydev->drv->phy_id != ATH8031_PHY_ID) return 0; /* AR8031/AR8033 have different status registers @@ -820,7 +814,7 @@ static int at803x_config_init(struct phy_device *phydev) if (ret < 0) return ret; - if (at803x_match_phy_id(phydev, ATH8031_PHY_ID)) { + if (phydev->drv->phy_id == ATH8031_PHY_ID) { ret = at8031_pll_config(phydev); if (ret < 0) return ret; diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index f7a2ec150e54..211b5476a6f5 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -326,11 +326,9 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev) static int dp8382x_disable_wol(struct phy_device *phydev) { - int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN | - DP83822_WOL_SECURE_ON; - - return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, - MII_DP83822_WOL_CFG, value); + return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, + DP83822_WOL_EN | DP83822_WOL_MAGIC_EN | + DP83822_WOL_SECURE_ON); } static int dp83822_read_status(struct phy_device *phydev) diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c index d453ec016168..3c032868ef04 100644 --- a/drivers/net/phy/intel-xway.c +++ b/drivers/net/phy/intel-xway.c @@ -8,11 +8,16 @@ #include <linux/module.h> #include <linux/phy.h> #include <linux/of.h> +#include <linux/bitfield.h> +#define XWAY_MDIO_MIICTRL 0x17 /* mii control */ #define XWAY_MDIO_IMASK 0x19 /* interrupt mask */ #define XWAY_MDIO_ISTAT 0x1A /* interrupt status */ #define XWAY_MDIO_LED 0x1B /* led control */ +#define XWAY_MDIO_MIICTRL_RXSKEW_MASK GENMASK(14, 12) +#define XWAY_MDIO_MIICTRL_TXSKEW_MASK GENMASK(10, 8) + /* bit 15:12 are reserved */ #define XWAY_MDIO_LED_LED3_EN BIT(11) /* Enable the integrated function of LED3 */ #define XWAY_MDIO_LED_LED2_EN BIT(10) /* Enable the integrated function of LED2 */ @@ -157,6 +162,73 @@ #define PHY_ID_PHY11G_VR9_1_2 0xD565A409 #define PHY_ID_PHY22F_VR9_1_2 0xD565A419 +static const int xway_internal_delay[] = {0, 500, 1000, 1500, 2000, 2500, + 3000, 3500}; + +static int xway_gphy_rgmii_init(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + unsigned int delay_size = ARRAY_SIZE(xway_internal_delay); + s32 int_delay; + int val = 0; + + if (!phy_interface_is_rgmii(phydev)) + return 0; + + /* Existing behavior was to use default pin strapping delay in rgmii + * mode, but rgmii should have meant no delay. Warn existing users, + * but do not change anything at the moment. + */ + if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { + u16 txskew, rxskew; + + val = phy_read(phydev, XWAY_MDIO_MIICTRL); + if (val < 0) + return val; + + txskew = FIELD_GET(XWAY_MDIO_MIICTRL_TXSKEW_MASK, val); + rxskew = FIELD_GET(XWAY_MDIO_MIICTRL_RXSKEW_MASK, val); + + if (txskew > 0 || rxskew > 0) + phydev_warn(phydev, + "PHY has delays (e.g. 
via pin strapping), but phy-mode = 'rgmii'\n" + "Should be 'rgmii-id' to use internal delays txskew:%d ps rxskew:%d ps\n", + xway_internal_delay[txskew], + xway_internal_delay[rxskew]); + return 0; + } + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { + int_delay = phy_get_internal_delay(phydev, dev, + xway_internal_delay, + delay_size, true); + + /* if rx-internal-delay-ps is missing, use default of 2.0 ns */ + if (int_delay < 0) + int_delay = 4; /* 2000 ps */ + + val |= FIELD_PREP(XWAY_MDIO_MIICTRL_RXSKEW_MASK, int_delay); + } + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { + int_delay = phy_get_internal_delay(phydev, dev, + xway_internal_delay, + delay_size, false); + + /* if tx-internal-delay-ps is missing, use default of 2.0 ns */ + if (int_delay < 0) + int_delay = 4; /* 2000 ps */ + + val |= FIELD_PREP(XWAY_MDIO_MIICTRL_TXSKEW_MASK, int_delay); + } + + return phy_modify(phydev, XWAY_MDIO_MIICTRL, + XWAY_MDIO_MIICTRL_RXSKEW_MASK | + XWAY_MDIO_MIICTRL_TXSKEW_MASK, val); +} + static int xway_gphy_config_init(struct phy_device *phydev) { int err; @@ -204,6 +276,10 @@ static int xway_gphy_config_init(struct phy_device *phydev) phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2H, ledxh); phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LED2L, ledxl); + err = xway_gphy_rgmii_init(phydev); + if (err) + return err; + return 0; } diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 3de93c9f2744..4fcfca4e1702 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -32,6 +32,7 @@ #include <linux/marvell_phy.h> #include <linux/bitfield.h> #include <linux/of.h> +#include <linux/sfp.h> #include <linux/io.h> #include <asm/irq.h> @@ -46,6 +47,7 @@ #define MII_MARVELL_MISC_TEST_PAGE 0x06 #define MII_MARVELL_VCT7_PAGE 0x07 #define MII_MARVELL_WOL_PAGE 0x11 +#define MII_MARVELL_MODE_PAGE 0x12 #define MII_M1011_IEVENT 0x13 #define MII_M1011_IEVENT_CLEAR 0x0000 @@ -155,6 +157,7 @@ #define MII_88E1318S_PHY_WOL_CTRL 0x10 #define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) +#define MII_88E1318S_PHY_WOL_CTRL_LINK_UP_ENABLE BIT(13) #define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14) #define MII_PHY_LED_CTRL 16 @@ -176,7 +179,14 @@ #define MII_88E1510_GEN_CTRL_REG_1 0x14 #define MII_88E1510_GEN_CTRL_REG_1_MODE_MASK 0x7 +#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII 0x0 /* RGMII to copper */ #define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */ +/* RGMII to 1000BASE-X */ +#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_1000X 0x2 +/* RGMII to 100BASE-FX */ +#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_100FX 0x3 +/* RGMII to SGMII */ +#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_SGMII 0x4 #define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */ #define MII_VCT5_TX_RX_MDI0_COUPLING 0x10 @@ -1746,13 +1756,19 @@ static void m88e1318_get_wol(struct phy_device *phydev, { int ret; - wol->supported = WAKE_MAGIC; + wol->supported = WAKE_MAGIC | WAKE_PHY; wol->wolopts = 0; ret = phy_read_paged(phydev, MII_MARVELL_WOL_PAGE, MII_88E1318S_PHY_WOL_CTRL); - if (ret >= 0 && ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE) + if (ret < 0) + return; + + if (ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE) wol->wolopts |= WAKE_MAGIC; + + if (ret & MII_88E1318S_PHY_WOL_CTRL_LINK_UP_ENABLE) + wol->wolopts |= WAKE_PHY; } static int m88e1318_set_wol(struct phy_device *phydev, @@ -1764,7 +1780,7 @@ 
static int m88e1318_set_wol(struct phy_device *phydev, if (oldpage < 0) goto error; - if (wol->wolopts & WAKE_MAGIC) { + if (wol->wolopts & (WAKE_MAGIC | WAKE_PHY)) { /* Explicitly switch to page 0x00, just to be sure */ err = marvell_write_page(phydev, MII_MARVELL_COPPER_PAGE); if (err < 0) @@ -1796,7 +1812,9 @@ static int m88e1318_set_wol(struct phy_device *phydev, MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW); if (err < 0) goto error; + } + if (wol->wolopts & WAKE_MAGIC) { err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE); if (err < 0) goto error; @@ -1837,6 +1855,30 @@ static int m88e1318_set_wol(struct phy_device *phydev, goto error; } + if (wol->wolopts & WAKE_PHY) { + err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE); + if (err < 0) + goto error; + + /* Clear WOL status and enable link up event */ + err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL, 0, + MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS | + MII_88E1318S_PHY_WOL_CTRL_LINK_UP_ENABLE); + if (err < 0) + goto error; + } else { + err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE); + if (err < 0) + goto error; + + /* Clear WOL status and disable link up event */ + err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL, + MII_88E1318S_PHY_WOL_CTRL_LINK_UP_ENABLE, + MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS); + if (err < 0) + goto error; + } + error: return phy_restore_page(phydev, oldpage, err); } @@ -2701,6 +2743,100 @@ static int marvell_probe(struct phy_device *phydev) return marvell_hwmon_probe(phydev); } +static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) +{ + struct phy_device *phydev = upstream; + phy_interface_t interface; + struct device *dev; + int oldpage; + int ret = 0; + u16 mode; + + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; + + dev = &phydev->mdio.dev; + + sfp_parse_support(phydev->sfp_bus, id, supported); + interface = sfp_select_interface(phydev->sfp_bus, supported); + + dev_info(dev, "%s SFP module inserted\n", phy_modes(interface)); + + switch (interface) { + case PHY_INTERFACE_MODE_1000BASEX: + mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_1000X; + + break; + case PHY_INTERFACE_MODE_100BASEX: + mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_100FX; + + break; + case PHY_INTERFACE_MODE_SGMII: + mode = MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_SGMII; + + break; + default: + dev_err(dev, "Incompatible SFP module inserted\n"); + + return -EINVAL; + } + + oldpage = phy_select_page(phydev, MII_MARVELL_MODE_PAGE); + if (oldpage < 0) + goto error; + + ret = __phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, + MII_88E1510_GEN_CTRL_REG_1_MODE_MASK, mode); + if (ret < 0) + goto error; + + ret = __phy_set_bits(phydev, MII_88E1510_GEN_CTRL_REG_1, + MII_88E1510_GEN_CTRL_REG_1_RESET); + +error: + return phy_restore_page(phydev, oldpage, ret); +} + +static void m88e1510_sfp_remove(void *upstream) +{ + struct phy_device *phydev = upstream; + int oldpage; + int ret = 0; + + oldpage = phy_select_page(phydev, MII_MARVELL_MODE_PAGE); + if (oldpage < 0) + goto error; + + ret = __phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, + MII_88E1510_GEN_CTRL_REG_1_MODE_MASK, + MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII); + if (ret < 0) + goto error; + + ret = __phy_set_bits(phydev, MII_88E1510_GEN_CTRL_REG_1, + MII_88E1510_GEN_CTRL_REG_1_RESET); + +error: + phy_restore_page(phydev, oldpage, ret); +} + +static const struct sfp_upstream_ops m88e1510_sfp_ops = { + .module_insert = m88e1510_sfp_insert, + .module_remove = m88e1510_sfp_remove, + .attach = phy_sfp_attach, + .detach = phy_sfp_detach, +}; + +static 
int m88e1510_probe(struct phy_device *phydev) +{ + int err; + + err = marvell_probe(phydev); + if (err) + return err; + + return phy_sfp_probe(phydev, &m88e1510_sfp_ops); +} + static struct phy_driver marvell_drivers[] = { { .phy_id = MARVELL_PHY_ID_88E1101, @@ -2927,7 +3063,7 @@ static struct phy_driver marvell_drivers[] = { .driver_data = DEF_MARVELL_HWMON_OPS(m88e1510_hwmon_ops), .features = PHY_GBIT_FIBRE_FEATURES, .flags = PHY_POLL_CABLE_TEST, - .probe = marvell_probe, + .probe = m88e1510_probe, .config_init = m88e1510_config_init, .config_aneg = m88e1510_config_aneg, .read_status = marvell_read_status, diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 53a433442803..bd310e8d5e43 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -28,6 +28,7 @@ #include <linux/marvell_phy.h> #include <linux/phy.h> #include <linux/sfp.h> +#include <linux/netdevice.h> #define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe #define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa) @@ -104,6 +105,16 @@ enum { MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN = 0x5, MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH = 0x6, MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII = 0x7, + MV_V2_PORT_INTR_STS = 0xf040, + MV_V2_PORT_INTR_MASK = 0xf043, + MV_V2_PORT_INTR_STS_WOL_EN = BIT(8), + MV_V2_MAGIC_PKT_WORD0 = 0xf06b, + MV_V2_MAGIC_PKT_WORD1 = 0xf06c, + MV_V2_MAGIC_PKT_WORD2 = 0xf06d, + /* Wake on LAN registers */ + MV_V2_WOL_CTRL = 0xf06e, + MV_V2_WOL_CTRL_CLEAR_STS = BIT(15), + MV_V2_WOL_CTRL_MAGIC_PKT_EN = BIT(0), /* Temperature control/read registers (88X3310 only) */ MV_V2_TEMP_CTRL = 0xf08a, MV_V2_TEMP_CTRL_MASK = 0xc000, @@ -987,11 +998,19 @@ static int mv3310_get_number_of_ports(struct phy_device *phydev) static int mv3310_match_phy_device(struct phy_device *phydev) { + if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] & + MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310) + return 0; + return mv3310_get_number_of_ports(phydev) == 1; } static int mv3340_match_phy_device(struct phy_device *phydev) { + if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] & + MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310) + return 0; + return mv3310_get_number_of_ports(phydev) == 4; } @@ -1020,6 +1039,80 @@ static int mv2111_match_phy_device(struct phy_device *phydev) return mv211x_match_phy_device(phydev, false); } +static void mv3110_get_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + int ret; + + wol->supported = WAKE_MAGIC; + wol->wolopts = 0; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_WOL_CTRL); + if (ret < 0) + return; + + if (ret & MV_V2_WOL_CTRL_MAGIC_PKT_EN) + wol->wolopts |= WAKE_MAGIC; +} + +static int mv3110_set_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + int ret; + + if (wol->wolopts & WAKE_MAGIC) { + /* Enable the WOL interrupt */ + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, + MV_V2_PORT_INTR_MASK, + MV_V2_PORT_INTR_STS_WOL_EN); + if (ret < 0) + return ret; + + /* Store the device address for the magic packet */ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + MV_V2_MAGIC_PKT_WORD2, + ((phydev->attached_dev->dev_addr[5] << 8) | + phydev->attached_dev->dev_addr[4])); + if (ret < 0) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + MV_V2_MAGIC_PKT_WORD1, + ((phydev->attached_dev->dev_addr[3] << 8) | + phydev->attached_dev->dev_addr[2])); + if (ret < 0) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + MV_V2_MAGIC_PKT_WORD0, + ((phydev->attached_dev->dev_addr[1] << 8) | + 
phydev->attached_dev->dev_addr[0])); + if (ret < 0) + return ret; + + /* Clear WOL status and enable magic packet matching */ + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, + MV_V2_WOL_CTRL, + MV_V2_WOL_CTRL_MAGIC_PKT_EN | + MV_V2_WOL_CTRL_CLEAR_STS); + if (ret < 0) + return ret; + } else { + /* Disable magic packet matching & reset WOL status bit */ + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, + MV_V2_WOL_CTRL, + MV_V2_WOL_CTRL_MAGIC_PKT_EN, + MV_V2_WOL_CTRL_CLEAR_STS); + if (ret < 0) + return ret; + } + + /* Reset the clear WOL status bit as it does not self-clear */ + return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, + MV_V2_WOL_CTRL, + MV_V2_WOL_CTRL_CLEAR_STS); +} + static struct phy_driver mv3310_drivers[] = { { .phy_id = MARVELL_PHY_ID_88X3310, @@ -1039,6 +1132,8 @@ static struct phy_driver mv3310_drivers[] = { .set_tunable = mv3310_set_tunable, .remove = mv3310_remove, .set_loopback = genphy_c45_loopback, + .get_wol = mv3110_get_wol, + .set_wol = mv3110_set_wol, }, { .phy_id = MARVELL_PHY_ID_88X3310, @@ -1076,6 +1171,8 @@ static struct phy_driver mv3310_drivers[] = { .set_tunable = mv3310_set_tunable, .remove = mv3310_remove, .set_loopback = genphy_c45_loopback, + .get_wol = mv3110_get_wol, + .set_wol = mv3110_set_wol, }, { .phy_id = MARVELL_PHY_ID_88E2110, diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index 924ed5b034a4..edb951695b13 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -506,7 +506,7 @@ static int vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk) { struct vsc8531_private *vsc8531 = phydev->priv; bool base = phydev->mdio.addr == vsc8531->ts_base_addr; - u8 msgs[] = { + static const u8 msgs[] = { PTP_MSGTYPE_SYNC, PTP_MSGTYPE_DELAY_REQ }; @@ -847,7 +847,7 @@ static int vsc85xx_ts_ptp_action_flow(struct phy_device *phydev, enum ts_blk blk static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk, bool one_step, bool enable) { - u8 msgs[] = { + static const u8 msgs[] = { PTP_MSGTYPE_SYNC, PTP_MSGTYPE_DELAY_REQ }; @@ -1268,8 +1268,8 @@ static void vsc8584_set_input_clk_configured(struct phy_device *phydev) static int __vsc8584_init_ptp(struct phy_device *phydev) { struct vsc8531_private *vsc8531 = phydev->priv; - u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 }; - u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 }; + static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 }; + static const u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 }; u32 val; if (!vsc8584_is_1588_input_clk_configured(phydev)) { diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c new file mode 100644 index 000000000000..2d5d5081c3b6 --- /dev/null +++ b/drivers/net/phy/mxl-gpy.c @@ -0,0 +1,727 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2021 Maxlinear Corporation + * Copyright (C) 2020 Intel Corporation + * + * Drivers for Maxlinear Ethernet GPY + * + */ + +#include <linux/module.h> +#include <linux/bitfield.h> +#include <linux/phy.h> +#include <linux/netdevice.h> + +/* PHY ID */ +#define PHY_ID_GPYx15B_MASK 0xFFFFFFFC +#define PHY_ID_GPY21xB_MASK 0xFFFFFFF9 +#define PHY_ID_GPY2xx 0x67C9DC00 +#define PHY_ID_GPY115B 0x67C9DF00 +#define PHY_ID_GPY115C 0x67C9DF10 +#define PHY_ID_GPY211B 0x67C9DE08 +#define PHY_ID_GPY211C 0x67C9DE10 +#define PHY_ID_GPY212B 0x67C9DE09 +#define PHY_ID_GPY212C 0x67C9DE20 +#define PHY_ID_GPY215B 0x67C9DF04 +#define PHY_ID_GPY215C 0x67C9DF20 +#define PHY_ID_GPY241B 0x67C9DE40 +#define PHY_ID_GPY241BM 0x67C9DE80 +#define PHY_ID_GPY245B 0x67C9DEC0 + +#define PHY_MIISTAT 0x18 /* MII state */ 
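The two Wake-on-LAN implementations meeting here both split the six-byte station address across three 16-bit vendor registers, but with opposite byte orders: mv3110_set_wol() above puts dev_addr[5] in the high byte of MV_V2_MAGIC_PKT_WORD2, while gpy_set_wol() below puts dev_addr[0] in the high byte of VPSPEC2_WOL_AD45. A self-contained sketch of both packings; the helper names are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* marvell10g: WORD0 = addr[1]:addr[0], WORD1 = addr[3]:addr[2],
 * WORD2 = addr[5]:addr[4] (high byte listed first).
 */
static void pack_mv3110(const uint8_t a[6], uint16_t w[3])
{
	w[0] = (uint16_t)((a[1] << 8) | a[0]);	/* MV_V2_MAGIC_PKT_WORD0 */
	w[1] = (uint16_t)((a[3] << 8) | a[2]);	/* MV_V2_MAGIC_PKT_WORD1 */
	w[2] = (uint16_t)((a[5] << 8) | a[4]);	/* MV_V2_MAGIC_PKT_WORD2 */
}

/* mxl-gpy: AD45 = addr[0]:addr[1], AD23 = addr[2]:addr[3],
 * AD01 = addr[4]:addr[5] -- the reverse convention.
 */
static void pack_gpy(const uint8_t a[6], uint16_t w[3])
{
	w[0] = (uint16_t)((a[0] << 8) | a[1]);	/* VPSPEC2_WOL_AD45 */
	w[1] = (uint16_t)((a[2] << 8) | a[3]);	/* VPSPEC2_WOL_AD23 */
	w[2] = (uint16_t)((a[4] << 8) | a[5]);	/* VPSPEC2_WOL_AD01 */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t mv[3], gpy[3];

	pack_mv3110(mac, mv);
	pack_gpy(mac, gpy);
	printf("mv3110: %04x %04x %04x\n", mv[0], mv[1], mv[2]);
	printf("gpy:    %04x %04x %04x\n", gpy[0], gpy[1], gpy[2]);
	return 0;
}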
+#define PHY_IMASK 0x19 /* interrupt mask */ +#define PHY_ISTAT 0x1A /* interrupt status */ +#define PHY_FWV 0x1E /* firmware version */ + +#define PHY_MIISTAT_SPD_MASK GENMASK(2, 0) +#define PHY_MIISTAT_DPX BIT(3) +#define PHY_MIISTAT_LS BIT(10) + +#define PHY_MIISTAT_SPD_10 0 +#define PHY_MIISTAT_SPD_100 1 +#define PHY_MIISTAT_SPD_1000 2 +#define PHY_MIISTAT_SPD_2500 4 + +#define PHY_IMASK_WOL BIT(15) /* Wake-on-LAN */ +#define PHY_IMASK_ANC BIT(10) /* Auto-Neg complete */ +#define PHY_IMASK_ADSC BIT(5) /* Link auto-downspeed detect */ +#define PHY_IMASK_DXMC BIT(2) /* Duplex mode change */ +#define PHY_IMASK_LSPC BIT(1) /* Link speed change */ +#define PHY_IMASK_LSTC BIT(0) /* Link state change */ +#define PHY_IMASK_MASK (PHY_IMASK_LSTC | \ + PHY_IMASK_LSPC | \ + PHY_IMASK_DXMC | \ + PHY_IMASK_ADSC | \ + PHY_IMASK_ANC) + +#define PHY_FWV_REL_MASK BIT(15) +#define PHY_FWV_TYPE_MASK GENMASK(11, 8) +#define PHY_FWV_MINOR_MASK GENMASK(7, 0) + +/* SGMII */ +#define VSPEC1_SGMII_CTRL 0x08 +#define VSPEC1_SGMII_CTRL_ANEN BIT(12) /* Aneg enable */ +#define VSPEC1_SGMII_CTRL_ANRS BIT(9) /* Restart Aneg */ +#define VSPEC1_SGMII_ANEN_ANRS (VSPEC1_SGMII_CTRL_ANEN | \ + VSPEC1_SGMII_CTRL_ANRS) + +/* WoL */ +#define VPSPEC2_WOL_CTL 0x0E06 +#define VPSPEC2_WOL_AD01 0x0E08 +#define VPSPEC2_WOL_AD23 0x0E09 +#define VPSPEC2_WOL_AD45 0x0E0A +#define WOL_EN BIT(0) + +static const struct { + int type; + int minor; +} ver_need_sgmii_reaneg[] = { + {7, 0x6D}, + {8, 0x6D}, + {9, 0x73}, +}; + +static int gpy_config_init(struct phy_device *phydev) +{ + int ret; + + /* Mask all interrupts */ + ret = phy_write(phydev, PHY_IMASK, 0); + if (ret) + return ret; + + /* Clear all pending interrupts */ + ret = phy_read(phydev, PHY_ISTAT); + return ret < 0 ? ret : 0; +} + +static int gpy_probe(struct phy_device *phydev) +{ + int ret; + + if (!phydev->is_c45) { + ret = phy_get_c45_ids(phydev); + if (ret < 0) + return ret; + } + + /* Show GPY PHY FW version in dmesg */ + ret = phy_read(phydev, PHY_FWV); + if (ret < 0) + return ret; + + phydev_info(phydev, "Firmware Version: 0x%04X (%s)\n", ret, + (ret & PHY_FWV_REL_MASK) ? "release" : "test"); + + return 0; +} + +static bool gpy_sgmii_need_reaneg(struct phy_device *phydev) +{ + int fw_ver, fw_type, fw_minor; + size_t i; + + fw_ver = phy_read(phydev, PHY_FWV); + if (fw_ver < 0) + return true; + + fw_type = FIELD_GET(PHY_FWV_TYPE_MASK, fw_ver); + fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, fw_ver); + + for (i = 0; i < ARRAY_SIZE(ver_need_sgmii_reaneg); i++) { + if (fw_type != ver_need_sgmii_reaneg[i].type) + continue; + if (fw_minor < ver_need_sgmii_reaneg[i].minor) + return true; + break; + } + + return false; +} + +static bool gpy_2500basex_chk(struct phy_device *phydev) +{ + int ret; + + ret = phy_read(phydev, PHY_MIISTAT); + if (ret < 0) { + phydev_err(phydev, "Error: MDIO register access failed: %d\n", + ret); + return false; + } + + if (!(ret & PHY_MIISTAT_LS) || + FIELD_GET(PHY_MIISTAT_SPD_MASK, ret) != PHY_MIISTAT_SPD_2500) + return false; + + phydev->speed = SPEED_2500; + phydev->interface = PHY_INTERFACE_MODE_2500BASEX; + phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL, + VSPEC1_SGMII_CTRL_ANEN, 0); + return true; +} + +static bool gpy_sgmii_aneg_en(struct phy_device *phydev) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL); + if (ret < 0) { + phydev_err(phydev, "Error: MMD register access failed: %d\n", + ret); + return true; + } + + return (ret & VSPEC1_SGMII_CTRL_ANEN) ? 
true : false; +} + +static int gpy_config_aneg(struct phy_device *phydev) +{ + bool changed = false; + u32 adv; + int ret; + + if (phydev->autoneg == AUTONEG_DISABLE) { + /* Configure half duplex with genphy_setup_forced, + * because genphy_c45_pma_setup_forced does not support. + */ + return phydev->duplex != DUPLEX_FULL + ? genphy_setup_forced(phydev) + : genphy_c45_pma_setup_forced(phydev); + } + + ret = genphy_c45_an_config_aneg(phydev); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising); + ret = phy_modify_changed(phydev, MII_CTRL1000, + ADVERTISE_1000FULL | ADVERTISE_1000HALF, + adv); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + ret = genphy_c45_check_and_restart_aneg(phydev, changed); + if (ret < 0) + return ret; + + if (phydev->interface == PHY_INTERFACE_MODE_USXGMII || + phydev->interface == PHY_INTERFACE_MODE_INTERNAL) + return 0; + + /* No need to trigger re-ANEG if link speed is 2.5G or SGMII ANEG is + * disabled. + */ + if (!gpy_sgmii_need_reaneg(phydev) || gpy_2500basex_chk(phydev) || + !gpy_sgmii_aneg_en(phydev)) + return 0; + + /* There is a design constraint in GPY2xx device where SGMII AN is + * only triggered when there is change of speed. If, PHY link + * partner`s speed is still same even after PHY TPI is down and up + * again, SGMII AN is not triggered and hence no new in-band message + * from GPY to MAC side SGMII. + * This could cause an issue during power up, when PHY is up prior to + * MAC. At this condition, once MAC side SGMII is up, MAC side SGMII + * wouldn`t receive new in-band message from GPY with correct link + * status, speed and duplex info. + * + * 1) If PHY is already up and TPI link status is still down (such as + * hard reboot), TPI link status is polled for 4 seconds before + * retriggerring SGMII AN. + * 2) If PHY is already up and TPI link status is also up (such as soft + * reboot), polling of TPI link status is not needed and SGMII AN is + * immediately retriggered. + * 3) Other conditions such as PHY is down, speed change etc, skip + * retriggering SGMII AN. Note: in case of speed change, GPY FW will + * initiate SGMII AN. + */ + + if (phydev->state != PHY_UP) + return 0; + + ret = phy_read_poll_timeout(phydev, MII_BMSR, ret, ret & BMSR_LSTATUS, + 20000, 4000000, false); + if (ret == -ETIMEDOUT) + return 0; + else if (ret < 0) + return ret; + + /* Trigger SGMII AN. */ + return phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL, + VSPEC1_SGMII_CTRL_ANRS, VSPEC1_SGMII_CTRL_ANRS); +} + +static void gpy_update_interface(struct phy_device *phydev) +{ + int ret; + + /* Interface mode is fixed for USXGMII and integrated PHY */ + if (phydev->interface == PHY_INTERFACE_MODE_USXGMII || + phydev->interface == PHY_INTERFACE_MODE_INTERNAL) + return; + + /* Automatically switch SERDES interface between SGMII and 2500-BaseX + * according to speed. Disable ANEG in 2500-BaseX mode. + */ + switch (phydev->speed) { + case SPEED_2500: + phydev->interface = PHY_INTERFACE_MODE_2500BASEX; + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL, + VSPEC1_SGMII_CTRL_ANEN, 0); + if (ret < 0) + phydev_err(phydev, + "Error: Disable of SGMII ANEG failed: %d\n", + ret); + break; + case SPEED_1000: + case SPEED_100: + case SPEED_10: + phydev->interface = PHY_INTERFACE_MODE_SGMII; + if (gpy_sgmii_aneg_en(phydev)) + break; + /* Enable and restart SGMII ANEG for 10/100/1000Mbps link speed + * if ANEG is disabled (in 2500-BaseX mode). 
+ */ + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_SGMII_CTRL, + VSPEC1_SGMII_ANEN_ANRS, + VSPEC1_SGMII_ANEN_ANRS); + if (ret < 0) + phydev_err(phydev, + "Error: Enable of SGMII ANEG failed: %d\n", + ret); + break; + } +} + +static int gpy_read_status(struct phy_device *phydev) +{ + int ret; + + ret = genphy_update_link(phydev); + if (ret) + return ret; + + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + phydev->pause = 0; + phydev->asym_pause = 0; + + if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) { + ret = genphy_c45_read_lpa(phydev); + if (ret < 0) + return ret; + + /* Read the link partner's 1G advertisement */ + ret = phy_read(phydev, MII_STAT1000); + if (ret < 0) + return ret; + mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, ret); + } else if (phydev->autoneg == AUTONEG_DISABLE) { + linkmode_zero(phydev->lp_advertising); + } + + ret = phy_read(phydev, PHY_MIISTAT); + if (ret < 0) + return ret; + + phydev->link = (ret & PHY_MIISTAT_LS) ? 1 : 0; + phydev->duplex = (ret & PHY_MIISTAT_DPX) ? DUPLEX_FULL : DUPLEX_HALF; + switch (FIELD_GET(PHY_MIISTAT_SPD_MASK, ret)) { + case PHY_MIISTAT_SPD_10: + phydev->speed = SPEED_10; + break; + case PHY_MIISTAT_SPD_100: + phydev->speed = SPEED_100; + break; + case PHY_MIISTAT_SPD_1000: + phydev->speed = SPEED_1000; + break; + case PHY_MIISTAT_SPD_2500: + phydev->speed = SPEED_2500; + break; + } + + if (phydev->link) + gpy_update_interface(phydev); + + return 0; +} + +static int gpy_config_intr(struct phy_device *phydev) +{ + u16 mask = 0; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + mask = PHY_IMASK_MASK; + + return phy_write(phydev, PHY_IMASK, mask); +} + +static irqreturn_t gpy_handle_interrupt(struct phy_device *phydev) +{ + int reg; + + reg = phy_read(phydev, PHY_ISTAT); + if (reg < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(reg & PHY_IMASK_MASK)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + +static int gpy_set_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + struct net_device *attach_dev = phydev->attached_dev; + int ret; + + if (wol->wolopts & WAKE_MAGIC) { + /* MAC address - Byte0:Byte1:Byte2:Byte3:Byte4:Byte5 + * VPSPEC2_WOL_AD45 = Byte0:Byte1 + * VPSPEC2_WOL_AD23 = Byte2:Byte3 + * VPSPEC2_WOL_AD01 = Byte4:Byte5 + */ + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, + VPSPEC2_WOL_AD45, + ((attach_dev->dev_addr[0] << 8) | + attach_dev->dev_addr[1])); + if (ret < 0) + return ret; + + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, + VPSPEC2_WOL_AD23, + ((attach_dev->dev_addr[2] << 8) | + attach_dev->dev_addr[3])); + if (ret < 0) + return ret; + + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, + VPSPEC2_WOL_AD01, + ((attach_dev->dev_addr[4] << 8) | + attach_dev->dev_addr[5])); + if (ret < 0) + return ret; + + /* Enable the WOL interrupt */ + ret = phy_write(phydev, PHY_IMASK, PHY_IMASK_WOL); + if (ret < 0) + return ret; + + /* Enable magic packet matching */ + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, + VPSPEC2_WOL_CTL, + WOL_EN); + if (ret < 0) + return ret; + + /* Clear the interrupt status register. + * Only WoL is enabled so clear all. 
+ */ + ret = phy_read(phydev, PHY_ISTAT); + if (ret < 0) + return ret; + } else { + /* Disable magic packet matching */ + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, + VPSPEC2_WOL_CTL, + WOL_EN); + if (ret < 0) + return ret; + } + + if (wol->wolopts & WAKE_PHY) { + /* Enable the link state change interrupt */ + ret = phy_set_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC); + if (ret < 0) + return ret; + + /* Clear the interrupt status register */ + ret = phy_read(phydev, PHY_ISTAT); + if (ret < 0) + return ret; + + if (ret & (PHY_IMASK_MASK & ~PHY_IMASK_LSTC)) + phy_trigger_machine(phydev); + + return 0; + } + + /* Disable the link state change interrupt */ + return phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC); +} + +static void gpy_get_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + int ret; + + wol->supported = WAKE_MAGIC | WAKE_PHY; + wol->wolopts = 0; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VPSPEC2_WOL_CTL); + if (ret & WOL_EN) + wol->wolopts |= WAKE_MAGIC; + + ret = phy_read(phydev, PHY_IMASK); + if (ret & PHY_IMASK_LSTC) + wol->wolopts |= WAKE_PHY; +} + +static int gpy_loopback(struct phy_device *phydev, bool enable) +{ + int ret; + + ret = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, + enable ? BMCR_LOOPBACK : 0); + if (!ret) { + /* It takes some time for PHY device to switch + * into/out-of loopback mode. + */ + msleep(100); + } + + return ret; +} + +static struct phy_driver gpy_drivers[] = { + { + PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx), + .name = "Maxlinear Ethernet GPY2xx", + .get_features = genphy_c45_pma_read_abilities, + .config_init = gpy_config_init, + .probe = gpy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = gpy_config_aneg, + .aneg_done = genphy_c45_aneg_done, + .read_status = gpy_read_status, + .config_intr = gpy_config_intr, + .handle_interrupt = gpy_handle_interrupt, + .set_wol = gpy_set_wol, + .get_wol = gpy_get_wol, + .set_loopback = gpy_loopback, + }, + { + .phy_id = PHY_ID_GPY115B, + .phy_id_mask = PHY_ID_GPYx15B_MASK, + .name = "Maxlinear Ethernet GPY115B", + .get_features = genphy_c45_pma_read_abilities, + .config_init = gpy_config_init, + .probe = gpy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = gpy_config_aneg, + .aneg_done = genphy_c45_aneg_done, + .read_status = gpy_read_status, + .config_intr = gpy_config_intr, + .handle_interrupt = gpy_handle_interrupt, + .set_wol = gpy_set_wol, + .get_wol = gpy_get_wol, + .set_loopback = gpy_loopback, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_GPY115C), + .name = "Maxlinear Ethernet GPY115C", + .get_features = genphy_c45_pma_read_abilities, + .config_init = gpy_config_init, + .probe = gpy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = gpy_config_aneg, + .aneg_done = genphy_c45_aneg_done, + .read_status = gpy_read_status, + .config_intr = gpy_config_intr, + .handle_interrupt = gpy_handle_interrupt, + .set_wol = gpy_set_wol, + .get_wol = gpy_get_wol, + .set_loopback = gpy_loopback, + }, + { + .phy_id = PHY_ID_GPY211B, + .phy_id_mask = PHY_ID_GPY21xB_MASK, + .name = "Maxlinear Ethernet GPY211B", + .get_features = genphy_c45_pma_read_abilities, + .config_init = gpy_config_init, + .probe = gpy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = gpy_config_aneg, + .aneg_done = genphy_c45_aneg_done, + .read_status = gpy_read_status, + .config_intr = gpy_config_intr, + .handle_interrupt = gpy_handle_interrupt, + .set_wol = gpy_set_wol, + .get_wol = gpy_get_wol, + .set_loopback = 
gpy_loopback, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_GPY211C), + .name = "Maxlinear Ethernet GPY211C", + .get_features = genphy_c45_pma_read_abilities, + .config_init = gpy_config_init, + .probe = gpy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = gpy_config_aneg, + .aneg_done = genphy_c45_aneg_done, + .read_status = gpy_read_status, + .config_intr = gpy_config_intr, + .handle_interrupt = gpy_handle_interrupt, + .set_wol = gpy_set_wol, + .get_wol = gpy_get_wol, + .set_loopback = gpy_loopback, + }, + { + .phy_id = PHY_ID_GPY212B, + .phy_id_mask = PHY_ID_GPY21xB_MASK, + .name = "Maxlinear Ethernet GPY212B", + .get_features = genphy_c45_pma_read_abilities, + .config_init = gpy_config_init, + .probe = gpy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = gpy_config_aneg, + .aneg_done = genphy_c45_aneg_done, + .read_status = gpy_read_status, + .config_intr = gpy_config_intr, + .handle_interrupt = gpy_handle_interrupt, + .set_wol = gpy_set_wol, + .get_wol = gpy_get_wol, + .set_loopback = gpy_loopback, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_GPY212C), + .name = "Maxlinear Ethernet GPY212C", + .get_features = genphy_c45_pma_read_abilities, + .config_init = gpy_config_init, + .probe = gpy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = gpy_config_aneg, + .aneg_done = genphy_c45_aneg_done, + .read_status = gpy_read_status, + .config_intr = gpy_config_intr, + .handle_interrupt = gpy_handle_interrupt, + .set_wol = gpy_set_wol, + .get_wol = gpy_get_wol, + .set_loopback = gpy_loopback, + }, + { + .phy_id = PHY_ID_GPY215B, + .phy_id_mask = PHY_ID_GPYx15B_MASK, + .name = "Maxlinear Ethernet GPY215B", + .get_features = genphy_c45_pma_read_abilities, + .config_init = gpy_config_init, + .probe = gpy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = gpy_config_aneg, + .aneg_done = genphy_c45_aneg_done, + .read_status = gpy_read_status, + .config_intr = gpy_config_intr, + .handle_interrupt = gpy_handle_interrupt, + .set_wol = gpy_set_wol, + .get_wol = gpy_get_wol, + .set_loopback = gpy_loopback, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_GPY215C), + .name = "Maxlinear Ethernet GPY215C", + .get_features = genphy_c45_pma_read_abilities, + .config_init = gpy_config_init, + .probe = gpy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = gpy_config_aneg, + .aneg_done = genphy_c45_aneg_done, + .read_status = gpy_read_status, + .config_intr = gpy_config_intr, + .handle_interrupt = gpy_handle_interrupt, + .set_wol = gpy_set_wol, + .get_wol = gpy_get_wol, + .set_loopback = gpy_loopback, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_GPY241B), + .name = "Maxlinear Ethernet GPY241B", + .get_features = genphy_c45_pma_read_abilities, + .config_init = gpy_config_init, + .probe = gpy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = gpy_config_aneg, + .aneg_done = genphy_c45_aneg_done, + .read_status = gpy_read_status, + .config_intr = gpy_config_intr, + .handle_interrupt = gpy_handle_interrupt, + .set_wol = gpy_set_wol, + .get_wol = gpy_get_wol, + .set_loopback = gpy_loopback, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_GPY241BM), + .name = "Maxlinear Ethernet GPY241BM", + .get_features = genphy_c45_pma_read_abilities, + .config_init = gpy_config_init, + .probe = gpy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = gpy_config_aneg, + .aneg_done = genphy_c45_aneg_done, + .read_status = gpy_read_status, + .config_intr = gpy_config_intr, + 
.handle_interrupt = gpy_handle_interrupt, + .set_wol = gpy_set_wol, + .get_wol = gpy_get_wol, + .set_loopback = gpy_loopback, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_GPY245B), + .name = "Maxlinear Ethernet GPY245B", + .get_features = genphy_c45_pma_read_abilities, + .config_init = gpy_config_init, + .probe = gpy_probe, + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = gpy_config_aneg, + .aneg_done = genphy_c45_aneg_done, + .read_status = gpy_read_status, + .config_intr = gpy_config_intr, + .handle_interrupt = gpy_handle_interrupt, + .set_wol = gpy_set_wol, + .get_wol = gpy_get_wol, + .set_loopback = gpy_loopback, + }, +}; +module_phy_driver(gpy_drivers); + +static struct mdio_device_id __maybe_unused gpy_tbl[] = { + {PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx)}, + {PHY_ID_GPY115B, PHY_ID_GPYx15B_MASK}, + {PHY_ID_MATCH_MODEL(PHY_ID_GPY115C)}, + {PHY_ID_GPY211B, PHY_ID_GPY21xB_MASK}, + {PHY_ID_MATCH_MODEL(PHY_ID_GPY211C)}, + {PHY_ID_GPY212B, PHY_ID_GPY21xB_MASK}, + {PHY_ID_MATCH_MODEL(PHY_ID_GPY212C)}, + {PHY_ID_GPY215B, PHY_ID_GPYx15B_MASK}, + {PHY_ID_MATCH_MODEL(PHY_ID_GPY215C)}, + {PHY_ID_MATCH_MODEL(PHY_ID_GPY241B)}, + {PHY_ID_MATCH_MODEL(PHY_ID_GPY241BM)}, + {PHY_ID_MATCH_MODEL(PHY_ID_GPY245B)}, + { } +}; +MODULE_DEVICE_TABLE(mdio, gpy_tbl); + +MODULE_DESCRIPTION("Maxlinear Ethernet GPY Driver"); +MODULE_AUTHOR("Xu Liang"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c index afd7afa1f498..9944cc501806 100644 --- a/drivers/net/phy/nxp-tja11xx.c +++ b/drivers/net/phy/nxp-tja11xx.c @@ -47,12 +47,14 @@ #define MII_INTSRC_LINK_FAIL BIT(10) #define MII_INTSRC_LINK_UP BIT(9) #define MII_INTSRC_MASK (MII_INTSRC_LINK_FAIL | MII_INTSRC_LINK_UP) -#define MII_INTSRC_TEMP_ERR BIT(1) #define MII_INTSRC_UV_ERR BIT(3) +#define MII_INTSRC_TEMP_ERR BIT(1) #define MII_INTEN 22 #define MII_INTEN_LINK_FAIL BIT(10) #define MII_INTEN_LINK_UP BIT(9) +#define MII_INTEN_UV_ERR BIT(3) +#define MII_INTEN_TEMP_ERR BIT(1) #define MII_COMMSTAT 23 #define MII_COMMSTAT_LINK_UP BIT(15) @@ -607,7 +609,8 @@ static int tja11xx_config_intr(struct phy_device *phydev) if (err) return err; - value = MII_INTEN_LINK_FAIL | MII_INTEN_LINK_UP; + value = MII_INTEN_LINK_FAIL | MII_INTEN_LINK_UP | + MII_INTEN_UV_ERR | MII_INTEN_TEMP_ERR; err = phy_write(phydev, MII_INTEN, value); } else { err = phy_write(phydev, MII_INTEN, value); @@ -622,6 +625,7 @@ static int tja11xx_config_intr(struct phy_device *phydev) static irqreturn_t tja11xx_handle_interrupt(struct phy_device *phydev) { + struct device *dev = &phydev->mdio.dev; int irq_status; irq_status = phy_read(phydev, MII_INTSRC); @@ -630,6 +634,11 @@ static irqreturn_t tja11xx_handle_interrupt(struct phy_device *phydev) return IRQ_NONE; } + if (irq_status & MII_INTSRC_TEMP_ERR) + dev_warn(dev, "Overtemperature error detected (temp > 155°C).\n"); + if (irq_status & MII_INTSRC_UV_ERR) + dev_warn(dev, "Undervoltage error detected.\n"); + if (!(irq_status & MII_INTSRC_MASK)) return IRQ_NONE; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 8eeb26d8aeb7..f124a8a58bd4 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -426,7 +426,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) EXPORT_SYMBOL(phy_mii_ioctl); /** - * phy_do_ioctl - generic ndo_do_ioctl implementation + * phy_do_ioctl - generic ndo_eth_ioctl implementation * @dev: the net_device struct * @ifr: &struct ifreq for socket ioctl's * @cmd: ioctl cmd to execute @@ -441,7 +441,7 @@ int phy_do_ioctl(struct
net_device *dev, struct ifreq *ifr, int cmd) EXPORT_SYMBOL(phy_do_ioctl); /** - * phy_do_ioctl_running - generic ndo_do_ioctl implementation but test first + * phy_do_ioctl_running - generic ndo_eth_ioctl implementation but test first * * @dev: the net_device struct * @ifr: &struct ifreq for socket ioctl's diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 5d5f9a9ee768..9e2891d8e8dd 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -233,11 +233,9 @@ static DEFINE_MUTEX(phy_fixup_lock); static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) { - struct device_driver *drv = phydev->mdio.dev.driver; - struct phy_driver *phydrv = to_phy_driver(drv); struct net_device *netdev = phydev->attached_dev; - if (!drv || !phydrv->suspend) + if (!phydev->drv->suspend) return false; /* PHY not attached? May suspend if the PHY has not already been @@ -969,6 +967,20 @@ void phy_device_remove(struct phy_device *phydev) EXPORT_SYMBOL(phy_device_remove); /** + * phy_get_c45_ids - Read 802.3-c45 IDs for phy device. + * @phydev: phy_device structure to read 802.3-c45 IDs + * + * Returns zero on success, %-EIO on bus access error, or %-ENODEV if + * the "devices in package" is invalid. + */ +int phy_get_c45_ids(struct phy_device *phydev) +{ + return get_phy_c45_ids(phydev->mdio.bus, phydev->mdio.addr, + &phydev->c45_ids); +} +EXPORT_SYMBOL(phy_get_c45_ids); + +/** * phy_find_first - finds the first PHY device on the bus * @bus: the target MII bus */ @@ -1807,11 +1819,10 @@ EXPORT_SYMBOL(phy_resume); int phy_loopback(struct phy_device *phydev, bool enable) { - struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); int ret = 0; - if (!phydrv) - return -ENODEV; + if (!phydev->drv) + return -EIO; mutex_lock(&phydev->lock); @@ -1825,8 +1836,8 @@ int phy_loopback(struct phy_device *phydev, bool enable) goto out; } - if (phydrv->set_loopback) - ret = phydrv->set_loopback(phydev, enable); + if (phydev->drv->set_loopback) + ret = phydev->drv->set_loopback(phydev, enable); else ret = genphy_loopback(phydev, enable); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index eb29ef53d971..2cdf9f989dec 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -942,10 +942,11 @@ static void phylink_phy_change(struct phy_device *phydev, bool up) phylink_run_resolve(pl); - phylink_dbg(pl, "phy link %s %s/%s/%s\n", up ? "up" : "down", + phylink_dbg(pl, "phy link %s %s/%s/%s/%s\n", up ? "up" : "down", phy_modes(phydev->interface), phy_speed_to_str(phydev->speed), - phy_duplex_to_str(phydev->duplex)); + phy_duplex_to_str(phydev->duplex), + phylink_pause_to_str(pl->phy_state.pause)); } static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy, @@ -1457,15 +1458,11 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, return phy_ethtool_ksettings_set(pl->phydev, kset); } - linkmode_copy(support, pl->supported); config = pl->link_config; - config.an_enabled = kset->base.autoneg == AUTONEG_ENABLE; - /* Mask out unsupported advertisements, and force the autoneg bit */ + /* Mask out unsupported advertisements */ linkmode_and(config.advertising, kset->link_modes.advertising, - support); - linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising, - config.an_enabled); + pl->supported); /* FIXME: should we reject autoneg if phy/mac does not support it? */ switch (kset->base.autoneg) { @@ -1474,7 +1471,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, * duplex. 
*/ s = phy_lookup_setting(kset->base.speed, kset->base.duplex, - support, false); + pl->supported, false); if (!s) return -EINVAL; @@ -1515,6 +1512,12 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, /* We have ruled out the case with a PHY attached, and the * fixed-link cases. All that is left are in-band links. */ + config.an_enabled = kset->base.autoneg == AUTONEG_ENABLE; + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising, + config.an_enabled); + + /* Validate without changing the current supported mask. */ + linkmode_copy(support, pl->supported); if (phylink_validate(pl, support, &config)) return -EINVAL; diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 151c2a3f0b3a..8dcb49ed1f3d 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -27,12 +27,28 @@ struct gmii2rgmii { struct mdio_device *mdio; }; -static int xgmiitorgmii_read_status(struct phy_device *phydev) +static void xgmiitorgmii_configure(struct gmii2rgmii *priv, int speed) { - struct gmii2rgmii *priv = mdiodev_get_drvdata(&phydev->mdio); struct mii_bus *bus = priv->mdio->bus; int addr = priv->mdio->addr; - u16 val = 0; + u16 val; + + val = mdiobus_read(bus, addr, XILINX_GMII2RGMII_REG); + val &= ~XILINX_GMII2RGMII_SPEED_MASK; + + if (speed == SPEED_1000) + val |= BMCR_SPEED1000; + else if (speed == SPEED_100) + val |= BMCR_SPEED100; + else + val |= BMCR_SPEED10; + + mdiobus_write(bus, addr, XILINX_GMII2RGMII_REG, val); +} + +static int xgmiitorgmii_read_status(struct phy_device *phydev) +{ + struct gmii2rgmii *priv = mdiodev_get_drvdata(&phydev->mdio); int err; if (priv->phy_drv->read_status) @@ -42,17 +58,24 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) if (err < 0) return err; - val = mdiobus_read(bus, addr, XILINX_GMII2RGMII_REG); - val &= ~XILINX_GMII2RGMII_SPEED_MASK; + xgmiitorgmii_configure(priv, phydev->speed); - if (phydev->speed == SPEED_1000) - val |= BMCR_SPEED1000; - else if (phydev->speed == SPEED_100) - val |= BMCR_SPEED100; + return 0; +} + +static int xgmiitorgmii_set_loopback(struct phy_device *phydev, bool enable) +{ + struct gmii2rgmii *priv = mdiodev_get_drvdata(&phydev->mdio); + int err; + + if (priv->phy_drv->set_loopback) + err = priv->phy_drv->set_loopback(phydev, enable); else - val |= BMCR_SPEED10; + err = genphy_loopback(phydev, enable); + if (err < 0) + return err; - mdiobus_write(bus, addr, XILINX_GMII2RGMII_REG, val); + xgmiitorgmii_configure(priv, phydev->speed); return 0; } @@ -90,6 +113,7 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev) memcpy(&priv->conv_phy_drv, priv->phy_dev->drv, sizeof(struct phy_driver)); priv->conv_phy_drv.read_status = xgmiitorgmii_read_status; + priv->conv_phy_drv.set_loopback = xgmiitorgmii_set_loopback; mdiodev_set_drvdata(&priv->phy_dev->mdio, priv); priv->phy_dev->drv = &priv->conv_phy_drv; diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index e26cf91bdec2..82d609401711 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -84,6 +84,7 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n" extra grounds are 18,19,20,21,22,23,24 */ +#include <linux/compat.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> @@ -150,7 +151,8 @@ static int plip_hard_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); static int plip_open(struct net_device *dev); static int plip_close(struct net_device *dev); -static int 
plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +static int plip_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd); static int plip_preempt(void *handle); static void plip_wakeup(void *handle); @@ -265,7 +267,7 @@ static const struct net_device_ops plip_netdev_ops = { .ndo_open = plip_open, .ndo_stop = plip_close, .ndo_start_xmit = plip_tx_packet, - .ndo_do_ioctl = plip_ioctl, + .ndo_siocdevprivate = plip_siocdevprivate, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -1207,7 +1209,8 @@ plip_wakeup(void *handle) } static int -plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +plip_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd) { struct net_local *nl = netdev_priv(dev); struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru; @@ -1215,6 +1218,9 @@ plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (cmd != SIOCDEVPLIP) return -EOPNOTSUPP; + if (in_compat_syscall()) + return -EOPNOTSUPP; + switch(pc->pcmd) { case PLIP_GET_TIMEOUT: pc->trigger = nl->trigger; diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 7a099c37527f..fb52cd175b45 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1463,11 +1463,11 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) } static int -ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +ppp_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *addr, int cmd) { struct ppp *ppp = netdev_priv(dev); int err = -EFAULT; - void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data; struct ppp_stats stats; struct ppp_comp_stats cstats; char *vers; @@ -1596,7 +1596,7 @@ static const struct net_device_ops ppp_netdev_ops = { .ndo_init = ppp_dev_init, .ndo_uninit = ppp_dev_uninit, .ndo_start_xmit = ppp_start_xmit, - .ndo_do_ioctl = ppp_net_ioctl, + .ndo_siocdevprivate = ppp_net_siocdevprivate, .ndo_get_stats64 = ppp_get_stats64, .ndo_fill_forward_path = ppp_fill_forward_path, }; @@ -1744,7 +1744,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) a four-byte PPP header on each packet */ *(u8 *)skb_push(skb, 2) = 1; if (ppp->pass_filter && - BPF_PROG_RUN(ppp->pass_filter, skb) == 0) { + bpf_prog_run(ppp->pass_filter, skb) == 0) { if (ppp->debug & 1) netdev_printk(KERN_DEBUG, ppp->dev, "PPP: outbound frame " @@ -1754,7 +1754,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) } /* if this packet passes the active filter, record the time */ if (!(ppp->active_filter && - BPF_PROG_RUN(ppp->active_filter, skb) == 0)) + bpf_prog_run(ppp->active_filter, skb) == 0)) ppp->last_xmit = jiffies; skb_pull(skb, 2); #else @@ -2468,7 +2468,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) *(u8 *)skb_push(skb, 2) = 0; if (ppp->pass_filter && - BPF_PROG_RUN(ppp->pass_filter, skb) == 0) { + bpf_prog_run(ppp->pass_filter, skb) == 0) { if (ppp->debug & 1) netdev_printk(KERN_DEBUG, ppp->dev, "PPP: inbound frame " @@ -2477,7 +2477,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) return; } if (!(ppp->active_filter && - BPF_PROG_RUN(ppp->active_filter, skb) == 0)) + bpf_prog_run(ppp->active_filter, skb) == 0)) ppp->last_recv = jiffies; __skb_pull(skb, 2); } else diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index e88af978f63c..f01c9db01b16 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c @@ -78,7 +78,8 @@ struct sb1000_private { /* prototypes for Linux interface */ 
extern int sb1000_probe(struct net_device *dev); static int sb1000_open(struct net_device *dev); -static int sb1000_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd); +static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd); static netdev_tx_t sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t sb1000_interrupt(int irq, void *dev_id); @@ -135,7 +136,7 @@ MODULE_DEVICE_TABLE(pnp, sb1000_pnp_ids); static const struct net_device_ops sb1000_netdev_ops = { .ndo_open = sb1000_open, .ndo_start_xmit = sb1000_start_xmit, - .ndo_do_ioctl = sb1000_dev_ioctl, + .ndo_siocdevprivate = sb1000_siocdevprivate, .ndo_stop = sb1000_close, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -987,7 +988,8 @@ sb1000_open(struct net_device *dev) return 0; /* Always succeed */ } -static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) { char* name; unsigned char version[2]; @@ -1011,7 +1013,7 @@ static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) stats[2] = dev->stats.rx_packets; stats[3] = dev->stats.rx_errors; stats[4] = dev->stats.rx_dropped; - if(copy_to_user(ifr->ifr_data, stats, sizeof(stats))) + if (copy_to_user(data, stats, sizeof(stats))) return -EFAULT; status = 0; break; @@ -1019,21 +1021,21 @@ static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGCMFIRMWARE: /* get firmware version */ if ((status = sb1000_get_firmware_version(ioaddr, name, version, 1))) return status; - if(copy_to_user(ifr->ifr_data, version, sizeof(version))) + if (copy_to_user(data, version, sizeof(version))) return -EFAULT; break; case SIOCGCMFREQUENCY: /* get frequency */ if ((status = sb1000_get_frequency(ioaddr, name, &frequency))) return status; - if(put_user(frequency, (int __user *) ifr->ifr_data)) + if (put_user(frequency, (int __user *)data)) return -EFAULT; break; case SIOCSCMFREQUENCY: /* set frequency */ if (!capable(CAP_NET_ADMIN)) return -EPERM; - if(get_user(frequency, (int __user *) ifr->ifr_data)) + if (get_user(frequency, (int __user *)data)) return -EFAULT; if ((status = sb1000_set_frequency(ioaddr, name, frequency))) return status; @@ -1042,14 +1044,14 @@ static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGCMPIDS: /* get PIDs */ if ((status = sb1000_get_PIDs(ioaddr, name, PID))) return status; - if(copy_to_user(ifr->ifr_data, PID, sizeof(PID))) + if (copy_to_user(data, PID, sizeof(PID))) return -EFAULT; break; case SIOCSCMPIDS: /* set PIDs */ if (!capable(CAP_NET_ADMIN)) return -EPERM; - if(copy_from_user(PID, ifr->ifr_data, sizeof(PID))) + if (copy_from_user(PID, data, sizeof(PID))) return -EFAULT; if ((status = sb1000_set_PIDs(ioaddr, name, PID))) return status; diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index dc84cb844319..5435b5689ce6 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -62,6 +62,7 @@ */ #define SL_CHECK_TRANSMIT +#include <linux/compat.h> #include <linux/module.h> #include <linux/moduleparam.h> @@ -108,7 +109,7 @@ static void slip_unesc6(struct slip *sl, unsigned char c); #ifdef CONFIG_SLIP_SMART static void sl_keepalive(struct timer_list *t); static void sl_outfill(struct timer_list *t); -static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int sl_siocdevprivate(struct net_device 
*dev, struct ifreq *rq, void __user *data, int cmd); #endif /******************************** @@ -647,7 +648,7 @@ static const struct net_device_ops sl_netdev_ops = { .ndo_change_mtu = sl_change_mtu, .ndo_tx_timeout = sl_tx_timeout, #ifdef CONFIG_SLIP_SMART - .ndo_do_ioctl = sl_ioctl, + .ndo_siocdevprivate = sl_siocdevprivate, #endif }; @@ -1179,11 +1180,12 @@ static int slip_ioctl(struct tty_struct *tty, struct file *file, /* VSV changes start here */ #ifdef CONFIG_SLIP_SMART -/* function do_ioctl called from net/core/dev.c +/* function sl_siocdevprivate called from net/core/dev.c to allow get/set outfill/keepalive parameter by ifconfig */ -static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd) { struct slip *sl = netdev_priv(dev); unsigned long *p = (unsigned long *)&rq->ifr_ifru; @@ -1191,6 +1193,9 @@ static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (sl == NULL) /* Allocation failed ?? */ return -ENODEV; + if (in_compat_syscall()) + return -EOPNOTSUPP; + spin_lock_bh(&sl->lock); if (!sl->tty) { diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c index 32aef8ac4a14..b095a4b4957b 100644 --- a/drivers/net/team/team_mode_loadbalance.c +++ b/drivers/net/team/team_mode_loadbalance.c @@ -197,7 +197,7 @@ static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv, fp = rcu_dereference_bh(lb_priv->fp); if (unlikely(!fp)) return 0; - lhash = BPF_PROG_RUN(fp, skb); + lhash = bpf_prog_run(fp, skb); c = (char *) &lhash; return c[0] ^ c[1] ^ c[2] ^ c[3]; } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2ced021f4faf..fecc9a1d293a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -3510,7 +3510,9 @@ static void tun_set_msglevel(struct net_device *dev, u32 value) } static int tun_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); @@ -3520,7 +3522,9 @@ static int tun_get_coalesce(struct net_device *dev, } static int tun_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index dc87e8caf954..30821f6a6d7a 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -197,7 +197,7 @@ static const struct net_device_ops ax88172_netdev_ops = { .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = asix_ioctl, + .ndo_eth_ioctl = asix_ioctl, .ndo_set_rx_mode = ax88172_set_multicast, }; @@ -587,7 +587,7 @@ static const struct net_device_ops ax88772_netdev_ops = { .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_rx_mode = asix_set_multicast, }; @@ -706,7 +706,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) u8 buf[ETH_ALEN] = {0}, chipcode = 0; struct asix_common_private *priv; int ret, i; - u32 phyid; priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -767,10 +766,6 @@ static int 
ax88772_bind(struct usbnet *dev, struct usb_interface *intf) return ret; } - /* Read PHYID register *AFTER* the PHY was reset properly */ - phyid = asix_get_phyid(dev); - netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid); - /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ if (dev->driver_info->flags & FLAG_FRAMING_AX) { /* hard_mtu is still the default - the device does not support @@ -1105,7 +1100,7 @@ static const struct net_device_ops ax88178_netdev_ops = { .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = asix_set_multicast, - .ndo_do_ioctl = asix_ioctl, + .ndo_eth_ioctl = asix_ioctl, .ndo_change_mtu = ax88178_change_mtu, }; @@ -1220,6 +1215,7 @@ static const struct driver_info ax88772b_info = { .unbind = ax88772_unbind, .status = asix_status, .reset = ax88772_reset, + .stop = ax88772_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET, .rx_fixup = asix_rx_fixup_common, diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index 530947d7477b..d9777d9a7c5d 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -109,7 +109,7 @@ static const struct net_device_ops ax88172a_netdev_ops = { .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_rx_mode = asix_set_multicast, }; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index c1316718304d..f25448a08870 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1035,7 +1035,7 @@ static const struct net_device_ops ax88179_netdev_ops = { .ndo_change_mtu = ax88179_change_mtu, .ndo_set_mac_address = ax88179_set_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = ax88179_ioctl, + .ndo_eth_ioctl = ax88179_ioctl, .ndo_set_rx_mode = ax88179_set_multicast, .ndo_set_features = ax88179_set_features, }; diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 8d1f69dad603..e1da9102a540 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -253,7 +253,8 @@ static int usbpn_close(struct net_device *dev) return usb_set_interface(pnd->usb, num, !pnd->active_setting); } -static int usbpn_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int usbpn_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) { struct if_phonet_req *req = (struct if_phonet_req *)ifr; @@ -269,7 +270,7 @@ static const struct net_device_ops usbpn_ops = { .ndo_open = usbpn_open, .ndo_stop = usbpn_close, .ndo_start_xmit = usbpn_xmit, - .ndo_do_ioctl = usbpn_ioctl, + .ndo_siocdevprivate = usbpn_siocdevprivate, }; static void usbpn_setup(struct net_device *dev) diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 89cc61d7a675..907f98b1eefe 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -345,7 +345,7 @@ static const struct net_device_ops dm9601_netdev_ops = { .ndo_change_mtu = usbnet_change_mtu, .ndo_get_stats64 = dev_get_tstats64, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = dm9601_ioctl, + .ndo_eth_ioctl = dm9601_ioctl, .ndo_set_rx_mode = dm9601_set_multicast, .ndo_set_mac_address = dm9601_set_mac_address, }; diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index dec96e8ab567..24bc1e678b7b 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1079,8 
+1079,7 @@ static void hso_init_termios(struct ktermios *termios) tty_termios_encode_baud_rate(termios, 115200, 115200); } -static void _hso_serial_set_termios(struct tty_struct *tty, - struct ktermios *old) +static void _hso_serial_set_termios(struct tty_struct *tty) { struct hso_serial *serial = tty->driver_data; @@ -1262,7 +1261,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) if (serial->port.count == 1) { serial->rx_state = RX_IDLE; /* Force default termio settings */ - _hso_serial_set_termios(tty, NULL); + _hso_serial_set_termios(tty); tasklet_setup(&serial->unthrottle_tasklet, hso_unthrottle_tasklet); result = hso_start_serial_device(serial->parent, GFP_KERNEL); @@ -1394,7 +1393,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) /* the actual setup */ spin_lock_irqsave(&serial->serial_lock, flags); if (serial->port.count) - _hso_serial_set_termios(tty, old); + _hso_serial_set_termios(tty); else tty->termios = *old; spin_unlock_irqrestore(&serial->serial_lock, flags); @@ -2353,7 +2352,7 @@ static int remove_net_device(struct hso_device *hso_dev) } /* Frees our network device */ -static void hso_free_net_device(struct hso_device *hso_dev, bool bailout) +static void hso_free_net_device(struct hso_device *hso_dev) { int i; struct hso_net *hso_net = dev2net(hso_dev); @@ -2376,7 +2375,7 @@ static void hso_free_net_device(struct hso_device *hso_dev, bool bailout) kfree(hso_net->mux_bulk_tx_buf); hso_net->mux_bulk_tx_buf = NULL; - if (hso_net->net && !bailout) + if (hso_net->net) free_netdev(hso_net->net); kfree(hso_dev); @@ -3133,7 +3132,7 @@ static void hso_free_interface(struct usb_interface *interface) rfkill_unregister(rfk); rfkill_destroy(rfk); } - hso_free_net_device(network_table[i], false); + hso_free_net_device(network_table[i]); } } } diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 207e59e74935..06e2181e5810 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -443,7 +443,7 @@ static int ipheth_probe(struct usb_interface *intf, netdev->netdev_ops = &ipheth_netdev_ops; netdev->watchdog_timeo = IPHETH_TX_TIMEOUT; - strcpy(netdev->name, "eth%d"); + strscpy(netdev->name, "eth%d", sizeof(netdev->name)); dev = netdev_priv(netdev); dev->udev = udev; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 6d092d78e0cb..793f8fbe0069 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -46,6 +46,19 @@ #define MAX_RX_FIFO_SIZE (12 * 1024) #define MAX_TX_FIFO_SIZE (12 * 1024) + +#define FLOW_THRESHOLD(n) ((((n) + 511) / 512) & 0x7F) +#define FLOW_CTRL_THRESHOLD(on, off) ((FLOW_THRESHOLD(on) << 0) | \ + (FLOW_THRESHOLD(off) << 8)) + +/* Flow control turned on when Rx FIFO level rises above this level (bytes) */ +#define FLOW_ON_SS 9216 +#define FLOW_ON_HS 8704 + +/* Flow control turned off when Rx FIFO level falls below this level (bytes) */ +#define FLOW_OFF_SS 4096 +#define FLOW_OFF_HS 1024 + #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE) #define DEFAULT_BULK_IN_DELAY (0x0800) #define MAX_SINGLE_PACKET_SIZE (9000) @@ -87,6 +100,12 @@ /* statistic update interval (mSec) */ #define STAT_UPDATE_TIMER (1 * 1000) +/* time to wait for MAC or FCT to stop (jiffies) */ +#define HW_DISABLE_TIMEOUT (HZ / 10) + +/* time to wait between polling MAC or FCT state (ms) */ +#define HW_DISABLE_DELAY_MS 1 + /* defines interrupts from interrupt EP */ #define MAX_INT_EP (32) #define INT_EP_INTEP (31) @@ -341,6 +360,7 @@ struct usb_context { #define EVENT_DEV_ASLEEP 7 
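
The FLOW_THRESHOLD()/FLOW_CTRL_THRESHOLD() helpers just added replace the magic fct_flow constants further down in lan78xx_update_flowcontrol(). The levels are encoded in 512-byte units, with the flow-control assert threshold in the low byte and the deassert threshold shifted up by eight. Working the arithmetic for the high-speed pair shows the encoding (this only reproduces the macro, it is not new driver code):

/* FLOW_THRESHOLD(8704) = ((8704 + 511) / 512) & 0x7F = 17 = 0x11
 * FLOW_THRESHOLD(1024) = ((1024 + 511) / 512) & 0x7F =  2 = 0x02
 *
 * FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS)
 *	= (0x11 << 0) | (0x02 << 8) = 0x0211
 *
 * which is exactly the 0x211 the old code wrote for USB_SPEED_HIGH. The
 * super-speed pair (9216 on / 4096 off) encodes to 0x0812, a lower
 * assert level than the old 0x817 (0x17 * 512 = 11776 bytes).
 */
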
#define EVENT_DEV_OPEN 8 #define EVENT_STAT_UPDATE 9 +#define EVENT_DEV_DISCONNECT 10 struct statstage { struct mutex access_lock; /* for stats access */ @@ -370,7 +390,6 @@ struct lan78xx_net { struct sk_buff_head rxq; struct sk_buff_head txq; struct sk_buff_head done; - struct sk_buff_head rxq_pause; struct sk_buff_head txq_pend; struct tasklet_struct bh; @@ -381,8 +400,9 @@ struct lan78xx_net { struct urb *urb_intr; struct usb_anchor deferred; + struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */ struct mutex phy_mutex; /* for phy access */ - unsigned pipe_in, pipe_out, pipe_intr; + unsigned int pipe_in, pipe_out, pipe_intr; u32 hard_mtu; /* count any extra framing */ size_t rx_urb_size; /* size for rx urbs */ @@ -392,8 +412,7 @@ struct lan78xx_net { wait_queue_head_t *wait; unsigned char suspend_count; - unsigned maxpacket; - struct timer_list delay; + unsigned int maxpacket; struct timer_list stat_monitor; unsigned long data[5]; @@ -426,9 +445,13 @@ MODULE_PARM_DESC(msg_level, "Override default message level"); static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data) { - u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL); + u32 *buf; int ret; + if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags)) + return -ENODEV; + + buf = kmalloc(sizeof(u32), GFP_KERNEL); if (!buf) return -ENOMEM; @@ -439,7 +462,7 @@ static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data) if (likely(ret >= 0)) { le32_to_cpus(buf); *data = *buf; - } else { + } else if (net_ratelimit()) { netdev_warn(dev->net, "Failed to read register index 0x%08x. ret = %d", index, ret); @@ -452,9 +475,13 @@ static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data) static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data) { - u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL); + u32 *buf; int ret; + if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags)) + return -ENODEV; + + buf = kmalloc(sizeof(u32), GFP_KERNEL); if (!buf) return -ENOMEM; @@ -465,7 +492,8 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data) USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, buf, 4, USB_CTRL_SET_TIMEOUT); - if (unlikely(ret < 0)) { + if (unlikely(ret < 0) && + net_ratelimit()) { netdev_warn(dev->net, "Failed to write register index 0x%08x. 
ret = %d", index, ret); @@ -476,6 +504,26 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data) return ret; } +static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask, + u32 data) +{ + int ret; + u32 buf; + + ret = lan78xx_read_reg(dev, reg, &buf); + if (ret < 0) + return ret; + + buf &= ~mask; + buf |= (mask & data); + + ret = lan78xx_write_reg(dev, reg, buf); + if (ret < 0) + return ret; + + return 0; +} + static int lan78xx_read_stats(struct lan78xx_net *dev, struct lan78xx_statstage *data) { @@ -501,7 +549,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev, if (likely(ret >= 0)) { src = (u32 *)stats; dst = (u32 *)data; - for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) { + for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) { le32_to_cpus(&src[i]); dst[i] = src[i]; } @@ -515,10 +563,11 @@ static int lan78xx_read_stats(struct lan78xx_net *dev, return ret; } -#define check_counter_rollover(struct1, dev_stats, member) { \ - if (struct1->member < dev_stats.saved.member) \ - dev_stats.rollover_count.member++; \ - } +#define check_counter_rollover(struct1, dev_stats, member) \ + do { \ + if ((struct1)->member < (dev_stats).saved.member) \ + (dev_stats).rollover_count.member++; \ + } while (0) static void lan78xx_check_stat_rollover(struct lan78xx_net *dev, struct lan78xx_statstage *stats) @@ -844,9 +893,9 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, for (i = 0; i < length; i++) { lan78xx_write_reg(dev, OTP_ADDR1, - ((offset + i) >> 8) & OTP_ADDR1_15_11); + ((offset + i) >> 8) & OTP_ADDR1_15_11); lan78xx_write_reg(dev, OTP_ADDR2, - ((offset + i) & OTP_ADDR2_10_3)); + ((offset + i) & OTP_ADDR2_10_3)); lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); @@ -900,9 +949,9 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset, for (i = 0; i < length; i++) { lan78xx_write_reg(dev, OTP_ADDR1, - ((offset + i) >> 8) & OTP_ADDR1_15_11); + ((offset + i) >> 8) & OTP_ADDR1_15_11); lan78xx_write_reg(dev, OTP_ADDR2, - ((offset + i) & OTP_ADDR2_10_3)); + ((offset + i) & OTP_ADDR2_10_3)); lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]); lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); @@ -959,7 +1008,7 @@ static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev) usleep_range(40, 100); } - netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out"); + netdev_warn(dev->net, "%s timed out", __func__); return -EIO; } @@ -972,7 +1021,7 @@ static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select, int i, ret; if (usb_autopm_get_interface(dev->intf) < 0) - return 0; + return 0; mutex_lock(&pdata->dataport_mutex); @@ -1045,9 +1094,9 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param) for (i = 1; i < NUM_OF_MAF; i++) { lan78xx_write_reg(dev, MAF_HI(i), 0); lan78xx_write_reg(dev, MAF_LO(i), - pdata->pfilter_table[i][1]); + pdata->pfilter_table[i][1]); lan78xx_write_reg(dev, MAF_HI(i), - pdata->pfilter_table[i][0]); + pdata->pfilter_table[i][0]); } lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); @@ -1066,11 +1115,12 @@ static void lan78xx_set_multicast(struct net_device *netdev) RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_); for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++) - pdata->mchash_table[i] = 0; + pdata->mchash_table[i] = 0; + /* pfilter_table[0] has own HW address */ for (i = 1; i < NUM_OF_MAF; i++) { - pdata->pfilter_table[i][0] = - pdata->pfilter_table[i][1] 
= 0; + pdata->pfilter_table[i][0] = 0; + pdata->pfilter_table[i][1] = 0; } pdata->rfe_ctl |= RFE_CTL_BCAST_EN_; @@ -1134,9 +1184,9 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex, flow |= FLOW_CR_RX_FCEN_; if (dev->udev->speed == USB_SPEED_SUPER) - fct_flow = 0x817; + fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS); else if (dev->udev->speed == USB_SPEED_HIGH) - fct_flow = 0x211; + fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS); netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s", (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), @@ -1150,6 +1200,52 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex, return 0; } +static int lan78xx_mac_reset(struct lan78xx_net *dev) +{ + unsigned long start_time = jiffies; + u32 val; + int ret; + + mutex_lock(&dev->phy_mutex); + + /* Resetting the device while there is activity on the MDIO + * bus can result in the MAC interface locking up and not + * completing register access transactions. + */ + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + ret = lan78xx_read_reg(dev, MAC_CR, &val); + if (ret < 0) + goto done; + + val |= MAC_CR_RST_; + ret = lan78xx_write_reg(dev, MAC_CR, val); + if (ret < 0) + goto done; + + /* Wait for the reset to complete before allowing any further + * MAC register accesses otherwise the MAC may lock up. + */ + do { + ret = lan78xx_read_reg(dev, MAC_CR, &val); + if (ret < 0) + goto done; + + if (!(val & MAC_CR_RST_)) { + ret = 0; + goto done; + } + } while (!time_after(jiffies, start_time + HZ)); + + ret = -ETIMEDOUT; +done: + mutex_unlock(&dev->phy_mutex); + + return ret; +} + static int lan78xx_link_reset(struct lan78xx_net *dev) { struct phy_device *phydev = dev->net->phydev; @@ -1160,7 +1256,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) /* clear LAN78xx interrupt status */ ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_); if (unlikely(ret < 0)) - return -EIO; + return ret; mutex_lock(&phydev->lock); phy_read_status(phydev); @@ -1171,13 +1267,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) dev->link_on = false; /* reset MAC */ - ret = lan78xx_read_reg(dev, MAC_CR, &buf); - if (unlikely(ret < 0)) - return -EIO; - buf |= MAC_CR_RST_; - ret = lan78xx_write_reg(dev, MAC_CR, buf); - if (unlikely(ret < 0)) - return -EIO; + ret = lan78xx_mac_reset(dev); + if (ret < 0) + return ret; del_timer(&dev->stat_monitor); } else if (link && !dev->link_on) { @@ -1189,18 +1281,30 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) if (ecmd.base.speed == 1000) { /* disable U2 */ ret = lan78xx_read_reg(dev, USB_CFG1, &buf); + if (ret < 0) + return ret; buf &= ~USB_CFG1_DEV_U2_INIT_EN_; ret = lan78xx_write_reg(dev, USB_CFG1, buf); + if (ret < 0) + return ret; /* enable U1 */ ret = lan78xx_read_reg(dev, USB_CFG1, &buf); + if (ret < 0) + return ret; buf |= USB_CFG1_DEV_U1_INIT_EN_; ret = lan78xx_write_reg(dev, USB_CFG1, buf); + if (ret < 0) + return ret; } else { /* enable U1 & U2 */ ret = lan78xx_read_reg(dev, USB_CFG1, &buf); + if (ret < 0) + return ret; buf |= USB_CFG1_DEV_U2_INIT_EN_; buf |= USB_CFG1_DEV_U1_INIT_EN_; ret = lan78xx_write_reg(dev, USB_CFG1, buf); + if (ret < 0) + return ret; } } @@ -1218,6 +1322,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv, radv); + if (ret < 0) + return ret; if (!timer_pending(&dev->stat_monitor)) { dev->delta = 1; @@ -1228,7 +1334,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) 
tasklet_schedule(&dev->bh); } - return ret; + return 0; } /* some work can't be done in tasklets, so we use keventd @@ -1264,9 +1370,10 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb) generic_handle_irq(dev->domain_data.phyirq); local_irq_enable(); } - } else + } else { netdev_warn(dev->net, "unexpected interrupt: 0x%08x\n", intdata); + } } static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev) @@ -1355,7 +1462,7 @@ static void lan78xx_get_wol(struct net_device *netdev, struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); if (usb_autopm_get_interface(dev->intf) < 0) - return; + return; ret = lan78xx_read_reg(dev, USB_CFG0, &buf); if (unlikely(ret < 0)) { @@ -2003,7 +2110,7 @@ static int lan8835_fixup(struct phy_device *phydev) /* RGMII MAC TXC Delay Enable */ lan78xx_write_reg(dev, MAC_RGMII_ID, - MAC_RGMII_ID_TXC_DELAY_EN_); + MAC_RGMII_ID_TXC_DELAY_EN_); /* RGMII TX DLL Tune Adjust */ lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00); @@ -2267,11 +2374,16 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) int ll_mtu = new_mtu + netdev->hard_header_len; int old_hard_mtu = dev->hard_mtu; int old_rx_urb_size = dev->rx_urb_size; + int ret; /* no second zero-length packet read wanted after mtu-sized packets */ if ((ll_mtu % dev->maxpacket) == 0) return -EDOM; + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; + lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN); netdev->mtu = new_mtu; @@ -2287,6 +2399,8 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) } } + usb_autopm_put_interface(dev->intf); + return 0; } @@ -2443,26 +2557,186 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev) lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]); } +static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable) +{ + return lan78xx_update_reg(dev, reg, hw_enable, hw_enable); +} + +static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled, + u32 hw_disabled) +{ + unsigned long timeout; + bool stopped = true; + int ret; + u32 buf; + + /* Stop the h/w block (if not already stopped) */ + + ret = lan78xx_read_reg(dev, reg, &buf); + if (ret < 0) + return ret; + + if (buf & hw_enabled) { + buf &= ~hw_enabled; + + ret = lan78xx_write_reg(dev, reg, buf); + if (ret < 0) + return ret; + + stopped = false; + timeout = jiffies + HW_DISABLE_TIMEOUT; + do { + ret = lan78xx_read_reg(dev, reg, &buf); + if (ret < 0) + return ret; + + if (buf & hw_disabled) + stopped = true; + else + msleep(HW_DISABLE_DELAY_MS); + } while (!stopped && !time_after(jiffies, timeout)); + } + + ret = stopped ? 
0 : -ETIME; + + return ret; +} + +static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush) +{ + return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush); +} + +static int lan78xx_start_tx_path(struct lan78xx_net *dev) +{ + int ret; + + netif_dbg(dev, drv, dev->net, "start tx path"); + + /* Start the MAC transmitter */ + + ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_); + if (ret < 0) + return ret; + + /* Start the Tx FIFO */ + + ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_); + if (ret < 0) + return ret; + + return 0; +} + +static int lan78xx_stop_tx_path(struct lan78xx_net *dev) +{ + int ret; + + netif_dbg(dev, drv, dev->net, "stop tx path"); + + /* Stop the Tx FIFO */ + + ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_); + if (ret < 0) + return ret; + + /* Stop the MAC transmitter */ + + ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_); + if (ret < 0) + return ret; + + return 0; +} + +/* The caller must ensure the Tx path is stopped before calling + * lan78xx_flush_tx_fifo(). + */ +static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev) +{ + return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_); +} + +static int lan78xx_start_rx_path(struct lan78xx_net *dev) +{ + int ret; + + netif_dbg(dev, drv, dev->net, "start rx path"); + + /* Start the Rx FIFO */ + + ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_); + if (ret < 0) + return ret; + + /* Start the MAC receiver*/ + + ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_); + if (ret < 0) + return ret; + + return 0; +} + +static int lan78xx_stop_rx_path(struct lan78xx_net *dev) +{ + int ret; + + netif_dbg(dev, drv, dev->net, "stop rx path"); + + /* Stop the MAC receiver */ + + ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_); + if (ret < 0) + return ret; + + /* Stop the Rx FIFO */ + + ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_); + if (ret < 0) + return ret; + + return 0; +} + +/* The caller must ensure the Rx path is stopped before calling + * lan78xx_flush_rx_fifo(). 
+ */ +static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev) +{ + return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_); +} + static int lan78xx_reset(struct lan78xx_net *dev) { struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); - u32 buf; - int ret = 0; unsigned long timeout; + int ret; + u32 buf; u8 sig; ret = lan78xx_read_reg(dev, HW_CFG, &buf); + if (ret < 0) + return ret; + buf |= HW_CFG_LRST_; + ret = lan78xx_write_reg(dev, HW_CFG, buf); + if (ret < 0) + return ret; timeout = jiffies + HZ; do { mdelay(1); ret = lan78xx_read_reg(dev, HW_CFG, &buf); + if (ret < 0) + return ret; + if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on completion of LiteReset"); - return -EIO; + ret = -ETIMEDOUT; + return ret; } } while (buf & HW_CFG_LRST_); @@ -2470,13 +2744,22 @@ static int lan78xx_reset(struct lan78xx_net *dev) /* save DEVID for later usage */ ret = lan78xx_read_reg(dev, ID_REV, &buf); + if (ret < 0) + return ret; + dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16; dev->chiprev = buf & ID_REV_CHIP_REV_MASK_; /* Respond to the IN token with a NAK */ ret = lan78xx_read_reg(dev, USB_CFG0, &buf); + if (ret < 0) + return ret; + buf |= USB_CFG_BIR_; + ret = lan78xx_write_reg(dev, USB_CFG0, buf); + if (ret < 0) + return ret; /* Init LTM */ lan78xx_init_ltm(dev); @@ -2499,53 +2782,105 @@ static int lan78xx_reset(struct lan78xx_net *dev) } ret = lan78xx_write_reg(dev, BURST_CAP, buf); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); + if (ret < 0) + return ret; ret = lan78xx_read_reg(dev, HW_CFG, &buf); + if (ret < 0) + return ret; + buf |= HW_CFG_MEF_; + ret = lan78xx_write_reg(dev, HW_CFG, buf); + if (ret < 0) + return ret; ret = lan78xx_read_reg(dev, USB_CFG0, &buf); + if (ret < 0) + return ret; + buf |= USB_CFG_BCE_; + ret = lan78xx_write_reg(dev, USB_CFG0, buf); + if (ret < 0) + return ret; /* set FIFO sizes */ buf = (MAX_RX_FIFO_SIZE - 512) / 512; + ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf); + if (ret < 0) + return ret; buf = (MAX_TX_FIFO_SIZE - 512) / 512; + ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf); + if (ret < 0) + return ret; ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, FLOW, 0); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, FCT_FLOW, 0); + if (ret < 0) + return ret; /* Don't need rfe_ctl_lock during initialisation */ ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); + if (ret < 0) + return ret; + pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_; + ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + if (ret < 0) + return ret; /* Enable or disable checksum offload engines */ - lan78xx_set_features(dev->net, dev->net->features); + ret = lan78xx_set_features(dev->net, dev->net->features); + if (ret < 0) + return ret; lan78xx_set_multicast(dev->net); /* reset PHY */ ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + if (ret < 0) + return ret; + buf |= PMT_CTL_PHY_RST_; + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + if (ret < 0) + return ret; timeout = jiffies + HZ; do { mdelay(1); ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + if (ret < 0) + return ret; + if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout waiting for PHY Reset"); - return -EIO; + ret = -ETIMEDOUT; + return ret; } } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_)); ret = lan78xx_read_reg(dev, MAC_CR, &buf); + if (ret < 0) + return ret; + /* LAN7801 only has RGMII mode */ if (dev->chipid == 
ID_REV_CHIP_ID_7801_) buf &= ~MAC_CR_GMII_EN_; @@ -2559,27 +2894,13 @@ static int lan78xx_reset(struct lan78xx_net *dev) } } ret = lan78xx_write_reg(dev, MAC_CR, buf); - - ret = lan78xx_read_reg(dev, MAC_TX, &buf); - buf |= MAC_TX_TXEN_; - ret = lan78xx_write_reg(dev, MAC_TX, buf); - - ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf); - buf |= FCT_TX_CTL_EN_; - ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf); + if (ret < 0) + return ret; ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + VLAN_ETH_HLEN); - ret = lan78xx_read_reg(dev, MAC_RX, &buf); - buf |= MAC_RX_RXEN_; - ret = lan78xx_write_reg(dev, MAC_RX, buf); - - ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf); - buf |= FCT_RX_CTL_EN_; - ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf); - - return 0; + return ret; } static void lan78xx_init_stats(struct lan78xx_net *dev) @@ -2613,9 +2934,13 @@ static int lan78xx_open(struct net_device *net) struct lan78xx_net *dev = netdev_priv(net); int ret; + netif_dbg(dev, ifup, dev->net, "open device"); + ret = usb_autopm_get_interface(dev->intf); if (ret < 0) - goto out; + return ret; + + mutex_lock(&dev->dev_mutex); phy_start(net->phydev); @@ -2631,6 +2956,20 @@ static int lan78xx_open(struct net_device *net) } } + ret = lan78xx_flush_rx_fifo(dev); + if (ret < 0) + goto done; + ret = lan78xx_flush_tx_fifo(dev); + if (ret < 0) + goto done; + + ret = lan78xx_start_tx_path(dev); + if (ret < 0) + goto done; + ret = lan78xx_start_rx_path(dev); + if (ret < 0) + goto done; + lan78xx_init_stats(dev); set_bit(EVENT_DEV_OPEN, &dev->flags); @@ -2641,9 +2980,10 @@ static int lan78xx_open(struct net_device *net) lan78xx_defer_kevent(dev, EVENT_LINK_RESET); done: + mutex_unlock(&dev->dev_mutex); + usb_autopm_put_interface(dev->intf); -out: return ret; } @@ -2660,53 +3000,74 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev) temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq); /* maybe wait for deletions to finish. 
*/ - while (!skb_queue_empty(&dev->rxq) && - !skb_queue_empty(&dev->txq) && - !skb_queue_empty(&dev->done)) { + while (!skb_queue_empty(&dev->rxq) || + !skb_queue_empty(&dev->txq)) { schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); set_current_state(TASK_UNINTERRUPTIBLE); netif_dbg(dev, ifdown, dev->net, - "waited for %d urb completions\n", temp); + "waited for %d urb completions", temp); } set_current_state(TASK_RUNNING); dev->wait = NULL; remove_wait_queue(&unlink_wakeup, &wait); + + while (!skb_queue_empty(&dev->done)) { + struct skb_data *entry; + struct sk_buff *skb; + + skb = skb_dequeue(&dev->done); + entry = (struct skb_data *)(skb->cb); + usb_free_urb(entry->urb); + dev_kfree_skb(skb); + } } static int lan78xx_stop(struct net_device *net) { struct lan78xx_net *dev = netdev_priv(net); + netif_dbg(dev, ifup, dev->net, "stop device"); + + mutex_lock(&dev->dev_mutex); + if (timer_pending(&dev->stat_monitor)) del_timer_sync(&dev->stat_monitor); - if (net->phydev) - phy_stop(net->phydev); - clear_bit(EVENT_DEV_OPEN, &dev->flags); netif_stop_queue(net); + tasklet_kill(&dev->bh); + + lan78xx_terminate_urbs(dev); netif_info(dev, ifdown, dev->net, "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n", net->stats.rx_packets, net->stats.tx_packets, net->stats.rx_errors, net->stats.tx_errors); - lan78xx_terminate_urbs(dev); + /* ignore errors that occur stopping the Tx and Rx data paths */ + lan78xx_stop_tx_path(dev); + lan78xx_stop_rx_path(dev); - usb_kill_urb(dev->urb_intr); + if (net->phydev) + phy_stop(net->phydev); - skb_queue_purge(&dev->rxq_pause); + usb_kill_urb(dev->urb_intr); /* deferred work (task, timer, softirq) must also stop. * can't flush_scheduled_work() until we drop rtnl (later), * else workers could deadlock; so make workers a NOP. */ - dev->flags = 0; + clear_bit(EVENT_TX_HALT, &dev->flags); + clear_bit(EVENT_RX_HALT, &dev->flags); + clear_bit(EVENT_LINK_RESET, &dev->flags); + clear_bit(EVENT_STAT_UPDATE, &dev->flags); + cancel_delayed_work_sync(&dev->wq); - tasklet_kill(&dev->bh); usb_autopm_put_interface(dev->intf); + mutex_unlock(&dev->dev_mutex); + return 0; } @@ -2795,16 +3156,23 @@ static void tx_complete(struct urb *urb) /* software-driven interface shutdown */ case -ECONNRESET: case -ESHUTDOWN: + netif_dbg(dev, tx_err, dev->net, + "tx err interface gone %d\n", + entry->urb->status); break; case -EPROTO: case -ETIME: case -EILSEQ: netif_stop_queue(dev->net); + netif_dbg(dev, tx_err, dev->net, + "tx err queue stopped %d\n", + entry->urb->status); break; default: netif_dbg(dev, tx_err, dev->net, - "tx err %d\n", entry->urb->status); + "unknown tx err %d\n", + entry->urb->status); break; } } @@ -2829,6 +3197,9 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) struct lan78xx_net *dev = netdev_priv(net); struct sk_buff *skb2 = NULL; + if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) + schedule_delayed_work(&dev->wq, 0); + if (skb) { skb_tx_timestamp(skb); skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC); @@ -2988,11 +3359,6 @@ static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) { int status; - if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { - skb_queue_tail(&dev->rxq_pause, skb); - return; - } - dev->net->stats.rx_packets++; dev->net->stats.rx_bytes += skb->len; @@ -3140,6 +3506,7 @@ static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags) lan78xx_defer_kevent(dev, EVENT_RX_HALT); break; case -ENODEV: + case -ENOENT: netif_dbg(dev, ifdown, dev->net, "device gone\n"); netif_device_detach(dev->net); break; @@ -3340,6 +3707,12 
@@ gso_skb: lan78xx_defer_kevent(dev, EVENT_TX_HALT); usb_autopm_put_interface_async(dev->intf); break; + case -ENODEV: + case -ENOENT: + netif_dbg(dev, tx_err, dev->net, + "tx: submit urb err %d (disconnected?)", ret); + netif_device_detach(dev->net); + break; default: usb_autopm_put_interface_async(dev->intf); netif_dbg(dev, tx_err, dev->net, @@ -3356,9 +3729,10 @@ drop: if (skb) dev_kfree_skb_any(skb); usb_free_urb(urb); - } else + } else { netif_dbg(dev, tx_queued, dev->net, "> tx, len %d, type 0x%x\n", length, skb->protocol); + } } static void lan78xx_rx_bh(struct lan78xx_net *dev) @@ -3421,8 +3795,7 @@ static void lan78xx_bh(struct tasklet_struct *t) if (!skb_queue_empty(&dev->txq_pend)) lan78xx_tx_bh(dev); - if (!timer_pending(&dev->delay) && - !test_bit(EVENT_RX_HALT, &dev->flags)) + if (!test_bit(EVENT_RX_HALT, &dev->flags)) lan78xx_rx_bh(dev); } } @@ -3434,18 +3807,20 @@ static void lan78xx_delayedwork(struct work_struct *work) dev = container_of(work, struct lan78xx_net, wq.work); + if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags)) + return; + + if (usb_autopm_get_interface(dev->intf) < 0) + return; + if (test_bit(EVENT_TX_HALT, &dev->flags)) { unlink_urbs(dev, &dev->txq); - status = usb_autopm_get_interface(dev->intf); - if (status < 0) - goto fail_pipe; + status = usb_clear_halt(dev->udev, dev->pipe_out); - usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_tx_err(dev)) -fail_pipe: netdev_err(dev->net, "can't clear tx halt, status %d\n", status); @@ -3455,18 +3830,14 @@ fail_pipe: netif_wake_queue(dev->net); } } + if (test_bit(EVENT_RX_HALT, &dev->flags)) { unlink_urbs(dev, &dev->rxq); - status = usb_autopm_get_interface(dev->intf); - if (status < 0) - goto fail_halt; status = usb_clear_halt(dev->udev, dev->pipe_in); - usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_rx_err(dev)) -fail_halt: netdev_err(dev->net, "can't clear rx halt, status %d\n", status); @@ -3480,16 +3851,9 @@ fail_halt: int ret = 0; clear_bit(EVENT_LINK_RESET, &dev->flags); - status = usb_autopm_get_interface(dev->intf); - if (status < 0) - goto skip_reset; if (lan78xx_link_reset(dev) < 0) { - usb_autopm_put_interface(dev->intf); -skip_reset: netdev_info(dev->net, "link reset failed (%d)\n", ret); - } else { - usb_autopm_put_interface(dev->intf); } } @@ -3503,6 +3867,8 @@ skip_reset: dev->delta = min((dev->delta * 2), 50); } + + usb_autopm_put_interface(dev->intf); } static void intr_complete(struct urb *urb) @@ -3518,6 +3884,7 @@ static void intr_complete(struct urb *urb) /* software-driven interface shutdown */ case -ENOENT: /* urb killed */ + case -ENODEV: /* hardware gone */ case -ESHUTDOWN: /* hardware gone */ netif_dbg(dev, ifdown, dev->net, "intr shutdown, code %d\n", status); @@ -3531,14 +3898,29 @@ static void intr_complete(struct urb *urb) break; } - if (!netif_running(dev->net)) + if (!netif_device_present(dev->net) || + !netif_running(dev->net)) { + netdev_warn(dev->net, "not submitting new status URB"); return; + } memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); status = usb_submit_urb(urb, GFP_ATOMIC); - if (status != 0) + + switch (status) { + case 0: + break; + case -ENODEV: + case -ENOENT: + netif_dbg(dev, timer, dev->net, + "intr resubmit %d (disconnect?)", status); + netif_device_detach(dev->net); + break; + default: netif_err(dev, timer, dev->net, "intr resubmit --> %d\n", status); + break; + } } static void lan78xx_disconnect(struct usb_interface 
*intf) @@ -3553,8 +3935,15 @@ static void lan78xx_disconnect(struct usb_interface *intf) if (!dev) return; + set_bit(EVENT_DEV_DISCONNECT, &dev->flags); + udev = interface_to_usbdev(intf); net = dev->net; + + unregister_netdev(net); + + cancel_delayed_work_sync(&dev->wq); + phydev = net->phydev; phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); @@ -3565,12 +3954,11 @@ static void lan78xx_disconnect(struct usb_interface *intf) if (phy_is_pseudo_fixed_link(phydev)) fixed_phy_unregister(phydev); - unregister_netdev(net); - - cancel_delayed_work_sync(&dev->wq); - usb_scuttle_anchored_urbs(&dev->deferred); + if (timer_pending(&dev->stat_monitor)) + del_timer_sync(&dev->stat_monitor); + lan78xx_unbind(dev, intf); usb_kill_urb(dev->urb_intr); @@ -3609,7 +3997,7 @@ static const struct net_device_ops lan78xx_netdev_ops = { .ndo_change_mtu = lan78xx_change_mtu, .ndo_set_mac_address = lan78xx_set_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_rx_mode = lan78xx_set_multicast, .ndo_set_features = lan78xx_set_features, .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid, @@ -3632,8 +4020,8 @@ static int lan78xx_probe(struct usb_interface *intf, struct net_device *netdev; struct usb_device *udev; int ret; - unsigned maxp; - unsigned period; + unsigned int maxp; + unsigned int period; u8 *buf = NULL; udev = interface_to_usbdev(intf); @@ -3659,9 +4047,9 @@ static int lan78xx_probe(struct usb_interface *intf, skb_queue_head_init(&dev->rxq); skb_queue_head_init(&dev->txq); skb_queue_head_init(&dev->done); - skb_queue_head_init(&dev->rxq_pause); skb_queue_head_init(&dev->txq_pend); mutex_init(&dev->phy_mutex); + mutex_init(&dev->dev_mutex); tasklet_setup(&dev->bh, lan78xx_bh); INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork); @@ -3798,37 +4186,119 @@ static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len) return crc; } -static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) +static int lan78xx_set_auto_suspend(struct lan78xx_net *dev) { u32 buf; - int mask_index; - u16 crc; - u32 temp_wucsr; - u32 temp_pmt_ctl; + int ret; + + ret = lan78xx_stop_tx_path(dev); + if (ret < 0) + return ret; + + ret = lan78xx_stop_rx_path(dev); + if (ret < 0) + return ret; + + /* auto suspend (selective suspend) */ + + ret = lan78xx_write_reg(dev, WUCSR, 0); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, WUCSR2, 0); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); + if (ret < 0) + return ret; + + /* set goodframe wakeup */ + + ret = lan78xx_read_reg(dev, WUCSR, &buf); + if (ret < 0) + return ret; + + buf |= WUCSR_RFE_WAKE_EN_; + buf |= WUCSR_STORE_WAKE_; + + ret = lan78xx_write_reg(dev, WUCSR, buf); + if (ret < 0) + return ret; + + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + if (ret < 0) + return ret; + + buf &= ~PMT_CTL_RES_CLR_WKP_EN_; + buf |= PMT_CTL_RES_CLR_WKP_STS_; + buf |= PMT_CTL_PHY_WAKE_EN_; + buf |= PMT_CTL_WOL_EN_; + buf &= ~PMT_CTL_SUS_MODE_MASK_; + buf |= PMT_CTL_SUS_MODE_3_; + + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + if (ret < 0) + return ret; + + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + if (ret < 0) + return ret; + + buf |= PMT_CTL_WUPS_MASK_; + + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + if (ret < 0) + return ret; + + ret = lan78xx_start_rx_path(dev); + + return ret; +} + +static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) +{ const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E }; const u8 ipv6_multicast[3] = { 0x33, 0x33 }; const u8 
arp_type[2] = { 0x08, 0x06 }; + u32 temp_pmt_ctl; + int mask_index; + u32 temp_wucsr; + u32 buf; + u16 crc; + int ret; - lan78xx_read_reg(dev, MAC_TX, &buf); - buf &= ~MAC_TX_TXEN_; - lan78xx_write_reg(dev, MAC_TX, buf); - lan78xx_read_reg(dev, MAC_RX, &buf); - buf &= ~MAC_RX_RXEN_; - lan78xx_write_reg(dev, MAC_RX, buf); + ret = lan78xx_stop_tx_path(dev); + if (ret < 0) + return ret; + ret = lan78xx_stop_rx_path(dev); + if (ret < 0) + return ret; - lan78xx_write_reg(dev, WUCSR, 0); - lan78xx_write_reg(dev, WUCSR2, 0); - lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); + ret = lan78xx_write_reg(dev, WUCSR, 0); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, WUCSR2, 0); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); + if (ret < 0) + return ret; temp_wucsr = 0; temp_pmt_ctl = 0; - lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl); + + ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl); + if (ret < 0) + return ret; + temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_; temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_; - for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) - lan78xx_write_reg(dev, WUF_CFG(mask_index), 0); + for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) { + ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0); + if (ret < 0) + return ret; + } mask_index = 0; if (wol & WAKE_PHY) { @@ -3857,30 +4327,52 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) /* set WUF_CFG & WUF_MASK for IPv4 Multicast */ crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3); - lan78xx_write_reg(dev, WUF_CFG(mask_index), + ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), WUF_CFGX_EN_ | WUF_CFGX_TYPE_MCAST_ | (0 << WUF_CFGX_OFFSET_SHIFT_) | (crc & WUF_CFGX_CRC16_MASK_)); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + if (ret < 0) + return ret; - lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7); - lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); - lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); - lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); mask_index++; /* for IPv6 Multicast */ crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2); - lan78xx_write_reg(dev, WUF_CFG(mask_index), + ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), WUF_CFGX_EN_ | WUF_CFGX_TYPE_MCAST_ | (0 << WUF_CFGX_OFFSET_SHIFT_) | (crc & WUF_CFGX_CRC16_MASK_)); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + if (ret < 0) + return ret; - lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3); - lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); - lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); - lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); mask_index++; temp_pmt_ctl |= PMT_CTL_WOL_EN_; @@ -3901,16 +4393,27 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) * for packettype (offset 12,13) = ARP (0x0806) */ crc = lan78xx_wakeframe_crc16(arp_type, 2); - lan78xx_write_reg(dev, WUF_CFG(mask_index), + ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), WUF_CFGX_EN_ | 
WUF_CFGX_TYPE_ALL_ | (0 << WUF_CFGX_OFFSET_SHIFT_) | (crc & WUF_CFGX_CRC16_MASK_)); + if (ret < 0) + return ret; + + ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + if (ret < 0) + return ret; + ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + if (ret < 0) + return ret; - lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000); - lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); - lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); - lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); mask_index++; temp_pmt_ctl |= PMT_CTL_WOL_EN_; @@ -3918,7 +4421,9 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; } - lan78xx_write_reg(dev, WUCSR, temp_wucsr); + ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr); + if (ret < 0) + return ret; /* when multiple WOL bits are set */ if (hweight_long((unsigned long)wol) > 1) { @@ -3926,33 +4431,45 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; } - lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl); + ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl); + if (ret < 0) + return ret; /* clear WUPS */ - lan78xx_read_reg(dev, PMT_CTL, &buf); + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + if (ret < 0) + return ret; + buf |= PMT_CTL_WUPS_MASK_; - lan78xx_write_reg(dev, PMT_CTL, buf); - lan78xx_read_reg(dev, MAC_RX, &buf); - buf |= MAC_RX_RXEN_; - lan78xx_write_reg(dev, MAC_RX, buf); + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + if (ret < 0) + return ret; - return 0; + ret = lan78xx_start_rx_path(dev); + + return ret; } static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message) { struct lan78xx_net *dev = usb_get_intfdata(intf); - struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); - u32 buf; + bool dev_open; int ret; - if (!dev->suspend_count++) { + mutex_lock(&dev->dev_mutex); + + netif_dbg(dev, ifdown, dev->net, + "suspending: pm event %#x", message.event); + + dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags); + + if (dev_open) { spin_lock_irq(&dev->txq.lock); /* don't autosuspend while transmitting */ if ((skb_queue_len(&dev->txq) || skb_queue_len(&dev->txq_pend)) && - PMSG_IS_AUTO(message)) { + PMSG_IS_AUTO(message)) { spin_unlock_irq(&dev->txq.lock); ret = -EBUSY; goto out; @@ -3961,129 +4478,207 @@ static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message) spin_unlock_irq(&dev->txq.lock); } - /* stop TX & RX */ - ret = lan78xx_read_reg(dev, MAC_TX, &buf); - buf &= ~MAC_TX_TXEN_; - ret = lan78xx_write_reg(dev, MAC_TX, buf); - ret = lan78xx_read_reg(dev, MAC_RX, &buf); - buf &= ~MAC_RX_RXEN_; - ret = lan78xx_write_reg(dev, MAC_RX, buf); + /* stop RX */ + ret = lan78xx_stop_rx_path(dev); + if (ret < 0) + goto out; + + ret = lan78xx_flush_rx_fifo(dev); + if (ret < 0) + goto out; + + /* stop Tx */ + ret = lan78xx_stop_tx_path(dev); + if (ret < 0) + goto out; - /* empty out the rx and queues */ + /* empty out the Rx and Tx queues */ netif_device_detach(dev->net); lan78xx_terminate_urbs(dev); usb_kill_urb(dev->urb_intr); /* reattach */ netif_device_attach(dev->net); - } - if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { del_timer(&dev->stat_monitor); if (PMSG_IS_AUTO(message)) { - /* auto suspend (selective suspend) */ - ret = lan78xx_read_reg(dev, MAC_TX, &buf); - buf &= ~MAC_TX_TXEN_; 
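
The WUF_CFG/WUF_MASKn writes above program the chip's wakeup-frame filters: each slot stores a CRC16 over the frame bytes selected by a byte-enable bitmap, so a mask word of 0x3000 picks out bytes 12 and 13 (the EtherType) and the CRC of { 0x08, 0x06 } makes the slot fire on ARP, while masks 7 and 3 select the leading octets of the IPv4 and IPv6 multicast prefixes. A condensed sketch of one slot's programming, using the register helpers from this diff; the function itself is illustrative, not part of the driver:

static int foo_set_wakeup_filter(struct lan78xx_net *dev, int slot,
				 const u8 *pattern, int len, u32 byte_mask)
{
	u16 crc = lan78xx_wakeframe_crc16(pattern, len);
	int ret;

	/* enable the slot and store the expected CRC16 of the masked bytes
	 * (the multicast slots use WUF_CFGX_TYPE_MCAST_ instead of _ALL_)
	 */
	ret = lan78xx_write_reg(dev, WUF_CFG(slot),
				WUF_CFGX_EN_ | WUF_CFGX_TYPE_ALL_ |
				(crc & WUF_CFGX_CRC16_MASK_));
	if (ret < 0)
		return ret;

	/* bit n of the mask selects frame byte n; words 1..3 (bytes 32..127)
	 * are written as zero in the driver
	 */
	return lan78xx_write_reg(dev, WUF_MASK0(slot), byte_mask);
}
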
- ret = lan78xx_write_reg(dev, MAC_TX, buf); - ret = lan78xx_read_reg(dev, MAC_RX, &buf); - buf &= ~MAC_RX_RXEN_; - ret = lan78xx_write_reg(dev, MAC_RX, buf); + ret = lan78xx_set_auto_suspend(dev); + if (ret < 0) + goto out; + } else { + struct lan78xx_priv *pdata; - ret = lan78xx_write_reg(dev, WUCSR, 0); - ret = lan78xx_write_reg(dev, WUCSR2, 0); - ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); + pdata = (struct lan78xx_priv *)(dev->data[0]); + netif_carrier_off(dev->net); + ret = lan78xx_set_suspend(dev, pdata->wol); + if (ret < 0) + goto out; + } + } else { + /* Interface is down; don't allow WOL and PHY + * events to wake up the host + */ + u32 buf; - /* set goodframe wakeup */ - ret = lan78xx_read_reg(dev, WUCSR, &buf); + set_bit(EVENT_DEV_ASLEEP, &dev->flags); - buf |= WUCSR_RFE_WAKE_EN_; - buf |= WUCSR_STORE_WAKE_; + ret = lan78xx_write_reg(dev, WUCSR, 0); + if (ret < 0) + goto out; + ret = lan78xx_write_reg(dev, WUCSR2, 0); + if (ret < 0) + goto out; - ret = lan78xx_write_reg(dev, WUCSR, buf); + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + if (ret < 0) + goto out; + + buf &= ~PMT_CTL_RES_CLR_WKP_EN_; + buf |= PMT_CTL_RES_CLR_WKP_STS_; + buf &= ~PMT_CTL_SUS_MODE_MASK_; + buf |= PMT_CTL_SUS_MODE_3_; + + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + if (ret < 0) + goto out; - ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + if (ret < 0) + goto out; + + buf |= PMT_CTL_WUPS_MASK_; + + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + if (ret < 0) + goto out; + } - buf &= ~PMT_CTL_RES_CLR_WKP_EN_; - buf |= PMT_CTL_RES_CLR_WKP_STS_; + ret = 0; +out: + mutex_unlock(&dev->dev_mutex); - buf |= PMT_CTL_PHY_WAKE_EN_; - buf |= PMT_CTL_WOL_EN_; - buf &= ~PMT_CTL_SUS_MODE_MASK_; - buf |= PMT_CTL_SUS_MODE_3_; + return ret; +} - ret = lan78xx_write_reg(dev, PMT_CTL, buf); +static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev) +{ + bool pipe_halted = false; + struct urb *urb; - ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + while ((urb = usb_get_from_anchor(&dev->deferred))) { + struct sk_buff *skb = urb->context; + int ret; - buf |= PMT_CTL_WUPS_MASK_; + if (!netif_device_present(dev->net) || + !netif_carrier_ok(dev->net) || + pipe_halted) { + usb_free_urb(urb); + dev_kfree_skb(skb); + continue; + } - ret = lan78xx_write_reg(dev, PMT_CTL, buf); + ret = usb_submit_urb(urb, GFP_ATOMIC); - ret = lan78xx_read_reg(dev, MAC_RX, &buf); - buf |= MAC_RX_RXEN_; - ret = lan78xx_write_reg(dev, MAC_RX, buf); + if (ret == 0) { + netif_trans_update(dev->net); + lan78xx_queue_skb(&dev->txq, skb, tx_start); } else { - lan78xx_set_suspend(dev, pdata->wol); + usb_free_urb(urb); + dev_kfree_skb(skb); + + if (ret == -EPIPE) { + netif_stop_queue(dev->net); + pipe_halted = true; + } else if (ret == -ENODEV) { + netif_device_detach(dev->net); + } } } - ret = 0; -out: - return ret; + return pipe_halted; } static int lan78xx_resume(struct usb_interface *intf) { struct lan78xx_net *dev = usb_get_intfdata(intf); - struct sk_buff *skb; - struct urb *res; + bool dev_open; int ret; - u32 buf; - if (!timer_pending(&dev->stat_monitor)) { - dev->delta = 1; - mod_timer(&dev->stat_monitor, - jiffies + STAT_UPDATE_TIMER); - } + mutex_lock(&dev->dev_mutex); - if (!--dev->suspend_count) { - /* resume interrupt URBs */ - if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags)) - usb_submit_urb(dev->urb_intr, GFP_NOIO); + netif_dbg(dev, ifup, dev->net, "resuming device"); + + dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags); + + if (dev_open) { + bool pipe_halted = false; + + ret 
= lan78xx_flush_tx_fifo(dev); + if (ret < 0) + goto out; + + if (dev->urb_intr) { + int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL); - spin_lock_irq(&dev->txq.lock); - while ((res = usb_get_from_anchor(&dev->deferred))) { - skb = (struct sk_buff *)res->context; - ret = usb_submit_urb(res, GFP_ATOMIC); if (ret < 0) { - dev_kfree_skb_any(skb); - usb_free_urb(res); - usb_autopm_put_interface_async(dev->intf); - } else { - netif_trans_update(dev->net); - lan78xx_queue_skb(&dev->txq, skb, tx_start); + if (ret == -ENODEV) + netif_device_detach(dev->net); + + netdev_warn(dev->net, "Failed to submit intr URB"); } } + spin_lock_irq(&dev->txq.lock); + + if (netif_device_present(dev->net)) { + pipe_halted = lan78xx_submit_deferred_urbs(dev); + + if (pipe_halted) + lan78xx_defer_kevent(dev, EVENT_TX_HALT); + } + clear_bit(EVENT_DEV_ASLEEP, &dev->flags); + spin_unlock_irq(&dev->txq.lock); - if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { - if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen)) - netif_start_queue(dev->net); - tasklet_schedule(&dev->bh); + if (!pipe_halted && + netif_device_present(dev->net) && + (skb_queue_len(&dev->txq) < dev->tx_qlen)) + netif_start_queue(dev->net); + + ret = lan78xx_start_tx_path(dev); + if (ret < 0) + goto out; + + tasklet_schedule(&dev->bh); + + if (!timer_pending(&dev->stat_monitor)) { + dev->delta = 1; + mod_timer(&dev->stat_monitor, + jiffies + STAT_UPDATE_TIMER); } + + } else { + clear_bit(EVENT_DEV_ASLEEP, &dev->flags); } ret = lan78xx_write_reg(dev, WUCSR2, 0); + if (ret < 0) + goto out; ret = lan78xx_write_reg(dev, WUCSR, 0); + if (ret < 0) + goto out; ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); + if (ret < 0) + goto out; ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ | WUCSR2_ARP_RCD_ | WUCSR2_IPV6_TCPSYN_RCD_ | WUCSR2_IPV4_TCPSYN_RCD_); + if (ret < 0) + goto out; ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ | WUCSR_EEE_RX_WAKE_ | @@ -4092,23 +4687,32 @@ static int lan78xx_resume(struct usb_interface *intf) WUCSR_WUFR_ | WUCSR_MPR_ | WUCSR_BCST_FR_); + if (ret < 0) + goto out; - ret = lan78xx_read_reg(dev, MAC_TX, &buf); - buf |= MAC_TX_TXEN_; - ret = lan78xx_write_reg(dev, MAC_TX, buf); + ret = 0; +out: + mutex_unlock(&dev->dev_mutex); - return 0; + return ret; } static int lan78xx_reset_resume(struct usb_interface *intf) { struct lan78xx_net *dev = usb_get_intfdata(intf); + int ret; - lan78xx_reset(dev); + netif_dbg(dev, ifup, dev->net, "(reset) resuming device"); + + ret = lan78xx_reset(dev); + if (ret < 0) + return ret; phy_start(dev->net->phydev); - return lan78xx_resume(intf); + ret = lan78xx_resume(intf); + + return ret; } static const struct usb_device_id products[] = { diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 2469bdcb1a04..66866bef25df 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -464,7 +464,7 @@ static const struct net_device_ops mcs7830_netdev_ops = { .ndo_change_mtu = usbnet_change_mtu, .ndo_get_stats64 = dev_get_tstats64, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = mcs7830_ioctl, + .ndo_eth_ioctl = mcs7830_ioctl, .ndo_set_rx_mode = mcs7830_set_multicast, .ndo_set_mac_address = mcs7830_set_mac_address, }; diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 9f9dd0de33cb..6a92a3fef75e 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -1001,7 +1001,8 @@ static const struct ethtool_ops ops = { .set_link_ksettings = pegasus_set_link_ksettings, }; -static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, 
int cmd) +static int pegasus_siocdevprivate(struct net_device *net, struct ifreq *rq, + void __user *udata, int cmd) { __u16 *data = (__u16 *) &rq->ifr_ifru; pegasus_t *pegasus = netdev_priv(net); @@ -1269,7 +1270,7 @@ static int pegasus_resume(struct usb_interface *intf) static const struct net_device_ops pegasus_netdev_ops = { .ndo_open = pegasus_open, .ndo_stop = pegasus_close, - .ndo_do_ioctl = pegasus_ioctl, + .ndo_siocdevprivate = pegasus_siocdevprivate, .ndo_start_xmit = pegasus_start_xmit, .ndo_set_rx_mode = pegasus_set_multicast, .ndo_tx_timeout = pegasus_tx_timeout, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 79832374f78d..60ba9b734055 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -8848,7 +8848,9 @@ out: } static int rtl8152_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct r8152 *tp = netdev_priv(netdev); @@ -8867,7 +8869,9 @@ static int rtl8152_get_coalesce(struct net_device *netdev, } static int rtl8152_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct r8152 *tp = netdev_priv(netdev); int ret; @@ -9190,7 +9194,7 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) static const struct net_device_ops rtl8152_netdev_ops = { .ndo_open = rtl8152_open, .ndo_stop = rtl8152_close, - .ndo_do_ioctl = rtl8152_ioctl, + .ndo_eth_ioctl = rtl8152_ioctl, .ndo_start_xmit = rtl8152_start_xmit, .ndo_tx_timeout = rtl8152_tx_timeout, .ndo_set_features = rtl8152_set_features, diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 7656f2a3afd9..4a1b0e0fc3a3 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -822,7 +822,8 @@ static const struct ethtool_ops ops = { .get_link_ksettings = rtl8150_get_link_ksettings, }; -static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) +static int rtl8150_siocdevprivate(struct net_device *netdev, struct ifreq *rq, + void __user *udata, int cmd) { rtl8150_t *dev = netdev_priv(netdev); u16 *data = (u16 *) & rq->ifr_ifru; @@ -850,7 +851,7 @@ static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) static const struct net_device_ops rtl8150_netdev_ops = { .ndo_open = rtl8150_open, .ndo_stop = rtl8150_close, - .ndo_do_ioctl = rtl8150_ioctl, + .ndo_siocdevprivate = rtl8150_siocdevprivate, .ndo_start_xmit = rtl8150_start_xmit, .ndo_tx_timeout = rtl8150_tx_timeout, .ndo_set_rx_mode = rtl8150_set_multicast, diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 13141dbfa3a8..76f7af161313 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -1439,7 +1439,7 @@ static const struct net_device_ops smsc75xx_netdev_ops = { .ndo_change_mtu = smsc75xx_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = smsc75xx_ioctl, + .ndo_eth_ioctl = smsc75xx_ioctl, .ndo_set_rx_mode = smsc75xx_set_multicast, .ndo_set_features = smsc75xx_set_features, }; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 4c8ee1cff4d4..7d953974eb9b 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1044,7 +1044,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = { .ndo_get_stats64 = dev_get_tstats64, 
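/*
 * The USB-network conversions above split the old .ndo_do_ioctl catch-all
 * in two: .ndo_eth_ioctl carries the standard MII requests
 * (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG), while .ndo_siocdevprivate takes
 * over the SIOCDEVPRIVATE..SIOCDEVPRIVATE+15 range and is handed the user
 * pointer explicitly. A minimal private handler in the new style ("foo"
 * and its register helper are placeholders, not patch code):
 *
 *	static int foo_siocdevprivate(struct net_device *dev, struct ifreq *rq,
 *				      void __user *data, int cmd)
 *	{
 *		u16 *args = (u16 *)&rq->ifr_ifru;
 *
 *		switch (cmd) {
 *		case SIOCDEVPRIVATE + 1:
 *			args[1] = foo_read_reg(dev, args[0]);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */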
.ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = smsc95xx_ioctl, + .ndo_eth_ioctl = smsc95xx_ioctl, .ndo_set_rx_mode = smsc95xx_set_multicast, .ndo_set_features = smsc95xx_set_features, }; diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index ce29261263cd..6516a37893e2 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -310,7 +310,7 @@ static const struct net_device_ops sr9700_netdev_ops = { .ndo_change_mtu = usbnet_change_mtu, .ndo_get_stats64 = dev_get_tstats64, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = sr9700_ioctl, + .ndo_eth_ioctl = sr9700_ioctl, .ndo_set_rx_mode = sr9700_set_multicast, .ndo_set_mac_address = sr9700_set_mac_address, }; diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index a822d81310d5..576401c8b1be 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -684,7 +684,7 @@ static const struct net_device_ops sr9800_netdev_ops = { .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = sr_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = sr_ioctl, + .ndo_eth_ioctl = sr_ioctl, .ndo_set_rx_mode = sr_set_multicast, }; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 470e1c1e6353..840c1c2ab16a 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1725,7 +1725,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev->interrupt_count = 0; dev->net = net; - strcpy (net->name, "usb%d"); + strscpy(net->name, "usb%d", sizeof(net->name)); memcpy (net->dev_addr, node_id, sizeof node_id); /* rx and tx sides can use different message sizes; @@ -1752,13 +1752,13 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) if ((dev->driver_info->flags & FLAG_ETHER) != 0 && ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 || (net->dev_addr [0] & 0x02) == 0)) - strcpy (net->name, "eth%d"); + strscpy(net->name, "eth%d", sizeof(net->name)); /* WLAN devices should always be named "wlan%d" */ if ((dev->driver_info->flags & FLAG_WLAN) != 0) - strcpy(net->name, "wlan%d"); + strscpy(net->name, "wlan%d", sizeof(net->name)); /* WWAN devices should always be named "wwan%d" */ if ((dev->driver_info->flags & FLAG_WWAN) != 0) - strcpy(net->name, "wwan%d"); + strscpy(net->name, "wwan%d", sizeof(net->name)); /* devices that cannot do ARP */ if ((dev->driver_info->flags & FLAG_NOARP) != 0) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index bdb7ce3cb054..50eb43e5bf45 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -224,12 +224,13 @@ static void veth_get_channels(struct net_device *dev, { channels->tx_count = dev->real_num_tx_queues; channels->rx_count = dev->real_num_rx_queues; - channels->max_tx = dev->real_num_tx_queues; - channels->max_rx = dev->real_num_rx_queues; - channels->combined_count = min(dev->real_num_rx_queues, dev->real_num_tx_queues); - channels->max_combined = min(dev->real_num_rx_queues, dev->real_num_tx_queues); + channels->max_tx = dev->num_tx_queues; + channels->max_rx = dev->num_rx_queues; } +static int veth_set_channels(struct net_device *dev, + struct ethtool_channels *ch); + static const struct ethtool_ops veth_ethtool_ops = { .get_drvinfo = veth_get_drvinfo, .get_link = ethtool_op_get_link, @@ -239,6 +240,7 @@ static const struct ethtool_ops veth_ethtool_ops = { .get_link_ksettings = veth_get_link_ksettings, .get_ts_info = ethtool_op_get_ts_info, .get_channels = veth_get_channels, + .set_channels = 
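/*
 * In the usbnet_probe() hunk above, strcpy() of the interface-name
 * templates is replaced by strscpy(), which bounds the copy to the
 * destination buffer and always NUL-terminates, truncating on overflow.
 * The template strings ("usb%d", "eth%d", "wlan%d", "wwan%d") trivially
 * fit in IFNAMSIZ, so this is defensive hardening rather than a behaviour
 * change:
 *
 *	strscpy(net->name, "wlan%d", sizeof(net->name));
 */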
veth_set_channels, }; /* general routines */ @@ -711,7 +713,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, int mac_len, delta, off; struct xdp_buff xdp; - skb_orphan_partial(skb); + skb_prepare_for_gro(skb); rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -928,12 +930,12 @@ static int veth_poll(struct napi_struct *napi, int budget) return done; } -static int __veth_napi_enable(struct net_device *dev) +static int __veth_napi_enable_range(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); int err, i; - for (i = 0; i < dev->real_num_rx_queues; i++) { + for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL); @@ -941,7 +943,7 @@ static int __veth_napi_enable(struct net_device *dev) goto err_xdp_ring; } - for (i = 0; i < dev->real_num_rx_queues; i++) { + for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; napi_enable(&rq->xdp_napi); @@ -949,19 +951,25 @@ static int __veth_napi_enable(struct net_device *dev) } return 0; + err_xdp_ring: - for (i--; i >= 0; i--) + for (i--; i >= start; i--) ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free); return err; } -static void veth_napi_del(struct net_device *dev) +static int __veth_napi_enable(struct net_device *dev) +{ + return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues); +} + +static void veth_napi_del_range(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); int i; - for (i = 0; i < dev->real_num_rx_queues; i++) { + for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; rcu_assign_pointer(priv->rq[i].napi, NULL); @@ -970,7 +978,7 @@ static void veth_napi_del(struct net_device *dev) } synchronize_net(); - for (i = 0; i < dev->real_num_rx_queues; i++) { + for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; rq->rx_notify_masked = false; @@ -978,41 +986,90 @@ static void veth_napi_del(struct net_device *dev) } } +static void veth_napi_del(struct net_device *dev) +{ + veth_napi_del_range(dev, 0, dev->real_num_rx_queues); +} + static bool veth_gro_requested(const struct net_device *dev) { return !!(dev->wanted_features & NETIF_F_GRO); } -static int veth_enable_xdp(struct net_device *dev) +static int veth_enable_xdp_range(struct net_device *dev, int start, int end, + bool napi_already_on) { - bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP); struct veth_priv *priv = netdev_priv(dev); int err, i; - if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) { - for (i = 0; i < dev->real_num_rx_queues; i++) { - struct veth_rq *rq = &priv->rq[i]; + for (i = start; i < end; i++) { + struct veth_rq *rq = &priv->rq[i]; - if (!napi_already_on) - netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); - err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id); - if (err < 0) - goto err_rxq_reg; + if (!napi_already_on) + netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); + err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id); + if (err < 0) + goto err_rxq_reg; - err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, - MEM_TYPE_PAGE_SHARED, - NULL); - if (err < 0) - goto err_reg_mem; + err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL); + if (err < 0) + goto err_reg_mem; - /* Save original mem info as it can be overwritten */ - rq->xdp_mem = rq->xdp_rxq.mem; - } + /* Save original mem info as it can be overwritten */ + rq->xdp_mem = rq->xdp_rxq.mem; + } + 
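/*
 * The veth rework in this region applies one refactoring pattern
 * throughout: each whole-device helper becomes a [start, end) range
 * helper, and the original name is kept as a thin wrapper over the full
 * range, exactly as the patch does here:
 *
 *	static int __veth_napi_enable(struct net_device *dev)
 *	{
 *		return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
 *	}
 *
 * That is what later allows .set_channels to initialize or tear down only
 * the queues whose count actually changed, instead of bouncing the whole
 * device.
 */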
return 0; + +err_reg_mem: + xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); +err_rxq_reg: + for (i--; i >= start; i--) { + struct veth_rq *rq = &priv->rq[i]; + + xdp_rxq_info_unreg(&rq->xdp_rxq); + if (!napi_already_on) + netif_napi_del(&rq->xdp_napi); + } + + return err; +} + +static void veth_disable_xdp_range(struct net_device *dev, int start, int end, + bool delete_napi) +{ + struct veth_priv *priv = netdev_priv(dev); + int i; + + for (i = start; i < end; i++) { + struct veth_rq *rq = &priv->rq[i]; + + rq->xdp_rxq.mem = rq->xdp_mem; + xdp_rxq_info_unreg(&rq->xdp_rxq); + + if (delete_napi) + netif_napi_del(&rq->xdp_napi); + } +} + +static int veth_enable_xdp(struct net_device *dev) +{ + bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP); + struct veth_priv *priv = netdev_priv(dev); + int err, i; + + if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) { + err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on); + if (err) + return err; if (!napi_already_on) { err = __veth_napi_enable(dev); - if (err) - goto err_rxq_reg; + if (err) { + veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true); + return err; + } if (!veth_gro_requested(dev)) { /* user-space did not require GRO, but adding XDP @@ -1030,18 +1087,6 @@ static int veth_enable_xdp(struct net_device *dev) } return 0; -err_reg_mem: - xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); -err_rxq_reg: - for (i--; i >= 0; i--) { - struct veth_rq *rq = &priv->rq[i]; - - xdp_rxq_info_unreg(&rq->xdp_rxq); - if (!napi_already_on) - netif_napi_del(&rq->xdp_napi); - } - - return err; - } static void veth_disable_xdp(struct net_device *dev) @@ -1064,28 +1109,23 @@ static void veth_disable_xdp(struct net_device *dev) } } - for (i = 0; i < dev->real_num_rx_queues; i++) { - struct veth_rq *rq = &priv->rq[i]; - - rq->xdp_rxq.mem = rq->xdp_mem; - xdp_rxq_info_unreg(&rq->xdp_rxq); - } + veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false); } -static int veth_napi_enable(struct net_device *dev) +static int veth_napi_enable_range(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); int err, i; - for (i = 0; i < dev->real_num_rx_queues; i++) { + for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); } - err = __veth_napi_enable(dev); + err = __veth_napi_enable_range(dev, start, end); if (err) { - for (i = 0; i < dev->real_num_rx_queues; i++) { + for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; netif_napi_del(&rq->xdp_napi); @@ -1095,6 +1135,128 @@ static int veth_napi_enable(struct net_device *dev) return err; } +static int veth_napi_enable(struct net_device *dev) +{ + return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues); +} + +static void veth_disable_range_safe(struct net_device *dev, int start, int end) +{ + struct veth_priv *priv = netdev_priv(dev); + + if (start >= end) + return; + + if (priv->_xdp_prog) { + veth_napi_del_range(dev, start, end); + veth_disable_xdp_range(dev, start, end, false); + } else if (veth_gro_requested(dev)) { + veth_napi_del_range(dev, start, end); + } +} + +static int veth_enable_range_safe(struct net_device *dev, int start, int end) +{ + struct veth_priv *priv = netdev_priv(dev); + int err; + + if (start >= end) + return 0; + + if (priv->_xdp_prog) { + /* these channels are freshly initialized, napi is not on there even + * when GRO is requested + */ + err = veth_enable_xdp_range(dev, start, end, false); + if (err) + return err; + + err = 
__veth_napi_enable_range(dev, start, end); + if (err) { + /* on error always delete the newly added napis */ + veth_disable_xdp_range(dev, start, end, true); + return err; + } + } else if (veth_gro_requested(dev)) { + return veth_napi_enable_range(dev, start, end); + } + return 0; +} + +static int veth_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct veth_priv *priv = netdev_priv(dev); + unsigned int old_rx_count, new_rx_count; + struct veth_priv *peer_priv; + struct net_device *peer; + int err; + + /* sanity check. Upper bounds are already enforced by the caller */ + if (!ch->rx_count || !ch->tx_count) + return -EINVAL; + + /* avoid breaking XDP, if that is enabled */ + peer = rtnl_dereference(priv->peer); + peer_priv = peer ? netdev_priv(peer) : NULL; + if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues) + return -EINVAL; + + if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues) + return -EINVAL; + + old_rx_count = dev->real_num_rx_queues; + new_rx_count = ch->rx_count; + if (netif_running(dev)) { + /* turn device off */ + netif_carrier_off(dev); + if (peer) + netif_carrier_off(peer); + + /* try to allocate new resources, as needed */ + err = veth_enable_range_safe(dev, old_rx_count, new_rx_count); + if (err) + goto out; + } + + err = netif_set_real_num_rx_queues(dev, ch->rx_count); + if (err) + goto revert; + + err = netif_set_real_num_tx_queues(dev, ch->tx_count); + if (err) { + int err2 = netif_set_real_num_rx_queues(dev, old_rx_count); + + /* this error condition could happen only if rx and tx change + * in opposite directions (e.g. tx nr rises, rx nr decreases) + * and we can't do anything to fully restore the original + * status + */ + if (err2) + pr_warn("Can't restore rx queues config %d -> %d %d", + new_rx_count, old_rx_count, err2); + else + goto revert; + } + +out: + if (netif_running(dev)) { + /* note that we need to swap the arguments WRT the enable part + * to identify the range we have to disable + */ + veth_disable_range_safe(dev, new_rx_count, old_rx_count); + netif_carrier_on(dev); + if (peer) + netif_carrier_on(peer); + } + return err; + +revert: + new_rx_count = old_rx_count; + old_rx_count = ch->rx_count; + goto out; +} + static int veth_open(struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); @@ -1447,6 +1609,23 @@ static void veth_disable_gro(struct net_device *dev) netdev_update_features(dev); } +static int veth_init_queues(struct net_device *dev, struct nlattr *tb[]) +{ + int err; + + if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) { + err = netif_set_real_num_tx_queues(dev, 1); + if (err) + return err; + } + if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) { + err = netif_set_real_num_rx_queues(dev, 1); + if (err) + return err; + } + return 0; +} + static int veth_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) @@ -1556,13 +1735,21 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, priv = netdev_priv(dev); rcu_assign_pointer(priv->peer, peer); + err = veth_init_queues(dev, tb); + if (err) + goto err_queues; priv = netdev_priv(peer); rcu_assign_pointer(priv->peer, dev); + err = veth_init_queues(peer, tb); + if (err) + goto err_queues; veth_disable_gro(dev); return 0; +err_queues: + unregister_netdevice(dev); err_register_dev: /* nothing to do */ err_configure_peer: @@ -1608,6 +1795,16 @@ static struct net *veth_get_link_net(const struct net_device 
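/*
 * The "revert" label in veth_set_channels() above is worth a second look:
 * it performs no teardown itself. It swaps old_rx_count and new_rx_count
 * and jumps back to "out", so the veth_disable_range_safe(dev,
 * new_rx_count, old_rx_count) call there, which normally frees the
 * no-longer-needed old queues on success, is reused to free the freshly
 * allocated ones on failure:
 *
 *	revert:
 *		new_rx_count = old_rx_count;
 *		old_rx_count = ch->rx_count;
 *		goto out;
 */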
*dev) return peer ? dev_net(peer) : dev_net(dev); } +static unsigned int veth_get_num_queues(void) +{ + /* enforce the same queue limit as rtnl_create_link */ + int queues = num_possible_cpus(); + + if (queues > 4096) + queues = 4096; + return queues; +} + static struct rtnl_link_ops veth_link_ops = { .kind = DRV_NAME, .priv_size = sizeof(struct veth_priv), @@ -1618,6 +1815,8 @@ static struct rtnl_link_ops veth_link_ops = { .policy = veth_policy, .maxtype = VETH_INFO_MAX, .get_link_net = veth_get_link_net, + .get_num_tx_queues = veth_get_num_queues, + .get_num_rx_queues = veth_get_num_queues, }; /* diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index eee493685aad..271d38c1d9f8 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -380,7 +380,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct page *page, unsigned int offset, unsigned int len, unsigned int truesize, bool hdr_valid, unsigned int metasize, - bool whole_page) + unsigned int headroom) { struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; @@ -398,28 +398,16 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, else hdr_padded_len = sizeof(struct padded_vnet_hdr); - /* If whole_page, there is an offset between the beginning of the + /* If headroom is not 0, there is an offset between the beginning of the * data and the allocated space, otherwise the data and the allocated * space are aligned. * * Buffers with headroom use PAGE_SIZE as alloc size, see * add_recvbuf_mergeable() + get_mergeable_buf_len() */ - if (whole_page) { - /* Buffers with whole_page use PAGE_SIZE as alloc size, - * see add_recvbuf_mergeable() + get_mergeable_buf_len() - */ - truesize = PAGE_SIZE; - - /* page maybe head page, so we should get the buf by p, not the - * page - */ - tailroom = truesize - len - offset_in_page(p); - buf = (char *)((unsigned long)p & PAGE_MASK); - } else { - tailroom = truesize - len; - buf = p; - } + truesize = headroom ? PAGE_SIZE : truesize; + tailroom = truesize - len - headroom; + buf = p - headroom; len -= hdr_len; offset += hdr_padded_len; @@ -540,19 +528,20 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, * functions to perfectly solve these three problems at the same time. 
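 * One more detail in the virtio-net hunk that follows: the macro now
 * latches smp_processor_id() into a local "cpu" up front and reuses it
 * for both the queue-pair computation and __netif_tx_lock(), instead of
 * re-reading the processor id (and raw_smp_processor_id()) at each use.
 * The resulting shape, reduced to its core:
 *
 *	int cpu = smp_processor_id();
 *	qp = cpu % v->curr_queue_pairs;
 *	txq = netdev_get_tx_queue(v->dev, qp);
 *	__netif_tx_lock(txq, cpu);
 *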
*/ #define virtnet_xdp_get_sq(vi) ({ \ + int cpu = smp_processor_id(); \ struct netdev_queue *txq; \ typeof(vi) v = (vi); \ unsigned int qp; \ \ if (v->curr_queue_pairs > nr_cpu_ids) { \ qp = v->curr_queue_pairs - v->xdp_queue_pairs; \ - qp += smp_processor_id(); \ + qp += cpu; \ txq = netdev_get_tx_queue(v->dev, qp); \ __netif_tx_acquire(txq); \ } else { \ - qp = smp_processor_id() % v->curr_queue_pairs; \ + qp = cpu % v->curr_queue_pairs; \ txq = netdev_get_tx_queue(v->dev, qp); \ - __netif_tx_lock(txq, raw_smp_processor_id()); \ + __netif_tx_lock(txq, cpu); \ } \ v->sq + qp; \ }) @@ -978,7 +967,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, put_page(page); head_skb = page_to_skb(vi, rq, xdp_page, offset, len, PAGE_SIZE, false, - metasize, true); + metasize, + VIRTIO_XDP_HEADROOM); return head_skb; } break; @@ -1029,7 +1019,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, rcu_read_unlock(); head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog, - metasize, !!headroom); + metasize, headroom); curr_skb = head_skb; if (unlikely(!curr_skb)) @@ -2208,14 +2198,14 @@ static int virtnet_set_channels(struct net_device *dev, if (vi->rq[0].xdp_prog) return -EINVAL; - get_online_cpus(); + cpus_read_lock(); err = _virtnet_set_queues(vi, queue_pairs); if (err) { - put_online_cpus(); + cpus_read_unlock(); goto err; } virtnet_set_affinity(vi); - put_online_cpus(); + cpus_read_unlock(); netif_set_real_num_tx_queues(dev, queue_pairs); netif_set_real_num_rx_queues(dev, queue_pairs); @@ -2331,7 +2321,9 @@ static int virtnet_get_link_ksettings(struct net_device *dev, } static int virtnet_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct virtnet_info *vi = netdev_priv(dev); int i, napi_weight; @@ -2352,7 +2344,9 @@ static int virtnet_set_coalesce(struct net_device *dev, } static int virtnet_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ethtool_coalesce ec_default = { .cmd = ETHTOOL_GCOALESCE, @@ -2970,9 +2964,9 @@ static int init_vqs(struct virtnet_info *vi) if (ret) goto err_free; - get_online_cpus(); + cpus_read_lock(); virtnet_set_affinity(vi); - put_online_cpus(); + cpus_read_unlock(); return 0; diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile index c5a167a1c85c..7a38925f4165 100644 --- a/drivers/net/vmxnet3/Makefile +++ b/drivers/net/vmxnet3/Makefile @@ -2,7 +2,7 @@ # # Linux driver for VMware's vmxnet3 ethernet NIC. # -# Copyright (C) 2007-2020, VMware, Inc. All Rights Reserved. +# Copyright (C) 2007-2021, VMware, Inc. All Rights Reserved. # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h index 8c014c98471c..f9f3a23d1698 100644 --- a/drivers/net/vmxnet3/upt1_defs.h +++ b/drivers/net/vmxnet3/upt1_defs.h @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h index a8d5ebd47c71..74d4e8bc4abc 100644 --- a/drivers/net/vmxnet3/vmxnet3_defs.h +++ b/drivers/net/vmxnet3/vmxnet3_defs.h @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -98,6 +98,9 @@ enum { VMXNET3_CMD_GET_TXDATA_DESC_SIZE, VMXNET3_CMD_GET_COALESCE, VMXNET3_CMD_GET_RSS_FIELDS, + VMXNET3_CMD_GET_RESERVED2, + VMXNET3_CMD_GET_RESERVED3, + VMXNET3_CMD_GET_MAX_QUEUES_CONF, }; /* @@ -341,13 +344,15 @@ struct Vmxnet3_RxCompDescExt { #define VMXNET3_TXD_EOP_SIZE 1 /* value of RxCompDesc.rssType */ -enum { - VMXNET3_RCD_RSS_TYPE_NONE = 0, - VMXNET3_RCD_RSS_TYPE_IPV4 = 1, - VMXNET3_RCD_RSS_TYPE_TCPIPV4 = 2, - VMXNET3_RCD_RSS_TYPE_IPV6 = 3, - VMXNET3_RCD_RSS_TYPE_TCPIPV6 = 4, -}; +#define VMXNET3_RCD_RSS_TYPE_NONE 0 +#define VMXNET3_RCD_RSS_TYPE_IPV4 1 +#define VMXNET3_RCD_RSS_TYPE_TCPIPV4 2 +#define VMXNET3_RCD_RSS_TYPE_IPV6 3 +#define VMXNET3_RCD_RSS_TYPE_TCPIPV6 4 +#define VMXNET3_RCD_RSS_TYPE_UDPIPV4 5 +#define VMXNET3_RCD_RSS_TYPE_UDPIPV6 6 +#define VMXNET3_RCD_RSS_TYPE_ESPIPV4 7 +#define VMXNET3_RCD_RSS_TYPE_ESPIPV6 8 /* a union for accessing all cmd/completion descriptors */ @@ -533,6 +538,13 @@ enum vmxnet3_intr_type { /* addition 1 for events */ #define VMXNET3_MAX_INTRS 25 +/* Version 6 and later will use below macros */ +#define VMXNET3_EXT_MAX_TX_QUEUES 32 +#define VMXNET3_EXT_MAX_RX_QUEUES 32 +/* addition 1 for events */ +#define VMXNET3_EXT_MAX_INTRS 65 +#define VMXNET3_FIRST_SET_INTRS 64 + /* value of intrCtrl */ #define VMXNET3_IC_DISABLE_ALL 0x1 /* bit 0 */ @@ -547,6 +559,19 @@ struct Vmxnet3_IntrConf { __le32 reserved[2]; }; +struct Vmxnet3_IntrConfExt { + u8 autoMask; + u8 numIntrs; /* # of interrupts */ + u8 eventIntrIdx; + u8 reserved; + __le32 intrCtrl; + __le32 reserved1; + u8 modLevels[VMXNET3_EXT_MAX_INTRS]; /* moderation level for + * each intr + */ + u8 reserved2[3]; +}; + /* one bit per VLAN ID, the size is in the units of u32 */ #define VMXNET3_VFT_SIZE (4096 / (sizeof(u32) * 8)) @@ -719,11 +744,16 @@ struct Vmxnet3_DSDevRead { struct Vmxnet3_VariableLenConfDesc pluginConfDesc; }; +struct Vmxnet3_DSDevReadExt { + /* read-only region for device, read by dev in response to a SET cmd */ + struct Vmxnet3_IntrConfExt intrConfExt; +}; + /* All structures in DriverShared are padded to multiples of 8 bytes */ struct Vmxnet3_DriverShared { __le32 magic; /* make devRead start at 64bit boundaries */ - __le32 pad; + __le32 size; /* size of DriverShared */ struct Vmxnet3_DSDevRead devRead; __le32 ecr; __le32 reserved; @@ -734,6 +764,7 @@ struct Vmxnet3_DriverShared { * command */ } cu; + struct Vmxnet3_DSDevReadExt devReadExt; }; @@ -764,6 +795,7 @@ struct Vmxnet3_DriverShared { ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0) #define VMXNET3_MAX_MTU 9000 +#define VMXNET3_V6_MAX_MTU 9190 #define VMXNET3_MIN_MTU 60 #define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */ diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 6e87f1fc4874..142f70670f5c 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ 
b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -314,10 +314,10 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, { if (tbi->map_type == VMXNET3_MAP_SINGLE) dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); else if (tbi->map_type == VMXNET3_MAP_PAGE) dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); else BUG_ON(tbi->map_type != VMXNET3_MAP_NONE); @@ -585,7 +585,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, rbi->dma_addr = dma_map_single( &adapter->pdev->dev, rbi->skb->data, rbi->len, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, rbi->dma_addr)) { dev_kfree_skb_any(rbi->skb); @@ -609,7 +609,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, rbi->dma_addr = dma_map_page( &adapter->pdev->dev, rbi->page, 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, rbi->dma_addr)) { put_page(rbi->page); @@ -723,7 +723,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, tbi->map_type = VMXNET3_MAP_SINGLE; tbi->dma_addr = dma_map_single(&adapter->pdev->dev, skb->data + buf_offset, buf_size, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) return -EFAULT; @@ -1449,7 +1449,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, new_dma_addr = dma_map_single(&adapter->pdev->dev, new_skb->data, rbi->len, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, new_dma_addr)) { dev_kfree_skb(new_skb); @@ -1467,7 +1467,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, rbi->len, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); /* Immediate refill */ rbi->skb = new_skb; @@ -1478,10 +1478,28 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, #ifdef VMXNET3_RSS if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE && - (adapter->netdev->features & NETIF_F_RXHASH)) + (adapter->netdev->features & NETIF_F_RXHASH)) { + enum pkt_hash_types hash_type; + + switch (rcd->rssType) { + case VMXNET3_RCD_RSS_TYPE_IPV4: + case VMXNET3_RCD_RSS_TYPE_IPV6: + hash_type = PKT_HASH_TYPE_L3; + break; + case VMXNET3_RCD_RSS_TYPE_TCPIPV4: + case VMXNET3_RCD_RSS_TYPE_TCPIPV6: + case VMXNET3_RCD_RSS_TYPE_UDPIPV4: + case VMXNET3_RCD_RSS_TYPE_UDPIPV6: + hash_type = PKT_HASH_TYPE_L4; + break; + default: + hash_type = PKT_HASH_TYPE_L3; + break; + } skb_set_hash(ctx->skb, le32_to_cpu(rcd->rssHash), - PKT_HASH_TYPE_L3); + hash_type); + } #endif skb_put(ctx->skb, rcd->len); @@ -1528,7 +1546,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, new_dma_addr = dma_map_page(&adapter->pdev->dev, new_page, 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, new_dma_addr)) { put_page(new_page); @@ -1541,7 +1559,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, dma_unmap_page(&adapter->pdev->dev, rbi->dma_addr, rbi->len, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); vmxnet3_append_frag(ctx->skb, rcd, rbi); @@ -1659,13 +1677,13 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, if (rxd->btype == 
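/*
 * The vmxnet3_drv.c changes in this region are a mechanical migration
 * from the legacy PCI DMA wrappers to the generic DMA API:
 * PCI_DMA_TODEVICE and PCI_DMA_FROMDEVICE become DMA_TO_DEVICE and
 * DMA_FROM_DEVICE, and the pci_set_dma_mask() plus
 * pci_set_consistent_dma_mask() pair collapses into a single call. The
 * before/after shape:
 *
 *	- dma_unmap_single(&pdev->dev, addr, len, PCI_DMA_TODEVICE);
 *	+ dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);
 *
 *	- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
 *	-     pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
 *	+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) == 0)
 */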
VMXNET3_RXD_BTYPE_HEAD && rq->buf_info[ring_idx][i].skb) { dma_unmap_single(&adapter->pdev->dev, rxd->addr, - rxd->len, PCI_DMA_FROMDEVICE); + rxd->len, DMA_FROM_DEVICE); dev_kfree_skb(rq->buf_info[ring_idx][i].skb); rq->buf_info[ring_idx][i].skb = NULL; } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && rq->buf_info[ring_idx][i].page) { dma_unmap_page(&adapter->pdev->dev, rxd->addr, - rxd->len, PCI_DMA_FROMDEVICE); + rxd->len, DMA_FROM_DEVICE); put_page(rq->buf_info[ring_idx][i].page); rq->buf_info[ring_idx][i].page = NULL; } @@ -2401,7 +2419,7 @@ vmxnet3_set_mc(struct net_device *netdev) &adapter->pdev->dev, new_table, sz, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (!dma_mapping_error(&adapter->pdev->dev, new_table_pa)) { new_mode |= VMXNET3_RXM_MCAST; @@ -2437,7 +2455,7 @@ vmxnet3_set_mc(struct net_device *netdev) if (new_table_pa_valid) dma_unmap_single(&adapter->pdev->dev, new_table_pa, - rxConf->mfTableLen, PCI_DMA_TODEVICE); + rxConf->mfTableLen, DMA_TO_DEVICE); kfree(new_table); } @@ -2460,6 +2478,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) { struct Vmxnet3_DriverShared *shared = adapter->shared; struct Vmxnet3_DSDevRead *devRead = &shared->devRead; + struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt; struct Vmxnet3_TxQueueConf *tqc; struct Vmxnet3_RxQueueConf *rqc; int i; @@ -2572,14 +2591,26 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) #endif /* VMXNET3_RSS */ /* intr settings */ - devRead->intrConf.autoMask = adapter->intr.mask_mode == - VMXNET3_IMM_AUTO; - devRead->intrConf.numIntrs = adapter->intr.num_intrs; - for (i = 0; i < adapter->intr.num_intrs; i++) - devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; + if (!VMXNET3_VERSION_GE_6(adapter) || + !adapter->queuesExtEnabled) { + devRead->intrConf.autoMask = adapter->intr.mask_mode == + VMXNET3_IMM_AUTO; + devRead->intrConf.numIntrs = adapter->intr.num_intrs; + for (i = 0; i < adapter->intr.num_intrs; i++) + devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; + + devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; + devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL); + } else { + devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode == + VMXNET3_IMM_AUTO; + devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs; + for (i = 0; i < adapter->intr.num_intrs; i++) + devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i]; - devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; - devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL); + devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx; + devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL); + } /* rx filter settings */ devRead->rxFilterConf.rxMode = 0; @@ -2717,6 +2748,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) * tx queue if the link is up. 
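 * (A note on the interrupt-configuration hunk just above: on version 6
 * devices with queuesExtEnabled set, the interrupt count and per-vector
 * moderation levels move from devRead->intrConf, whose modLevels array
 * is sized for VMXNET3_MAX_INTRS = 25 vectors, to the new
 * devReadExt->intrConfExt, sized for VMXNET3_EXT_MAX_INTRS = 65, enough
 * for the 32 tx + 32 rx queues plus one event interrupt that revision 6
 * allows. Schematically, and only schematically, since the two structs
 * are distinct types:
 *
 *	use_ext = VMXNET3_VERSION_GE_6(adapter) && adapter->queuesExtEnabled;
 *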
*/ vmxnet3_check_link(adapter, true); + netif_tx_wake_all_queues(adapter->netdev); for (i = 0; i < adapter->num_rx_queues; i++) napi_enable(&adapter->rx_queue[i].napi); vmxnet3_enable_all_intrs(adapter); @@ -3372,6 +3404,8 @@ vmxnet3_probe_device(struct pci_dev *pdev, int size; int num_tx_queues; int num_rx_queues; + int queues; + unsigned long flags; if (!pci_msi_enabled()) enable_mq = 0; @@ -3383,7 +3417,6 @@ vmxnet3_probe_device(struct pci_dev *pdev, else #endif num_rx_queues = 1; - num_rx_queues = rounddown_pow_of_two(num_rx_queues); if (enable_mq) num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES, @@ -3391,13 +3424,8 @@ vmxnet3_probe_device(struct pci_dev *pdev, else num_tx_queues = 1; - num_tx_queues = rounddown_pow_of_two(num_tx_queues); netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter), max(num_tx_queues, num_rx_queues)); - dev_info(&pdev->dev, - "# of Tx queues : %d, # of Rx queues : %d\n", - num_tx_queues, num_rx_queues); - if (!netdev) return -ENOMEM; @@ -3410,19 +3438,12 @@ vmxnet3_probe_device(struct pci_dev *pdev, adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { - dev_err(&pdev->dev, - "pci_set_consistent_dma_mask failed\n"); - err = -EIO; - goto err_set_mask; - } + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) == 0) { dma64 = true; } else { - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { - dev_err(&pdev->dev, - "pci_set_dma_mask failed\n"); - err = -EIO; + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "dma_set_mask failed\n"); goto err_set_mask; } dma64 = false; @@ -3431,7 +3452,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, spin_lock_init(&adapter->cmd_lock); adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, sizeof(struct vmxnet3_adapter), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { dev_err(&pdev->dev, "Failed to map dma\n"); err = -EFAULT; @@ -3447,51 +3468,22 @@ vmxnet3_probe_device(struct pci_dev *pdev, goto err_alloc_shared; } - adapter->num_rx_queues = num_rx_queues; - adapter->num_tx_queues = num_tx_queues; - adapter->rx_buf_per_pkt = 1; - - size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; - size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; - adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size, - &adapter->queue_desc_pa, - GFP_KERNEL); - - if (!adapter->tqd_start) { - dev_err(&pdev->dev, "Failed to allocate memory\n"); - err = -ENOMEM; - goto err_alloc_queue_desc; - } - adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + - adapter->num_tx_queues); - - adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev, - sizeof(struct Vmxnet3_PMConf), - &adapter->pm_conf_pa, - GFP_KERNEL); - if (adapter->pm_conf == NULL) { - err = -ENOMEM; - goto err_alloc_pm; - } - -#ifdef VMXNET3_RSS - - adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev, - sizeof(struct UPT1_RSSConf), - &adapter->rss_conf_pa, - GFP_KERNEL); - if (adapter->rss_conf == NULL) { - err = -ENOMEM; - goto err_alloc_rss; - } -#endif /* VMXNET3_RSS */ - err = vmxnet3_alloc_pci_resources(adapter); if (err < 0) goto err_alloc_pci; ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); - if (ver & (1 << VMXNET3_REV_4)) { + if (ver & (1 << VMXNET3_REV_6)) { + VMXNET3_WRITE_BAR1_REG(adapter, + VMXNET3_REG_VRRS, + 1 << 
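/*
 * Version negotiation around this point keeps the established vmxnet3
 * scheme: the VRRS register advertises supported revisions as a bit
 * mask, the driver selects the highest bit it understands (now up to
 * REV_6), writes that single bit back, and stores version = rev + 1.
 * An equivalent loop form of the if/else chain (a sketch only; the
 * driver spells the cases out and errors if no bit matches):
 *
 *	for (rev = VMXNET3_REV_6; rev >= 0; rev--) {
 *		if (ver & (1 << rev)) {
 *			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS,
 *					       1 << rev);
 *			adapter->version = rev + 1;
 *			break;
 *		}
 *	}
 */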
VMXNET3_REV_6); + adapter->version = VMXNET3_REV_6 + 1; + } else if (ver & (1 << VMXNET3_REV_5)) { + VMXNET3_WRITE_BAR1_REG(adapter, + VMXNET3_REG_VRRS, + 1 << VMXNET3_REV_5); + adapter->version = VMXNET3_REV_5 + 1; + } else if (ver & (1 << VMXNET3_REV_4)) { VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1 << VMXNET3_REV_4); @@ -3529,6 +3521,77 @@ vmxnet3_probe_device(struct pci_dev *pdev, goto err_ver; } + if (VMXNET3_VERSION_GE_6(adapter)) { + spin_lock_irqsave(&adapter->cmd_lock, flags); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_MAX_QUEUES_CONF); + queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); + if (queues > 0) { + adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff)); + adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff)); + } else { + adapter->num_rx_queues = min(num_rx_queues, + VMXNET3_DEVICE_DEFAULT_RX_QUEUES); + adapter->num_tx_queues = min(num_tx_queues, + VMXNET3_DEVICE_DEFAULT_TX_QUEUES); + } + if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES || + adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) { + adapter->queuesExtEnabled = true; + } else { + adapter->queuesExtEnabled = false; + } + } else { + adapter->queuesExtEnabled = false; + num_rx_queues = rounddown_pow_of_two(num_rx_queues); + num_tx_queues = rounddown_pow_of_two(num_tx_queues); + adapter->num_rx_queues = min(num_rx_queues, + VMXNET3_DEVICE_DEFAULT_RX_QUEUES); + adapter->num_tx_queues = min(num_tx_queues, + VMXNET3_DEVICE_DEFAULT_TX_QUEUES); + } + dev_info(&pdev->dev, + "# of Tx queues : %d, # of Rx queues : %d\n", + adapter->num_tx_queues, adapter->num_rx_queues); + + adapter->rx_buf_per_pkt = 1; + + size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; + size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; + adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size, + &adapter->queue_desc_pa, + GFP_KERNEL); + + if (!adapter->tqd_start) { + dev_err(&pdev->dev, "Failed to allocate memory\n"); + err = -ENOMEM; + goto err_ver; + } + adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + + adapter->num_tx_queues); + + adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev, + sizeof(struct Vmxnet3_PMConf), + &adapter->pm_conf_pa, + GFP_KERNEL); + if (adapter->pm_conf == NULL) { + err = -ENOMEM; + goto err_alloc_pm; + } + +#ifdef VMXNET3_RSS + + adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev, + sizeof(struct UPT1_RSSConf), + &adapter->rss_conf_pa, + GFP_KERNEL); + if (adapter->rss_conf == NULL) { + err = -ENOMEM; + goto err_alloc_rss; + } +#endif /* VMXNET3_RSS */ + if (VMXNET3_VERSION_GE_3(adapter)) { adapter->coal_conf = dma_alloc_coherent(&adapter->pdev->dev, @@ -3538,7 +3601,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, GFP_KERNEL); if (!adapter->coal_conf) { err = -ENOMEM; - goto err_ver; + goto err_coal_conf; } adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED; adapter->default_coal_mode = true; @@ -3581,9 +3644,12 @@ vmxnet3_probe_device(struct pci_dev *pdev, vmxnet3_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - /* MTU range: 60 - 9000 */ + /* MTU range: 60 - 9190 */ netdev->min_mtu = VMXNET3_MIN_MTU; - netdev->max_mtu = VMXNET3_MAX_MTU; + if (VMXNET3_VERSION_GE_6(adapter)) + netdev->max_mtu = VMXNET3_V6_MAX_MTU; + else + netdev->max_mtu = VMXNET3_MAX_MTU; INIT_WORK(&adapter->work, vmxnet3_reset_work); set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); @@ -3621,9 +3687,7 @@ err_register: 
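/*
 * Decoding of VMXNET3_CMD_GET_MAX_QUEUES_CONF above: the device packs
 * both limits into one 32-bit reply, rx in bits 15:8 and tx in bits 7:0,
 * with a non-positive value meaning "no information, fall back to the
 * defaults":
 *
 *	queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
 *	if (queues > 0) {
 *		max_rx = (queues >> 8) & 0xff;
 *		max_tx = queues & 0xff;
 *	}
 */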
adapter->coal_conf, adapter->coal_conf_pa); } vmxnet3_free_intr_resources(adapter); -err_ver: - vmxnet3_free_pci_resources(adapter); -err_alloc_pci: +err_coal_conf: #ifdef VMXNET3_RSS dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), adapter->rss_conf, adapter->rss_conf_pa); @@ -3634,13 +3698,15 @@ err_alloc_rss: err_alloc_pm: dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start, adapter->queue_desc_pa); -err_alloc_queue_desc: +err_ver: + vmxnet3_free_pci_resources(adapter); +err_alloc_pci: dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_DriverShared), adapter->shared, adapter->shared_pa); err_alloc_shared: dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, - sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); + sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE); err_set_mask: free_netdev(netdev); return err; @@ -3653,7 +3719,8 @@ vmxnet3_remove_device(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct vmxnet3_adapter *adapter = netdev_priv(netdev); int size = 0; - int num_rx_queues; + int num_rx_queues, rx_queues; + unsigned long flags; #ifdef VMXNET3_RSS if (enable_mq) @@ -3662,7 +3729,24 @@ vmxnet3_remove_device(struct pci_dev *pdev) else #endif num_rx_queues = 1; - num_rx_queues = rounddown_pow_of_two(num_rx_queues); + if (!VMXNET3_VERSION_GE_6(adapter)) { + num_rx_queues = rounddown_pow_of_two(num_rx_queues); + } + if (VMXNET3_VERSION_GE_6(adapter)) { + spin_lock_irqsave(&adapter->cmd_lock, flags); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_MAX_QUEUES_CONF); + rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); + if (rx_queues > 0) + rx_queues = (rx_queues >> 8) & 0xff; + else + rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES); + num_rx_queues = min(num_rx_queues, rx_queues); + } else { + num_rx_queues = min(num_rx_queues, + VMXNET3_DEVICE_DEFAULT_RX_QUEUES); + } cancel_work_sync(&adapter->work); @@ -3690,7 +3774,7 @@ vmxnet3_remove_device(struct pci_dev *pdev) sizeof(struct Vmxnet3_DriverShared), adapter->shared, adapter->shared_pa); dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, - sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); + sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE); free_netdev(netdev); } diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 1b483cf2b1ca..5dd8360b21a0 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -787,6 +787,10 @@ vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter, case AH_ESP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: + if (VMXNET3_VERSION_GE_6(adapter) && + (rss_fields & VMXNET3_RSS_FIELDS_ESPIP6)) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; case SCTP_V6_FLOW: case IPV6_FLOW: info->data |= RXH_IP_SRC | RXH_IP_DST; @@ -871,6 +875,22 @@ vmxnet3_set_rss_hash_opt(struct net_device *netdev, case ESP_V6_FLOW: case AH_V6_FLOW: case AH_ESP_V6_FLOW: + if (!VMXNET3_VERSION_GE_6(adapter)) + return -EOPNOTSUPP; + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP6; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_fields |= VMXNET3_RSS_FIELDS_ESPIP6; + break; + default: + return -EINVAL; + } + break; case SCTP_V4_FLOW: case SCTP_V6_FLOW: if (!(nfc->data & RXH_IP_SRC) || @@ -1033,8 +1053,10 @@ vmxnet3_set_rss(struct 
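/*
 * The relocated error labels above follow the usual kernel unwind rule:
 * labels run in exact reverse order of the setup steps. Because the
 * queue descriptors, pm_conf and rss_conf are now allocated after
 * version negotiation and the PCI resources, the unwind order becomes
 * (abridged from the hunks above):
 *
 *	err_coal_conf:
 *		dma_free_coherent(...);		(rss_conf, pm_conf, tqd_start)
 *	err_ver:
 *		vmxnet3_free_pci_resources(adapter);
 *	err_alloc_pci:
 *		dma_free_coherent(...);		(the DriverShared area)
 */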
net_device *netdev, const u32 *p, const u8 *key, } #endif -static int -vmxnet3_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +static int vmxnet3_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); @@ -1068,8 +1090,10 @@ vmxnet3_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) return 0; } -static int -vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +static int vmxnet3_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct Vmxnet3_DriverShared *shared = adapter->shared; diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index e910596b79cf..7027ff483fa5 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -69,18 +69,20 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.5.0.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.6.0.0-k" /* Each byte of this 32-bit integer encodes a version number in * VMXNET3_DRIVER_VERSION_STRING. */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01050000 +#define VMXNET3_DRIVER_VERSION_NUM 0x01060000 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ #define VMXNET3_RSS #endif +#define VMXNET3_REV_6 5 /* Vmxnet3 Rev. 6 */ +#define VMXNET3_REV_5 4 /* Vmxnet3 Rev. 5 */ #define VMXNET3_REV_4 3 /* Vmxnet3 Rev. 4 */ #define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */ #define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 
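 * (On the ethtool coalesce hunks above and earlier in this diff: the
 * core now hands every get/set_coalesce implementation two extra
 * arguments, so r8152, virtio_net and vmxnet3 all grow the same
 * prototype; "foo" below is a placeholder:
 *
 *	static int foo_get_coalesce(struct net_device *netdev,
 *				    struct ethtool_coalesce *ec,
 *				    struct kernel_ethtool_coalesce *kernel_coal,
 *				    struct netlink_ext_ack *extack);
 *
 * kernel_coal carries newer kernel-only coalesce fields and extack lets
 * a driver attach a precise netlink error message; drivers that need
 * neither, as here, simply accept and ignore them.)
 *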
2 */ @@ -301,15 +303,18 @@ struct vmxnet3_rx_queue { struct vmxnet3_rq_driver_stats stats; } __attribute__((__aligned__(SMP_CACHE_BYTES))); -#define VMXNET3_DEVICE_MAX_TX_QUEUES 8 -#define VMXNET3_DEVICE_MAX_RX_QUEUES 8 /* Keep this value as a power of 2 */ +#define VMXNET3_DEVICE_MAX_TX_QUEUES 32 +#define VMXNET3_DEVICE_MAX_RX_QUEUES 32 /* Keep this value as a power of 2 */ + +#define VMXNET3_DEVICE_DEFAULT_TX_QUEUES 8 +#define VMXNET3_DEVICE_DEFAULT_RX_QUEUES 8 /* Keep this value as a power of 2 */ /* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */ #define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4) #define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \ VMXNET3_DEVICE_MAX_RX_QUEUES + 1) -#define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */ +#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for tx, 1 for rx pair and 1 for event */ struct vmxnet3_intr { @@ -396,6 +401,7 @@ struct vmxnet3_adapter { dma_addr_t adapter_pa; dma_addr_t pm_conf_pa; dma_addr_t rss_conf_pa; + bool queuesExtEnabled; }; #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ @@ -421,6 +427,10 @@ struct vmxnet3_adapter { (adapter->version >= VMXNET3_REV_3 + 1) #define VMXNET3_VERSION_GE_4(adapter) \ (adapter->version >= VMXNET3_REV_4 + 1) +#define VMXNET3_VERSION_GE_5(adapter) \ + (adapter->version >= VMXNET3_REV_5 + 1) +#define VMXNET3_VERSION_GE_6(adapter) \ + (adapter->version >= VMXNET3_REV_6 + 1) /* must be a multiple of VMXNET3_RING_SIZE_ALIGN */ #define VMXNET3_DEF_TX_RING_SIZE 512 diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 8bbe2a7bb141..bf2fac913942 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -857,30 +857,24 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s unsigned int hh_len = LL_RESERVED_SPACE(dev); struct neighbour *neigh; bool is_v6gw = false; - int ret = -EINVAL; nf_reset_ct(skb); /* Be paranoid, rather than too clever. */ if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { - struct sk_buff *skb2; - - skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev)); - if (!skb2) { - ret = -ENOMEM; - goto err; + skb = skb_expand_head(skb, hh_len); + if (!skb) { + dev->stats.tx_errors++; + return -ENOMEM; } - if (skb->sk) - skb_set_owner_w(skb2, skb->sk); - - consume_skb(skb); - skb = skb2; } rcu_read_lock_bh(); neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); if (!IS_ERR(neigh)) { + int ret; + sock_confirm_neigh(skb, neigh); /* if crossing protocols, can not use the cached header */ ret = neigh_output(neigh, skb, is_v6gw); @@ -889,9 +883,8 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s } rcu_read_unlock_bh(); -err: vrf_tx_error(skb->dev, skb); - return ret; + return -EINVAL; } static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb) diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 473df2505c8e..592a8389fc5a 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -290,30 +290,6 @@ config SLIC_DS26522 To compile this driver as a module, choose M here: the module will be called slic_ds26522. -config DSCC4_PCISYNC - bool "Etinc PCISYNC features" - depends on DSCC4 - help - Due to Etinc's design choice for its PCISYNC cards, some operations - are only allowed on specific ports of the DSCC4. This option is the - only way for the driver to know that it shouldn't return a success - code for these operations. - - Please say Y if your card is an Etinc's PCISYNC. 
- -config DSCC4_PCI_RST - bool "Hard reset support" - depends on DSCC4 - help - Various DSCC4 bugs forbid any reliable software reset of the ASIC. - As a replacement, some vendors provide a way to assert the PCI #RST - pin of DSCC4 through the GPIO port of the card. If you choose Y, - the driver will make use of this feature before module removal - (i.e. rmmod). The feature is known to be available on Commtech's - cards. Contact your manufacturer for details. - - Say Y if your card supports this feature. - config IXP4XX_HSS tristate "Intel IXP4xx HSS (synchronous serial port) support" depends on HDLC && IXP4XX_NPE && IXP4XX_QMGR @@ -337,33 +313,6 @@ config LAPBETHER To compile this driver as a module, choose M here: the module will be called lapbether. - If unsure, say N. - -config SBNI - tristate "Granch SBNI12 Leased Line adapter support" - depends on X86 - help - Driver for ISA SBNI12-xx cards which are low cost alternatives to - leased line modems. - - You can find more information and last versions of drivers and - utilities at <http://www.granch.ru/>. If you have any question you - can send email to <sbni@granch.ru>. - - To compile this driver as a module, choose M here: the - module will be called sbni. - - If unsure, say N. - -config SBNI_MULTILINE - bool "Multiple line feature support" - depends on SBNI - help - Schedule traffic for some parallel lines, via SBNI12 adapters. - - If you have two computers connected with two parallel lines it's - possible to increase transfer rate nearly twice. You should have - a program named 'sbniconfig' to configure adapters. If unsure, say N. diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index 081666c36ca2..f6b92efffc94 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -22,7 +22,6 @@ obj-$(CONFIG_FARSYNC) += farsync.o obj-$(CONFIG_LANMEDIA) += lmc/ obj-$(CONFIG_LAPBETHER) += lapbether.o -obj-$(CONFIG_SBNI) += sbni.o obj-$(CONFIG_N2) += n2.o obj-$(CONFIG_C101) += c101.o obj-$(CONFIG_WANXL) += wanxl.o diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index 059c2f7133be..8dd14d916c3a 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c @@ -208,14 +208,12 @@ static int c101_close(struct net_device *dev) return 0; } -static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int c101_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) { - const size_t size = sizeof(sync_serial_settings); - sync_serial_settings new_line; - sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; +#ifdef DEBUG_RINGS port_t *port = dev_to_port(dev); -#ifdef DEBUG_RINGS if (cmd == SIOCDEVPRIVATE) { sca_dump_rings(dev); printk(KERN_DEBUG "MSCI1: ST: %02x %02x %02x %02x\n", @@ -226,14 +224,22 @@ static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return 0; } #endif - if (cmd != SIOCWANDEV) - return hdlc_ioctl(dev, ifr, cmd); - switch (ifr->ifr_settings.type) { + return -EOPNOTSUPP; +} + +static int c101_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings new_line; + sync_serial_settings __user *line = ifs->ifs_ifsu.sync; + port_t *port = dev_to_port(dev); + + switch (ifs->type) { case IF_GET_IFACE: - ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ + ifs->type = IF_IFACE_SYNC_SERIAL; + if (ifs->size < size) { + ifs->size = size; /* data size wanted 
*/ return -ENOBUFS; } if (copy_to_user(line, &port->settings, size)) @@ -261,7 +267,7 @@ static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return 0; default: - return hdlc_ioctl(dev, ifr, cmd); + return hdlc_ioctl(dev, ifs); } } @@ -286,7 +292,8 @@ static const struct net_device_ops c101_ops = { .ndo_open = c101_open, .ndo_stop = c101_close, .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = c101_ioctl, + .ndo_siocwandev = c101_ioctl, + .ndo_siocdevprivate = c101_siocdevprivate, }; static int __init c101_run(unsigned long irq, unsigned long winbase) diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 43caab0b7dee..23d2954d9747 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -267,7 +267,6 @@ static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d); static char *cosa_net_setup_rx(struct channel_data *channel, int size); static int cosa_net_rx_done(struct channel_data *channel); static int cosa_net_tx_done(struct channel_data *channel, int size); -static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); /* Character device */ static char *chrdev_setup_rx(struct channel_data *channel, int size); @@ -415,7 +414,7 @@ static const struct net_device_ops cosa_ops = { .ndo_open = cosa_net_open, .ndo_stop = cosa_net_close, .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = cosa_net_ioctl, + .ndo_siocwandev = hdlc_ioctl, .ndo_tx_timeout = cosa_net_timeout, }; @@ -1169,18 +1168,6 @@ static int cosa_ioctl_common(struct cosa_data *cosa, return -ENOIOCTLCMD; } -static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - int rv; - struct channel_data *chan = dev_to_chan(dev); - - rv = cosa_ioctl_common(chan->cosa, chan, cmd, - (unsigned long)ifr->ifr_data); - if (rv != -ENOIOCTLCMD) - return rv; - return hdlc_ioctl(dev, ifr, cmd); -} - static long cosa_chardev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index b3466e084e84..6a212c085435 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -1784,16 +1784,15 @@ gather_conf_info(struct fst_card_info *card, struct fst_port_info *port, static int fst_set_iface(struct fst_card_info *card, struct fst_port_info *port, - struct ifreq *ifr) + struct if_settings *ifs) { sync_serial_settings sync; int i; - if (ifr->ifr_settings.size != sizeof(sync)) + if (ifs->size != sizeof(sync)) return -ENOMEM; - if (copy_from_user - (&sync, ifr->ifr_settings.ifs_ifsu.sync, sizeof(sync))) + if (copy_from_user(&sync, ifs->ifs_ifsu.sync, sizeof(sync))) return -EFAULT; if (sync.loopback) @@ -1801,7 +1800,7 @@ fst_set_iface(struct fst_card_info *card, struct fst_port_info *port, i = port->index; - switch (ifr->ifr_settings.type) { + switch (ifs->type) { case IF_IFACE_V35: FST_WRW(card, portConfig[i].lineInterface, V35); port->hwif = V35; @@ -1857,7 +1856,7 @@ fst_set_iface(struct fst_card_info *card, struct fst_port_info *port, static int fst_get_iface(struct fst_card_info *card, struct fst_port_info *port, - struct ifreq *ifr) + struct if_settings *ifs) { sync_serial_settings sync; int i; @@ -1868,29 +1867,29 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port, */ switch (port->hwif) { case E1: - ifr->ifr_settings.type = IF_IFACE_E1; + ifs->type = IF_IFACE_E1; break; case T1: - ifr->ifr_settings.type = IF_IFACE_T1; + ifs->type = IF_IFACE_T1; break; case V35: - ifr->ifr_settings.type = IF_IFACE_V35; + ifs->type = IF_IFACE_V35; break; case 
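/*
 * The WAN conversions in this region (c101, cosa, farsync, fsl_ucc_hdlc
 * and the hdlc core) replace the catch-all .ndo_do_ioctl with two
 * focused hooks: .ndo_siocwandev, which receives the pre-extracted
 * struct if_settings (so the old "cmd != SIOCWANDEV" guard disappears),
 * and .ndo_siocdevprivate for driver-private commands. A minimal
 * .ndo_siocwandev handler, sketched after the uhdlc_ioctl() shape above
 * ("foo" is a placeholder):
 *
 *	static int foo_wan_ioctl(struct net_device *dev, struct if_settings *ifs)
 *	{
 *		switch (ifs->type) {
 *		case IF_GET_IFACE:
 *			ifs->type = IF_IFACE_SYNC_SERIAL;
 *			return 0;
 *		default:
 *			return hdlc_ioctl(dev, ifs);
 *		}
 *	}
 */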
V24: - ifr->ifr_settings.type = IF_IFACE_V24; + ifs->type = IF_IFACE_V24; break; case X21D: - ifr->ifr_settings.type = IF_IFACE_X21D; + ifs->type = IF_IFACE_X21D; break; case X21: default: - ifr->ifr_settings.type = IF_IFACE_X21; + ifs->type = IF_IFACE_X21; break; } - if (ifr->ifr_settings.size == 0) + if (!ifs->size) return 0; /* only type requested */ - if (ifr->ifr_settings.size < sizeof(sync)) + if (ifs->size < sizeof(sync)) return -ENOMEM; i = port->index; @@ -1901,15 +1900,15 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port, INTCLK ? CLOCK_INT : CLOCK_EXT; sync.loopback = 0; - if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof(sync))) + if (copy_to_user(ifs->ifs_ifsu.sync, &sync, sizeof(sync))) return -EFAULT; - ifr->ifr_settings.size = sizeof(sync); + ifs->size = sizeof(sync); return 0; } static int -fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +fst_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { struct fst_card_info *card; struct fst_port_info *port; @@ -1918,7 +1917,7 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) unsigned long flags; void *buf; - dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, ifr->ifr_data); + dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, data); port = dev_to_port(dev); card = port->card; @@ -1942,11 +1941,10 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* First copy in the header with the length and offset of data * to write */ - if (!ifr->ifr_data) + if (!data) return -EINVAL; - if (copy_from_user(&wrthdr, ifr->ifr_data, - sizeof(struct fstioc_write))) + if (copy_from_user(&wrthdr, data, sizeof(struct fstioc_write))) return -EFAULT; /* Sanity check the parameters. We don't support partial writes @@ -1958,7 +1956,7 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* Now copy the data to the card. 
*/ - buf = memdup_user(ifr->ifr_data + sizeof(struct fstioc_write), + buf = memdup_user(data + sizeof(struct fstioc_write), wrthdr.size); if (IS_ERR(buf)) return PTR_ERR(buf); @@ -1991,12 +1989,12 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } } - if (!ifr->ifr_data) + if (!data) return -EINVAL; gather_conf_info(card, port, &info); - if (copy_to_user(ifr->ifr_data, &info, sizeof(info))) + if (copy_to_user(data, &info, sizeof(info))) return -EFAULT; return 0; @@ -2011,46 +2009,58 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) card->card_no, card->state); return -EIO; } - if (copy_from_user(&info, ifr->ifr_data, sizeof(info))) + if (copy_from_user(&info, data, sizeof(info))) return -EFAULT; return set_conf_from_info(card, port, &info); + default: + return -EINVAL; + } +} - case SIOCWANDEV: - switch (ifr->ifr_settings.type) { - case IF_GET_IFACE: - return fst_get_iface(card, port, ifr); - - case IF_IFACE_SYNC_SERIAL: - case IF_IFACE_V35: - case IF_IFACE_V24: - case IF_IFACE_X21: - case IF_IFACE_X21D: - case IF_IFACE_T1: - case IF_IFACE_E1: - return fst_set_iface(card, port, ifr); - - case IF_PROTO_RAW: - port->mode = FST_RAW; - return 0; +static int +fst_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + struct fst_card_info *card; + struct fst_port_info *port; - case IF_GET_PROTO: - if (port->mode == FST_RAW) { - ifr->ifr_settings.type = IF_PROTO_RAW; - return 0; - } - return hdlc_ioctl(dev, ifr, cmd); + dbg(DBG_IOCTL, "SIOCDEVPRIVATE, %x\n", ifs->type); - default: - port->mode = FST_GEN_HDLC; - dbg(DBG_IOCTL, "Passing this type to hdlc %x\n", - ifr->ifr_settings.type); - return hdlc_ioctl(dev, ifr, cmd); + port = dev_to_port(dev); + card = port->card; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + switch (ifs->type) { + case IF_GET_IFACE: + return fst_get_iface(card, port, ifs); + + case IF_IFACE_SYNC_SERIAL: + case IF_IFACE_V35: + case IF_IFACE_V24: + case IF_IFACE_X21: + case IF_IFACE_X21D: + case IF_IFACE_T1: + case IF_IFACE_E1: + return fst_set_iface(card, port, ifs); + + case IF_PROTO_RAW: + port->mode = FST_RAW; + return 0; + + case IF_GET_PROTO: + if (port->mode == FST_RAW) { + ifs->type = IF_PROTO_RAW; + return 0; } + return hdlc_ioctl(dev, ifs); default: - /* Not one of ours. 
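
This rewrite repeats the shape used across the whole series: the cmd argument and the "is this SIOCWANDEV?" test disappear because the new .ndo_siocwandev hook is only ever invoked for SIOCWANDEV, with the if_settings already extracted from the ifreq by the core. A condensed sketch of the converted pattern ("foo" names are illustrative, hardware specifics elided):

    #include <linux/hdlc.h>
    #include <linux/netdevice.h>

    /* Sketch only: .ndo_siocwandev receives the if_settings directly,
     * so the old cmd check and the ifreq plumbing are gone. */
    static int foo_siocwandev(struct net_device *dev,
                              struct if_settings *ifs)
    {
        switch (ifs->type) {
        case IF_GET_IFACE:              /* report line type/settings */
        case IF_IFACE_SYNC_SERIAL:      /* program the line */
            return -EINVAL;             /* hardware specifics elided */
        default:
            /* Not ours: let the HDLC core try the protocol handlers. */
            return hdlc_ioctl(dev, ifs);
        }
    }

    static const struct net_device_ops foo_ops = {
        .ndo_start_xmit = hdlc_start_xmit,
        .ndo_siocwandev = foo_siocwandev,
    };

Anything the handler does not recognize still falls through to hdlc_ioctl(), which, per the drivers/net/wan/hdlc.c hunk in this patch, asks the attached protocol and then every registered protocol to service the request.
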
Pass through to HDLC package */ - return hdlc_ioctl(dev, ifr, cmd); + port->mode = FST_GEN_HDLC; + dbg(DBG_IOCTL, "Passing this type to hdlc %x\n", + ifs->type); + return hdlc_ioctl(dev, ifs); } } @@ -2310,7 +2320,8 @@ static const struct net_device_ops fst_ops = { .ndo_open = fst_open, .ndo_stop = fst_close, .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = fst_ioctl, + .ndo_siocwandev = fst_ioctl, + .ndo_siocdevprivate = fst_siocdevprivate, .ndo_tx_timeout = fst_tx_timeout, }; diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 39f05fabbfa4..cda1b4ce6b21 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -674,31 +674,28 @@ static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } -static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int uhdlc_ioctl(struct net_device *dev, struct if_settings *ifs) { const size_t size = sizeof(te1_settings); te1_settings line; struct ucc_hdlc_private *priv = netdev_priv(dev); - if (cmd != SIOCWANDEV) - return hdlc_ioctl(dev, ifr, cmd); - - switch (ifr->ifr_settings.type) { + switch (ifs->type) { case IF_GET_IFACE: - ifr->ifr_settings.type = IF_IFACE_E1; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ + ifs->type = IF_IFACE_E1; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ return -ENOBUFS; } memset(&line, 0, sizeof(line)); line.clock_type = priv->clocking; - if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size)) + if (copy_to_user(ifs->ifs_ifsu.sync, &line, size)) return -EFAULT; return 0; default: - return hdlc_ioctl(dev, ifr, cmd); + return hdlc_ioctl(dev, ifs); } } @@ -1053,7 +1050,7 @@ static const struct net_device_ops uhdlc_ops = { .ndo_open = uhdlc_open, .ndo_stop = uhdlc_close, .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = uhdlc_ioctl, + .ndo_siocwandev = uhdlc_ioctl, .ndo_tx_timeout = uhdlc_tx_timeout, }; diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index dd6312b69861..cbed10b1d862 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c @@ -196,16 +196,13 @@ void hdlc_close(struct net_device *dev) } EXPORT_SYMBOL(hdlc_close); -int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs) { struct hdlc_proto *proto = first_proto; int result; - if (cmd != SIOCWANDEV) - return -EINVAL; - if (dev_to_hdlc(dev)->proto) { - result = dev_to_hdlc(dev)->proto->ioctl(dev, ifr); + result = dev_to_hdlc(dev)->proto->ioctl(dev, ifs); if (result != -EINVAL) return result; } @@ -213,7 +210,7 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* Not handled by currently attached protocol (if any) */ while (proto) { - result = proto->ioctl(dev, ifr); + result = proto->ioctl(dev, ifs); if (result != -EINVAL) return result; proto = proto->next; diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index c54fdae950fb..cdebe65a7e2d 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -56,7 +56,7 @@ struct cisco_state { u32 rxseq; /* RX sequence number */ }; -static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr); +static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs); static inline struct cisco_state *state(hdlc_device *hdlc) { @@ -306,21 +306,21 @@ static const struct header_ops cisco_header_ops = { .create = cisco_hard_header, }; -static int cisco_ioctl(struct net_device *dev, 
struct ifreq *ifr) +static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs) { - cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco; + cisco_proto __user *cisco_s = ifs->ifs_ifsu.cisco; const size_t size = sizeof(cisco_proto); cisco_proto new_settings; hdlc_device *hdlc = dev_to_hdlc(dev); int result; - switch (ifr->ifr_settings.type) { + switch (ifs->type) { case IF_GET_PROTO: if (dev_to_hdlc(dev)->proto != &proto) return -EINVAL; - ifr->ifr_settings.type = IF_PROTO_CISCO; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ + ifs->type = IF_PROTO_CISCO; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ return -ENOBUFS; } if (copy_to_user(cisco_s, &state(hdlc)->settings, size)) diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 25e3564ce118..7637edce443e 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -146,7 +146,7 @@ struct frad_state { u8 rxseq; /* RX sequence number */ }; -static int fr_ioctl(struct net_device *dev, struct ifreq *ifr); +static int fr_ioctl(struct net_device *dev, struct if_settings *ifs); static inline u16 q922_to_dlci(u8 *hdr) { @@ -357,26 +357,26 @@ static int pvc_close(struct net_device *dev) return 0; } -static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int pvc_ioctl(struct net_device *dev, struct if_settings *ifs) { struct pvc_device *pvc = dev->ml_priv; fr_proto_pvc_info info; - if (ifr->ifr_settings.type == IF_GET_PROTO) { + if (ifs->type == IF_GET_PROTO) { if (dev->type == ARPHRD_ETHER) - ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC; + ifs->type = IF_PROTO_FR_ETH_PVC; else - ifr->ifr_settings.type = IF_PROTO_FR_PVC; + ifs->type = IF_PROTO_FR_PVC; - if (ifr->ifr_settings.size < sizeof(info)) { + if (ifs->size < sizeof(info)) { /* data size wanted */ - ifr->ifr_settings.size = sizeof(info); + ifs->size = sizeof(info); return -ENOBUFS; } info.dlci = pvc->dlci; memcpy(info.master, pvc->frad->name, IFNAMSIZ); - if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info, + if (copy_to_user(ifs->ifs_ifsu.fr_pvc_info, &info, sizeof(info))) return -EFAULT; return 0; @@ -1056,7 +1056,7 @@ static const struct net_device_ops pvc_ops = { .ndo_open = pvc_open, .ndo_stop = pvc_close, .ndo_start_xmit = pvc_xmit, - .ndo_do_ioctl = pvc_ioctl, + .ndo_siocwandev = pvc_ioctl, }; static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) @@ -1179,22 +1179,22 @@ static struct hdlc_proto proto = { .module = THIS_MODULE, }; -static int fr_ioctl(struct net_device *dev, struct ifreq *ifr) +static int fr_ioctl(struct net_device *dev, struct if_settings *ifs) { - fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr; + fr_proto __user *fr_s = ifs->ifs_ifsu.fr; const size_t size = sizeof(fr_proto); fr_proto new_settings; hdlc_device *hdlc = dev_to_hdlc(dev); fr_proto_pvc pvc; int result; - switch (ifr->ifr_settings.type) { + switch (ifs->type) { case IF_GET_PROTO: if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */ return -EINVAL; - ifr->ifr_settings.type = IF_PROTO_FR; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ + ifs->type = IF_PROTO_FR; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ return -ENOBUFS; } if (copy_to_user(fr_s, &state(hdlc)->settings, size)) @@ -1256,21 +1256,21 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr) if (!capable(CAP_NET_ADMIN)) return -EPERM; - if (copy_from_user(&pvc, 
ifr->ifr_settings.ifs_ifsu.fr_pvc, + if (copy_from_user(&pvc, ifs->ifs_ifsu.fr_pvc, sizeof(fr_proto_pvc))) return -EFAULT; if (pvc.dlci <= 0 || pvc.dlci >= 1024) return -EINVAL; /* Only 10 bits, DLCI 0 reserved */ - if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC || - ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC) + if (ifs->type == IF_PROTO_FR_ADD_ETH_PVC || + ifs->type == IF_PROTO_FR_DEL_ETH_PVC) result = ARPHRD_ETHER; /* bridged Ethernet device */ else result = ARPHRD_DLCI; - if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC || - ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC) + if (ifs->type == IF_PROTO_FR_ADD_PVC || + ifs->type == IF_PROTO_FR_ADD_ETH_PVC) return fr_add_pvc(dev, pvc.dlci, result); else return fr_del_pvc(hdlc, pvc.dlci, result); diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index b81ecf432a0c..37a3c989cba1 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -100,7 +100,7 @@ static const char *const event_names[EVENTS] = { static struct sk_buff_head tx_queue; /* used when holding the spin lock */ -static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr); +static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs); static inline struct ppp *get_ppp(struct net_device *dev) { @@ -655,17 +655,17 @@ static const struct header_ops ppp_header_ops = { .create = ppp_hard_header, }; -static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr) +static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs) { hdlc_device *hdlc = dev_to_hdlc(dev); struct ppp *ppp; int result; - switch (ifr->ifr_settings.type) { + switch (ifs->type) { case IF_GET_PROTO: if (dev_to_hdlc(dev)->proto != &proto) return -EINVAL; - ifr->ifr_settings.type = IF_PROTO_PPP; + ifs->type = IF_PROTO_PPP; return 0; /* return protocol only, no settable parameters */ case IF_PROTO_PPP: diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index 54d28496fefd..4a2f068721bc 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c @@ -19,7 +19,7 @@ #include <linux/skbuff.h> -static int raw_ioctl(struct net_device *dev, struct ifreq *ifr); +static int raw_ioctl(struct net_device *dev, struct if_settings *ifs); static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev) { @@ -33,21 +33,21 @@ static struct hdlc_proto proto = { }; -static int raw_ioctl(struct net_device *dev, struct ifreq *ifr) +static int raw_ioctl(struct net_device *dev, struct if_settings *ifs) { - raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc; + raw_hdlc_proto __user *raw_s = ifs->ifs_ifsu.raw_hdlc; const size_t size = sizeof(raw_hdlc_proto); raw_hdlc_proto new_settings; hdlc_device *hdlc = dev_to_hdlc(dev); int result; - switch (ifr->ifr_settings.type) { + switch (ifs->type) { case IF_GET_PROTO: if (dev_to_hdlc(dev)->proto != &proto) return -EINVAL; - ifr->ifr_settings.type = IF_PROTO_HDLC; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ + ifs->type = IF_PROTO_HDLC; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ return -ENOBUFS; } if (copy_to_user(raw_s, hdlc->state, size)) diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index 927596276a07..0a66b7356405 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c @@ -20,7 +20,7 @@ #include <linux/rtnetlink.h> #include <linux/skbuff.h> -static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr); +static int raw_eth_ioctl(struct 
net_device *dev, struct if_settings *ifs); static netdev_tx_t eth_tx(struct sk_buff *skb, struct net_device *dev) { @@ -48,22 +48,22 @@ static struct hdlc_proto proto = { }; -static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) +static int raw_eth_ioctl(struct net_device *dev, struct if_settings *ifs) { - raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc; + raw_hdlc_proto __user *raw_s = ifs->ifs_ifsu.raw_hdlc; const size_t size = sizeof(raw_hdlc_proto); raw_hdlc_proto new_settings; hdlc_device *hdlc = dev_to_hdlc(dev); unsigned int old_qlen; int result; - switch (ifr->ifr_settings.type) { + switch (ifs->type) { case IF_GET_PROTO: if (dev_to_hdlc(dev)->proto != &proto) return -EINVAL; - ifr->ifr_settings.type = IF_PROTO_HDLC_ETH; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ + ifs->type = IF_PROTO_HDLC_ETH; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ return -ENOBUFS; } if (copy_to_user(raw_s, hdlc->state, size)) diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index 9b7ebf8bd85c..f72c92c24003 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -29,7 +29,7 @@ struct x25_state { struct tasklet_struct rx_tasklet; }; -static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); +static int x25_ioctl(struct net_device *dev, struct if_settings *ifs); static struct x25_state *state(hdlc_device *hdlc) { @@ -274,21 +274,21 @@ static struct hdlc_proto proto = { .module = THIS_MODULE, }; -static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) +static int x25_ioctl(struct net_device *dev, struct if_settings *ifs) { - x25_hdlc_proto __user *x25_s = ifr->ifr_settings.ifs_ifsu.x25; + x25_hdlc_proto __user *x25_s = ifs->ifs_ifsu.x25; const size_t size = sizeof(x25_hdlc_proto); hdlc_device *hdlc = dev_to_hdlc(dev); x25_hdlc_proto new_settings; int result; - switch (ifr->ifr_settings.type) { + switch (ifs->type) { case IF_GET_PROTO: if (dev_to_hdlc(dev)->proto != &proto) return -EINVAL; - ifr->ifr_settings.type = IF_PROTO_X25; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ + ifs->type = IF_PROTO_X25; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ return -ENOBUFS; } if (copy_to_user(x25_s, &state(hdlc)->settings, size)) @@ -303,7 +303,7 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) return -EBUSY; /* backward compatibility */ - if (ifr->ifr_settings.size == 0) { + if (ifs->size == 0) { new_settings.dce = 0; new_settings.modulo = 8; new_settings.window = 7; diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index fd61a7cc4fdf..e985e54ba75d 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c @@ -142,11 +142,6 @@ static int hostess_close(struct net_device *d) return 0; } -static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) -{ - return hdlc_ioctl(d, ifr, cmd); -} - /* Passed network frames, fire them downwind. 
*/ @@ -171,7 +166,7 @@ static const struct net_device_ops hostess_ops = { .ndo_open = hostess_open, .ndo_stop = hostess_close, .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = hostess_ioctl, + .ndo_siocwandev = hdlc_ioctl, }; static struct z8530_dev *sv11_init(int iobase, int irq) @@ -324,16 +319,18 @@ MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11"); static struct z8530_dev *sv11_unit; -int init_module(void) +static int sv11_module_init(void) { sv11_unit = sv11_init(io, irq); if (!sv11_unit) return -ENODEV; return 0; } +module_init(sv11_module_init); -void cleanup_module(void) +static void sv11_module_cleanup(void) { if (sv11_unit) sv11_shutdown(sv11_unit); } +module_exit(sv11_module_cleanup); diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 3c51ab239fb2..88a36a069311 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -975,11 +975,10 @@ static int init_hdlc_queues(struct port *port) return -ENOMEM; } - port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, + port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys); if (!port->desc_tab) return -ENOMEM; - memset(port->desc_tab, 0, POOL_ALLOC_SIZE); memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */ memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab)); @@ -1255,23 +1254,20 @@ static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg) } } -static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs) { const size_t size = sizeof(sync_serial_settings); sync_serial_settings new_line; - sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; + sync_serial_settings __user *line = ifs->ifs_ifsu.sync; struct port *port = dev_to_port(dev); unsigned long flags; int clk; - if (cmd != SIOCWANDEV) - return hdlc_ioctl(dev, ifr, cmd); - - switch (ifr->ifr_settings.type) { + switch (ifs->type) { case IF_GET_IFACE: - ifr->ifr_settings.type = IF_IFACE_V35; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ + ifs->type = IF_IFACE_V35; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ return -ENOBUFS; } memset(&new_line, 0, sizeof(new_line)); @@ -1324,7 +1320,7 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return 0; default: - return hdlc_ioctl(dev, ifr, cmd); + return hdlc_ioctl(dev, ifs); } } @@ -1336,7 +1332,7 @@ static const struct net_device_ops hss_hdlc_ops = { .ndo_open = hss_hdlc_open, .ndo_stop = hss_hdlc_close, .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = hss_hdlc_ioctl, + .ndo_siocwandev = hss_hdlc_ioctl, }; static int hss_init_one(struct platform_device *pdev) diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h index 3bd541c868d5..d7d59b4595f9 100644 --- a/drivers/net/wan/lmc/lmc.h +++ b/drivers/net/wan/lmc/lmc.h @@ -19,7 +19,7 @@ void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned); void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits); void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits); -int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +int lmc_ioctl(struct net_device *dev, struct if_settings *ifs); extern lmc_media_t lmc_ds3_media; extern lmc_media_t lmc_ssi_media; diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 6c163db52835..ed687bf6ec47 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ 
b/drivers/net/wan/lmc/lmc_main.c @@ -105,7 +105,8 @@ static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue); * linux reserves 16 device specific IOCTLs. We call them * LMCIOC* to control various bits of our world. */ -int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ +static int lmc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) /*fold00*/ { lmc_softc_t *sc = dev_to_sc(dev); lmc_ctl_t ctl; @@ -124,7 +125,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ * To date internally, just copy this out to the user. */ case LMCIOCGINFO: /*fold01*/ - if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t))) + if (copy_to_user(data, &sc->ictl, sizeof(lmc_ctl_t))) ret = -EFAULT; else ret = 0; @@ -141,7 +142,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ break; } - if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) { + if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) { ret = -EFAULT; break; } @@ -171,7 +172,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ break; } - if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) { + if (copy_from_user(&new_type, data, sizeof(u16))) { ret = -EFAULT; break; } @@ -211,8 +212,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ sc->lmc_xinfo.Magic1 = 0xDEADBEEF; - if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo, - sizeof(struct lmc_xinfo))) + if (copy_to_user(data, &sc->lmc_xinfo, sizeof(struct lmc_xinfo))) ret = -EFAULT; else ret = 0; @@ -245,9 +245,9 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ regVal & T1FRAMER_SEF_MASK; } spin_unlock_irqrestore(&sc->lmc_lock, flags); - if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats, + if (copy_to_user(data, &sc->lmc_device->stats, sizeof(sc->lmc_device->stats)) || - copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats), + copy_to_user(data + sizeof(sc->lmc_device->stats), &sc->extra_stats, sizeof(sc->extra_stats))) ret = -EFAULT; else @@ -282,7 +282,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ break; } - if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) { + if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) { ret = -EFAULT; break; } @@ -314,11 +314,11 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ #ifdef DEBUG case LMCIOCDUMPEVENTLOG: - if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) { + if (copy_to_user(data, &lmcEventLogIndex, sizeof(u32))) { ret = -EFAULT; break; } - if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf, + if (copy_to_user(data + sizeof(u32), lmcEventLogBuf, sizeof(lmcEventLogBuf))) ret = -EFAULT; else @@ -346,7 +346,7 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ */ netif_stop_queue(dev); - if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) { + if (copy_from_user(&xc, data, sizeof(struct lmc_xilinx_control))) { ret = -EFAULT; break; } @@ -609,10 +609,8 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ } break; - default: /*fold01*/ - /* If we don't know what to do, give the protocol a shot. 
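
The lmc conversion above is the other half of the split: device-private ioctls (SIOCDEVPRIVATE through SIOCDEVPRIVATE + 15) now arrive via .ndo_siocdevprivate, which hands the handler the userspace pointer as an explicit argument rather than having each driver dereference ifr->ifr_data, which also keeps 32-bit compat tasks working without driver-side fixups. A minimal sketch of such a handler (FOOIOCGINFO and struct foo_priv are illustrative names only):

    #include <linux/netdevice.h>
    #include <linux/sockios.h>
    #include <linux/uaccess.h>

    #define FOOIOCGINFO (SIOCDEVPRIVATE + 0)    /* illustrative */

    struct foo_priv {
        u32 info;
    };

    static int foo_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
                                  void __user *data, int cmd)
    {
        struct foo_priv *priv = netdev_priv(dev);

        switch (cmd) {
        case FOOIOCGINFO:
            /* Copy against the pointer the core handed us. */
            if (copy_to_user(data, &priv->info, sizeof(priv->info)))
                return -EFAULT;
            return 0;
        default:
            return -EOPNOTSUPP;         /* unknown private command */
        }
    }
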
*/ - ret = lmc_proto_ioctl (sc, ifr, cmd); - break; + default: + break; } return ret; @@ -788,7 +786,8 @@ static const struct net_device_ops lmc_ops = { .ndo_open = lmc_open, .ndo_stop = lmc_close, .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = lmc_ioctl, + .ndo_siocwandev = hdlc_ioctl, + .ndo_siocdevprivate = lmc_siocdevprivate, .ndo_tx_timeout = lmc_driver_timeout, .ndo_get_stats = lmc_get_stats, }; diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c index 4e9cc83b615a..e5487616a816 100644 --- a/drivers/net/wan/lmc/lmc_proto.c +++ b/drivers/net/wan/lmc/lmc_proto.c @@ -58,13 +58,6 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ } } -int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd) -{ - if (sc->if_type == LMC_PPP) - return hdlc_ioctl(sc->lmc_device, ifr, cmd); - return -EOPNOTSUPP; -} - int lmc_proto_open(lmc_softc_t *sc) { int ret = 0; diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h index bb098e443776..e56e7072de44 100644 --- a/drivers/net/wan/lmc/lmc_proto.h +++ b/drivers/net/wan/lmc/lmc_proto.h @@ -5,7 +5,6 @@ #include <linux/hdlc.h> void lmc_proto_attach(lmc_softc_t *sc); -int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd); int lmc_proto_open(lmc_softc_t *sc); void lmc_proto_close(lmc_softc_t *sc); __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb); diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c index bdb6dc2409bc..f3e80722ba1d 100644 --- a/drivers/net/wan/n2.c +++ b/drivers/net/wan/n2.c @@ -227,27 +227,30 @@ static int n2_close(struct net_device *dev) return 0; } -static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int n2_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) { - const size_t size = sizeof(sync_serial_settings); - sync_serial_settings new_line; - sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; - port_t *port = dev_to_port(dev); - #ifdef DEBUG_RINGS if (cmd == SIOCDEVPRIVATE) { sca_dump_rings(dev); return 0; } #endif - if (cmd != SIOCWANDEV) - return hdlc_ioctl(dev, ifr, cmd); + return -EOPNOTSUPP; +} + +static int n2_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings new_line; + sync_serial_settings __user *line = ifs->ifs_ifsu.sync; + port_t *port = dev_to_port(dev); - switch (ifr->ifr_settings.type) { + switch (ifs->type) { case IF_GET_IFACE: - ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ + ifs->type = IF_IFACE_SYNC_SERIAL; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ return -ENOBUFS; } if (copy_to_user(line, &port->settings, size)) @@ -275,7 +278,7 @@ static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return 0; default: - return hdlc_ioctl(dev, ifr, cmd); + return hdlc_ioctl(dev, ifs); } } @@ -311,7 +314,8 @@ static const struct net_device_ops n2_ops = { .ndo_open = n2_open, .ndo_stop = n2_close, .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = n2_ioctl, + .ndo_siocwandev = n2_ioctl, + .ndo_siocdevprivate = n2_siocdevprivate, }; static int __init n2_run(unsigned long io, unsigned long irq, diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c index 7b123a771aa6..4766446f0fa0 100644 --- a/drivers/net/wan/pc300too.c +++ b/drivers/net/wan/pc300too.c @@ -174,27 +174,30 @@ static int pc300_close(struct net_device *dev) return 0; } 
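
n2.c here, and pc300too.c and pci200syn.c below, split their old combined handler the same way: the DEBUG_RINGS dump stays behind .ndo_siocdevprivate while line configuration moves to .ndo_siocwandev. From the caller's side, the set direction of that path fills in a sync_serial_settings and is accepted only with CAP_NET_ADMIN; a hedged userspace sketch ("hdlc0" and the 64 kbit/s internal clock are illustrative values):

    /* Configure a V.35 line for internal clocking at 64 kbit/s.
     * "hdlc0" is illustrative; requires CAP_NET_ADMIN. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/if.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ifreq ifr;
        sync_serial_settings sync = {
            .clock_rate = 64000,
            .clock_type = CLOCK_INT,
            .loopback   = 0,
        };
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);
        ifr.ifr_settings.type = IF_IFACE_V35;
        ifr.ifr_settings.size = sizeof(sync);
        ifr.ifr_settings.ifs_ifsu.sync = &sync;

        if (ioctl(fd, SIOCWANDEV, &ifr))
            perror("SIOCWANDEV");

        close(fd);
        return 0;
    }
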
-static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int pc300_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) { - const size_t size = sizeof(sync_serial_settings); - sync_serial_settings new_line; - sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; - int new_type; - port_t *port = dev_to_port(dev); - #ifdef DEBUG_RINGS if (cmd == SIOCDEVPRIVATE) { sca_dump_rings(dev); return 0; } #endif - if (cmd != SIOCWANDEV) - return hdlc_ioctl(dev, ifr, cmd); + return -EOPNOTSUPP; +} + +static int pc300_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings new_line; + sync_serial_settings __user *line = ifs->ifs_ifsu.sync; + int new_type; + port_t *port = dev_to_port(dev); - if (ifr->ifr_settings.type == IF_GET_IFACE) { - ifr->ifr_settings.type = port->iface; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ + if (ifs->type == IF_GET_IFACE) { + ifs->type = port->iface; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ return -ENOBUFS; } if (copy_to_user(line, &port->settings, size)) @@ -203,21 +206,21 @@ static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } if (port->card->type == PC300_X21 && - (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL || - ifr->ifr_settings.type == IF_IFACE_X21)) + (ifs->type == IF_IFACE_SYNC_SERIAL || + ifs->type == IF_IFACE_X21)) new_type = IF_IFACE_X21; else if (port->card->type == PC300_RSV && - (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL || - ifr->ifr_settings.type == IF_IFACE_V35)) + (ifs->type == IF_IFACE_SYNC_SERIAL || + ifs->type == IF_IFACE_V35)) new_type = IF_IFACE_V35; else if (port->card->type == PC300_RSV && - ifr->ifr_settings.type == IF_IFACE_V24) + ifs->type == IF_IFACE_V24) new_type = IF_IFACE_V24; else - return hdlc_ioctl(dev, ifr, cmd); + return hdlc_ioctl(dev, ifs); if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -272,7 +275,8 @@ static const struct net_device_ops pc300_ops = { .ndo_open = pc300_open, .ndo_stop = pc300_close, .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = pc300_ioctl, + .ndo_siocwandev = pc300_ioctl, + .ndo_siocdevprivate = pc300_siocdevprivate, }; static int pc300_pci_init_one(struct pci_dev *pdev, diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c index dee9c4e15eca..ea86c7035653 100644 --- a/drivers/net/wan/pci200syn.c +++ b/drivers/net/wan/pci200syn.c @@ -167,27 +167,30 @@ static int pci200_close(struct net_device *dev) return 0; } -static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int pci200_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) { - const size_t size = sizeof(sync_serial_settings); - sync_serial_settings new_line; - sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; - port_t *port = dev_to_port(dev); - #ifdef DEBUG_RINGS if (cmd == SIOCDEVPRIVATE) { sca_dump_rings(dev); return 0; } #endif - if (cmd != SIOCWANDEV) - return hdlc_ioctl(dev, ifr, cmd); + return -EOPNOTSUPP; +} + +static int pci200_ioctl(struct net_device *dev, struct if_settings *ifs) +{ + const size_t size = sizeof(sync_serial_settings); + sync_serial_settings new_line; + sync_serial_settings __user *line = ifs->ifs_ifsu.sync; + port_t *port = dev_to_port(dev); - switch (ifr->ifr_settings.type) { + switch (ifs->type) { case IF_GET_IFACE: - ifr->ifr_settings.type = IF_IFACE_V35; - if 
(ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ + ifs->type = IF_IFACE_V35; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ return -ENOBUFS; } if (copy_to_user(line, &port->settings, size)) @@ -217,7 +220,7 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return 0; default: - return hdlc_ioctl(dev, ifr, cmd); + return hdlc_ioctl(dev, ifs); } } @@ -253,7 +256,8 @@ static const struct net_device_ops pci200_ops = { .ndo_open = pci200_open, .ndo_stop = pci200_close, .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = pci200_ioctl, + .ndo_siocwandev = pci200_ioctl, + .ndo_siocdevprivate = pci200_siocdevprivate, }; static int pci200_pci_init_one(struct pci_dev *pdev, diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c deleted file mode 100644 index 3092a09d3eaa..000000000000 --- a/drivers/net/wan/sbni.c +++ /dev/null @@ -1,1638 +0,0 @@ -/* sbni.c: Granch SBNI12 leased line adapters driver for linux - * - * Written 2001 by Denis I.Timofeev (timofeev@granch.ru) - * - * Previous versions were written by Yaroslav Polyakov, - * Alexey Zverev and Max Khon. - * - * Driver supports SBNI12-02,-04,-05,-10,-11 cards, single and - * double-channel, PCI and ISA modifications. - * More info and useful utilities to work with SBNI12 cards you can find - * at http://www.granch.com (English) or http://www.granch.ru (Russian) - * - * This software may be used and distributed according to the terms - * of the GNU General Public License. - * - * - * 5.0.1 Jun 22 2001 - * - Fixed bug in probe - * 5.0.0 Jun 06 2001 - * - Driver was completely redesigned by Denis I.Timofeev, - * - now PCI/Dual, ISA/Dual (with single interrupt line) models are - * - supported - * 3.3.0 Thu Feb 24 21:30:28 NOVT 2000 - * - PCI cards support - * 3.2.0 Mon Dec 13 22:26:53 NOVT 1999 - * - Completely rebuilt all the packet storage system - * - to work in Ethernet-like style. - * 3.1.1 just fixed some bugs (5 aug 1999) - * 3.1.0 added balancing feature (26 apr 1999) - * 3.0.1 just fixed some bugs (14 apr 1999). - * 3.0.0 Initial Revision, Yaroslav Polyakov (24 Feb 1999) - * - added pre-calculation for CRC, fixed bug with "len-2" frames, - * - removed outbound fragmentation (MTU=1000), written CRC-calculation - * - on asm, added work with hard_headers and now we have our own cache - * - for them, optionally supported word-interchange on some chipsets, - * - * Known problem: this driver wasn't tested on multiprocessor machine. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/ptrace.h> -#include <linux/fcntl.h> -#include <linux/ioport.h> -#include <linux/interrupt.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/pci.h> -#include <linux/skbuff.h> -#include <linux/timer.h> -#include <linux/init.h> -#include <linux/delay.h> - -#include <net/net_namespace.h> -#include <net/arp.h> -#include <net/Space.h> - -#include <asm/io.h> -#include <asm/types.h> -#include <asm/byteorder.h> -#include <asm/irq.h> -#include <linux/uaccess.h> - -#include "sbni.h" - -/* device private data */ - -struct net_local { - struct timer_list watchdog; - struct net_device *watchdog_dev; - - spinlock_t lock; - struct sk_buff *rx_buf_p; /* receive buffer ptr */ - struct sk_buff *tx_buf_p; /* transmit buffer ptr */ - - unsigned int framelen; /* current frame length */ - unsigned int maxframe; /* maximum valid frame length */ - unsigned int state; - unsigned int inppos, outpos; /* positions in rx/tx buffers */ - - /* transmitting frame number - from frames qty to 1 */ - unsigned int tx_frameno; - - /* expected number of next receiving frame */ - unsigned int wait_frameno; - - /* count of failed attempts to frame send - 32 attempts do before - error - while receiver tunes on opposite side of wire */ - unsigned int trans_errors; - - /* idle time; send pong when limit exceeded */ - unsigned int timer_ticks; - - /* fields used for receive level autoselection */ - int delta_rxl; - unsigned int cur_rxl_index, timeout_rxl; - unsigned long cur_rxl_rcvd, prev_rxl_rcvd; - - struct sbni_csr1 csr1; /* current value of CSR1 */ - struct sbni_in_stats in_stats; /* internal statistics */ - - struct net_device *second; /* for ISA/dual cards */ - -#ifdef CONFIG_SBNI_MULTILINE - struct net_device *master; - struct net_device *link; -#endif -}; - - -static int sbni_card_probe( unsigned long ); -static int sbni_pci_probe( struct net_device * ); -static struct net_device *sbni_probe1(struct net_device *, unsigned long, int); -static int sbni_open( struct net_device * ); -static int sbni_close( struct net_device * ); -static netdev_tx_t sbni_start_xmit(struct sk_buff *, - struct net_device * ); -static int sbni_ioctl( struct net_device *, struct ifreq *, int ); -static void set_multicast_list( struct net_device * ); - -static irqreturn_t sbni_interrupt( int, void * ); -static void handle_channel( struct net_device * ); -static int recv_frame( struct net_device * ); -static void send_frame( struct net_device * ); -static int upload_data( struct net_device *, - unsigned, unsigned, unsigned, u32 ); -static void download_data( struct net_device *, u32 * ); -static void sbni_watchdog(struct timer_list *); -static void interpret_ack( struct net_device *, unsigned ); -static int append_frame_to_pkt( struct net_device *, unsigned, u32 ); -static void indicate_pkt( struct net_device * ); -static void card_start( struct net_device * ); -static void prepare_to_send( struct sk_buff *, struct net_device * ); -static void drop_xmit_queue( struct net_device * ); -static void send_frame_header( struct net_device *, u32 * ); -static int skip_tail( unsigned int, unsigned int, u32 ); -static int check_fhdr( u32, u32 *, u32 *, u32 *, u32 *, u32 * ); -static void change_level( struct net_device * ); -static void timeout_change_level( struct net_device * ); -static u32 calc_crc32( u32, u8 *, u32 ); -static struct sk_buff * 
get_rx_buf( struct net_device * ); -static int sbni_init( struct net_device * ); - -#ifdef CONFIG_SBNI_MULTILINE -static int enslave( struct net_device *, struct net_device * ); -static int emancipate( struct net_device * ); -#endif - -static const char version[] = - "Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n"; - -static bool skip_pci_probe __initdata = false; -static int scandone __initdata = 0; -static int num __initdata = 0; - -static unsigned char rxl_tab[]; -static u32 crc32tab[]; - -/* A list of all installed devices, for removing the driver module. */ -static struct net_device *sbni_cards[ SBNI_MAX_NUM_CARDS ]; - -/* Lists of device's parameters */ -static u32 io[ SBNI_MAX_NUM_CARDS ] __initdata = - { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 }; -static u32 irq[ SBNI_MAX_NUM_CARDS ] __initdata; -static u32 baud[ SBNI_MAX_NUM_CARDS ] __initdata; -static u32 rxl[ SBNI_MAX_NUM_CARDS ] __initdata = - { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 }; -static u32 mac[ SBNI_MAX_NUM_CARDS ] __initdata; - -#ifndef MODULE -typedef u32 iarr[]; -static iarr *dest[5] __initdata = { &io, &irq, &baud, &rxl, &mac }; -#endif - -/* A zero-terminated list of I/O addresses to be probed on ISA bus */ -static unsigned int netcard_portlist[ ] __initdata = { - 0x210, 0x214, 0x220, 0x224, 0x230, 0x234, 0x240, 0x244, 0x250, 0x254, - 0x260, 0x264, 0x270, 0x274, 0x280, 0x284, 0x290, 0x294, 0x2a0, 0x2a4, - 0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4, - 0 }; - -#define NET_LOCAL_LOCK(dev) (((struct net_local *)netdev_priv(dev))->lock) - -/* - * Look for SBNI card which addr stored in dev->base_addr, if nonzero. - * Otherwise, look through PCI bus. If none PCI-card was found, scan ISA. - */ - -static inline int __init -sbni_isa_probe( struct net_device *dev ) -{ - if( dev->base_addr > 0x1ff && - request_region( dev->base_addr, SBNI_IO_EXTENT, dev->name ) && - sbni_probe1( dev, dev->base_addr, dev->irq ) ) - - return 0; - else { - pr_err("base address 0x%lx is busy, or adapter is malfunctional!\n", - dev->base_addr); - return -ENODEV; - } -} - -static const struct net_device_ops sbni_netdev_ops = { - .ndo_open = sbni_open, - .ndo_stop = sbni_close, - .ndo_start_xmit = sbni_start_xmit, - .ndo_set_rx_mode = set_multicast_list, - .ndo_do_ioctl = sbni_ioctl, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static void __init sbni_devsetup(struct net_device *dev) -{ - ether_setup( dev ); - dev->netdev_ops = &sbni_netdev_ops; -} - -int __init sbni_probe(int unit) -{ - struct net_device *dev; - int err; - - dev = alloc_netdev(sizeof(struct net_local), "sbni", - NET_NAME_UNKNOWN, sbni_devsetup); - if (!dev) - return -ENOMEM; - - dev->netdev_ops = &sbni_netdev_ops; - - sprintf(dev->name, "sbni%d", unit); - netdev_boot_setup_check(dev); - - err = sbni_init(dev); - if (err) { - free_netdev(dev); - return err; - } - - err = register_netdev(dev); - if (err) { - release_region( dev->base_addr, SBNI_IO_EXTENT ); - free_netdev(dev); - return err; - } - pr_info_once("%s", version); - return 0; -} - -static int __init sbni_init(struct net_device *dev) -{ - int i; - if( dev->base_addr ) - return sbni_isa_probe( dev ); - /* otherwise we have to perform search our adapter */ - - if( io[ num ] != -1 ) { - dev->base_addr = io[ num ]; - dev->irq = irq[ num ]; - } else if( scandone || io[ 0 ] != -1 ) { - return -ENODEV; - } - - /* if io[ num ] contains non-zero address, then that is on ISA bus */ - if( dev->base_addr ) - return sbni_isa_probe( dev ); - - /* ...otherwise - 
scan PCI first */ - if( !skip_pci_probe && !sbni_pci_probe( dev ) ) - return 0; - - if( io[ num ] == -1 ) { - /* Auto-scan will be stopped when first ISA card were found */ - scandone = 1; - if( num > 0 ) - return -ENODEV; - } - - for( i = 0; netcard_portlist[ i ]; ++i ) { - int ioaddr = netcard_portlist[ i ]; - if( request_region( ioaddr, SBNI_IO_EXTENT, dev->name ) && - sbni_probe1( dev, ioaddr, 0 )) - return 0; - } - - return -ENODEV; -} - - -static int __init -sbni_pci_probe( struct net_device *dev ) -{ - struct pci_dev *pdev = NULL; - - while( (pdev = pci_get_class( PCI_CLASS_NETWORK_OTHER << 8, pdev )) - != NULL ) { - int pci_irq_line; - unsigned long pci_ioaddr; - - if( pdev->vendor != SBNI_PCI_VENDOR && - pdev->device != SBNI_PCI_DEVICE ) - continue; - - pci_ioaddr = pci_resource_start( pdev, 0 ); - pci_irq_line = pdev->irq; - - /* Avoid already found cards from previous calls */ - if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) { - if (pdev->subsystem_device != 2) - continue; - - /* Dual adapter is present */ - if (!request_region(pci_ioaddr += 4, SBNI_IO_EXTENT, - dev->name ) ) - continue; - } - - if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs) - pr_warn( -"WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!.\n" -"You should use the PCI BIOS setup to assign a valid IRQ line.\n", - pci_irq_line ); - - /* avoiding re-enable dual adapters */ - if( (pci_ioaddr & 7) == 0 && pci_enable_device( pdev ) ) { - release_region( pci_ioaddr, SBNI_IO_EXTENT ); - pci_dev_put( pdev ); - return -EIO; - } - if( sbni_probe1( dev, pci_ioaddr, pci_irq_line ) ) { - SET_NETDEV_DEV(dev, &pdev->dev); - /* not the best thing to do, but this is all messed up - for hotplug systems anyway... */ - pci_dev_put( pdev ); - return 0; - } - } - return -ENODEV; -} - - -static struct net_device * __init -sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq ) -{ - struct net_local *nl; - - if( sbni_card_probe( ioaddr ) ) { - release_region( ioaddr, SBNI_IO_EXTENT ); - return NULL; - } - - outb( 0, ioaddr + CSR0 ); - - if( irq < 2 ) { - unsigned long irq_mask; - - irq_mask = probe_irq_on(); - outb( EN_INT | TR_REQ, ioaddr + CSR0 ); - outb( PR_RES, ioaddr + CSR1 ); - mdelay(50); - irq = probe_irq_off(irq_mask); - outb( 0, ioaddr + CSR0 ); - - if( !irq ) { - pr_err("%s: can't detect device irq!\n", dev->name); - release_region( ioaddr, SBNI_IO_EXTENT ); - return NULL; - } - } else if( irq == 2 ) - irq = 9; - - dev->irq = irq; - dev->base_addr = ioaddr; - - /* Fill in sbni-specific dev fields. */ - nl = netdev_priv(dev); - if( !nl ) { - pr_err("%s: unable to get memory!\n", dev->name); - release_region( ioaddr, SBNI_IO_EXTENT ); - return NULL; - } - - memset( nl, 0, sizeof(struct net_local) ); - spin_lock_init( &nl->lock ); - - /* store MAC address (generate if that isn't known) */ - *(__be16 *)dev->dev_addr = htons( 0x00ff ); - *(__be32 *)(dev->dev_addr + 2) = htonl( 0x01000000 | - ((mac[num] ? 
- mac[num] : - (u32)((long)netdev_priv(dev))) & 0x00ffffff)); - - /* store link settings (speed, receive level ) */ - nl->maxframe = DEFAULT_FRAME_LEN; - nl->csr1.rate = baud[ num ]; - - if( (nl->cur_rxl_index = rxl[ num ]) == -1 ) { - /* autotune rxl */ - nl->cur_rxl_index = DEF_RXL; - nl->delta_rxl = DEF_RXL_DELTA; - } else { - nl->delta_rxl = 0; - } - nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ]; - if( inb( ioaddr + CSR0 ) & 0x01 ) - nl->state |= FL_SLOW_MODE; - - pr_notice("%s: ioaddr %#lx, irq %d, MAC: 00:ff:01:%02x:%02x:%02x\n", - dev->name, dev->base_addr, dev->irq, - ((u8 *)dev->dev_addr)[3], - ((u8 *)dev->dev_addr)[4], - ((u8 *)dev->dev_addr)[5]); - - pr_notice("%s: speed %d", - dev->name, - ((nl->state & FL_SLOW_MODE) ? 500000 : 2000000) - / (1 << nl->csr1.rate)); - - if( nl->delta_rxl == 0 ) - pr_cont(", receive level 0x%x (fixed)\n", nl->cur_rxl_index); - else - pr_cont(", receive level (auto)\n"); - -#ifdef CONFIG_SBNI_MULTILINE - nl->master = dev; - nl->link = NULL; -#endif - - sbni_cards[ num++ ] = dev; - return dev; -} - -/* -------------------------------------------------------------------------- */ - -#ifdef CONFIG_SBNI_MULTILINE - -static netdev_tx_t -sbni_start_xmit( struct sk_buff *skb, struct net_device *dev ) -{ - struct net_device *p; - - netif_stop_queue( dev ); - - /* Looking for idle device in the list */ - for( p = dev; p; ) { - struct net_local *nl = netdev_priv(p); - spin_lock( &nl->lock ); - if( nl->tx_buf_p || (nl->state & FL_LINE_DOWN) ) { - p = nl->link; - spin_unlock( &nl->lock ); - } else { - /* Idle dev is found */ - prepare_to_send( skb, p ); - spin_unlock( &nl->lock ); - netif_start_queue( dev ); - return NETDEV_TX_OK; - } - } - - return NETDEV_TX_BUSY; -} - -#else /* CONFIG_SBNI_MULTILINE */ - -static netdev_tx_t -sbni_start_xmit( struct sk_buff *skb, struct net_device *dev ) -{ - struct net_local *nl = netdev_priv(dev); - - netif_stop_queue( dev ); - spin_lock( &nl->lock ); - - prepare_to_send( skb, dev ); - - spin_unlock( &nl->lock ); - return NETDEV_TX_OK; -} - -#endif /* CONFIG_SBNI_MULTILINE */ - -/* -------------------------------------------------------------------------- */ - -/* interrupt handler */ - -/* - * SBNI12D-10, -11/ISA boards within "common interrupt" mode could not - * be looked as two independent single-channel devices. Every channel seems - * as Ethernet interface but interrupt handler must be common. Really, first - * channel ("master") driver only registers the handler. In its struct net_local - * it has got pointer to "slave" channel's struct net_local and handles that's - * interrupts too. - * dev of successfully attached ISA SBNI boards is linked to list. - * While next board driver is initialized, it scans this list. If one - * has found dev with same irq and ioaddr different by 4 then it assumes - * this board to be "master". 
- */ - -static irqreturn_t -sbni_interrupt( int irq, void *dev_id ) -{ - struct net_device *dev = dev_id; - struct net_local *nl = netdev_priv(dev); - int repeat; - - spin_lock( &nl->lock ); - if( nl->second ) - spin_lock(&NET_LOCAL_LOCK(nl->second)); - - do { - repeat = 0; - if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) ) { - handle_channel( dev ); - repeat = 1; - } - if( nl->second && /* second channel present */ - (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) ) { - handle_channel( nl->second ); - repeat = 1; - } - } while( repeat ); - - if( nl->second ) - spin_unlock(&NET_LOCAL_LOCK(nl->second)); - spin_unlock( &nl->lock ); - return IRQ_HANDLED; -} - - -static void -handle_channel( struct net_device *dev ) -{ - struct net_local *nl = netdev_priv(dev); - unsigned long ioaddr = dev->base_addr; - - int req_ans; - unsigned char csr0; - -#ifdef CONFIG_SBNI_MULTILINE - /* Lock the master device because we going to change its local data */ - if( nl->state & FL_SLAVE ) - spin_lock(&NET_LOCAL_LOCK(nl->master)); -#endif - - outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 ); - - nl->timer_ticks = CHANGE_LEVEL_START_TICKS; - for(;;) { - csr0 = inb( ioaddr + CSR0 ); - if( ( csr0 & (RC_RDY | TR_RDY) ) == 0 ) - break; - - req_ans = !(nl->state & FL_PREV_OK); - - if( csr0 & RC_RDY ) - req_ans = recv_frame( dev ); - - /* - * TR_RDY always equals 1 here because we have owned the marker, - * and we set TR_REQ when disabled interrupts - */ - csr0 = inb( ioaddr + CSR0 ); - if( !(csr0 & TR_RDY) || (csr0 & RC_RDY) ) - netdev_err(dev, "internal error!\n"); - - /* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */ - if( req_ans || nl->tx_frameno != 0 ) - send_frame( dev ); - else - /* send marker without any data */ - outb( inb( ioaddr + CSR0 ) & ~TR_REQ, ioaddr + CSR0 ); - } - - outb( inb( ioaddr + CSR0 ) | EN_INT, ioaddr + CSR0 ); - -#ifdef CONFIG_SBNI_MULTILINE - if( nl->state & FL_SLAVE ) - spin_unlock(&NET_LOCAL_LOCK(nl->master)); -#endif -} - - -/* - * Routine returns 1 if it needs to acknowledge received frame. - * Empty frame received without errors won't be acknowledged. - */ - -static int -recv_frame( struct net_device *dev ) -{ - struct net_local *nl = netdev_priv(dev); - unsigned long ioaddr = dev->base_addr; - - u32 crc = CRC32_INITIAL; - - unsigned framelen = 0, frameno, ack; - unsigned is_first, frame_ok = 0; - - if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) { - frame_ok = framelen > 4 - ? 
upload_data( dev, framelen, frameno, is_first, crc ) - : skip_tail( ioaddr, framelen, crc ); - if( frame_ok ) - interpret_ack( dev, ack ); - } - - outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 ); - if( frame_ok ) { - nl->state |= FL_PREV_OK; - if( framelen > 4 ) - nl->in_stats.all_rx_number++; - } else { - nl->state &= ~FL_PREV_OK; - change_level( dev ); - nl->in_stats.all_rx_number++; - nl->in_stats.bad_rx_number++; - } - - return !frame_ok || framelen > 4; -} - - -static void -send_frame( struct net_device *dev ) -{ - struct net_local *nl = netdev_priv(dev); - - u32 crc = CRC32_INITIAL; - - if( nl->state & FL_NEED_RESEND ) { - - /* if frame was sended but not ACK'ed - resend it */ - if( nl->trans_errors ) { - --nl->trans_errors; - if( nl->framelen != 0 ) - nl->in_stats.resend_tx_number++; - } else { - /* cannot xmit with many attempts */ -#ifdef CONFIG_SBNI_MULTILINE - if( (nl->state & FL_SLAVE) || nl->link ) -#endif - nl->state |= FL_LINE_DOWN; - drop_xmit_queue( dev ); - goto do_send; - } - } else - nl->trans_errors = TR_ERROR_COUNT; - - send_frame_header( dev, &crc ); - nl->state |= FL_NEED_RESEND; - /* - * FL_NEED_RESEND will be cleared after ACK, but if empty - * frame sended then in prepare_to_send next frame - */ - - - if( nl->framelen ) { - download_data( dev, &crc ); - nl->in_stats.all_tx_number++; - nl->state |= FL_WAIT_ACK; - } - - outsb( dev->base_addr + DAT, (u8 *)&crc, sizeof crc ); - -do_send: - outb( inb( dev->base_addr + CSR0 ) & ~TR_REQ, dev->base_addr + CSR0 ); - - if( nl->tx_frameno ) - /* next frame exists - we request card to send it */ - outb( inb( dev->base_addr + CSR0 ) | TR_REQ, - dev->base_addr + CSR0 ); -} - - -/* - * Write the frame data into adapter's buffer memory, and calculate CRC. - * Do padding if necessary. - */ - -static void -download_data( struct net_device *dev, u32 *crc_p ) -{ - struct net_local *nl = netdev_priv(dev); - struct sk_buff *skb = nl->tx_buf_p; - - unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen); - - outsb( dev->base_addr + DAT, skb->data + nl->outpos, len ); - *crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len ); - - /* if packet too short we should write some more bytes to pad */ - for( len = nl->framelen - len; len--; ) { - outb( 0, dev->base_addr + DAT ); - *crc_p = CRC32( 0, *crc_p ); - } -} - - -static int -upload_data( struct net_device *dev, unsigned framelen, unsigned frameno, - unsigned is_first, u32 crc ) -{ - struct net_local *nl = netdev_priv(dev); - - int frame_ok; - - if( is_first ) { - nl->wait_frameno = frameno; - nl->inppos = 0; - } - - if( nl->wait_frameno == frameno ) { - - if( nl->inppos + framelen <= ETHER_MAX_LEN ) - frame_ok = append_frame_to_pkt( dev, framelen, crc ); - - /* - * if CRC is right but framelen incorrect then transmitter - * error was occurred... drop entire packet - */ - else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc )) - != 0 ) { - nl->wait_frameno = 0; - nl->inppos = 0; -#ifdef CONFIG_SBNI_MULTILINE - nl->master->stats.rx_errors++; - nl->master->stats.rx_missed_errors++; -#else - dev->stats.rx_errors++; - dev->stats.rx_missed_errors++; -#endif - } - /* now skip all frames until is_first != 0 */ - } else - frame_ok = skip_tail( dev->base_addr, framelen, crc ); - - if( is_first && !frame_ok ) { - /* - * Frame has been broken, but we had already stored - * is_first... Drop entire packet. 
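
For reference, append_frame_to_pkt() accepts a frame only when the CRC computed over payload plus trailing checksum equals the fixed constant CRC32_REMAINDER. That fixed-residue property holds for any CRC of this family; the self-contained demo below shows it with the standard reflected CRC-32 polynomial (sbni's own table and constants live in sbni.h and are not reproduced here, so the exact values are an assumption):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Feeding a message's CRC back through the CRC yields a constant
     * remainder, independent of the message, so the receiver validates
     * payload + trailer in one pass. Standard reflected CRC-32 here;
     * assumes a little-endian host for the trailer layout. */
    static uint32_t crc32_buf(uint32_t crc, const uint8_t *p, size_t len)
    {
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
        }
        return crc;
    }

    int main(void)
    {
        uint8_t frame[16] = "hello, wire";  /* 11 chars + NUL = 12 bytes */
        uint32_t crc = ~crc32_buf(0xFFFFFFFFu, frame, 12);

        memcpy(frame + 12, &crc, 4);        /* append CRC trailer */
        /* One pass over payload + trailer: prints the constant
         * 0xdebb20e3 for this polynomial, whatever the payload. */
        printf("residue: %08x\n",
               (unsigned)crc32_buf(0xFFFFFFFFu, frame, 16));
        return 0;
    }
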
- */ - nl->wait_frameno = 0; -#ifdef CONFIG_SBNI_MULTILINE - nl->master->stats.rx_errors++; - nl->master->stats.rx_crc_errors++; -#else - dev->stats.rx_errors++; - dev->stats.rx_crc_errors++; -#endif - } - - return frame_ok; -} - - -static inline void -send_complete( struct net_device *dev ) -{ - struct net_local *nl = netdev_priv(dev); - -#ifdef CONFIG_SBNI_MULTILINE - nl->master->stats.tx_packets++; - nl->master->stats.tx_bytes += nl->tx_buf_p->len; -#else - dev->stats.tx_packets++; - dev->stats.tx_bytes += nl->tx_buf_p->len; -#endif - dev_consume_skb_irq(nl->tx_buf_p); - - nl->tx_buf_p = NULL; - - nl->outpos = 0; - nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); - nl->framelen = 0; -} - - -static void -interpret_ack( struct net_device *dev, unsigned ack ) -{ - struct net_local *nl = netdev_priv(dev); - - if( ack == FRAME_SENT_OK ) { - nl->state &= ~FL_NEED_RESEND; - - if( nl->state & FL_WAIT_ACK ) { - nl->outpos += nl->framelen; - - if( --nl->tx_frameno ) { - nl->framelen = min_t(unsigned int, - nl->maxframe, - nl->tx_buf_p->len - nl->outpos); - } else { - send_complete( dev ); -#ifdef CONFIG_SBNI_MULTILINE - netif_wake_queue( nl->master ); -#else - netif_wake_queue( dev ); -#endif - } - } - } - - nl->state &= ~FL_WAIT_ACK; -} - - -/* - * Glue received frame with previous fragments of packet. - * Indicate packet when last frame would be accepted. - */ - -static int -append_frame_to_pkt( struct net_device *dev, unsigned framelen, u32 crc ) -{ - struct net_local *nl = netdev_priv(dev); - - u8 *p; - - if( nl->inppos + framelen > ETHER_MAX_LEN ) - return 0; - - if( !nl->rx_buf_p && !(nl->rx_buf_p = get_rx_buf( dev )) ) - return 0; - - p = nl->rx_buf_p->data + nl->inppos; - insb( dev->base_addr + DAT, p, framelen ); - if( calc_crc32( crc, p, framelen ) != CRC32_REMAINDER ) - return 0; - - nl->inppos += framelen - 4; - if( --nl->wait_frameno == 0 ) /* last frame received */ - indicate_pkt( dev ); - - return 1; -} - - -/* - * Prepare to start output on adapter. - * Transmitter will be actually activated when marker is accepted. - */ - -static void -prepare_to_send( struct sk_buff *skb, struct net_device *dev ) -{ - struct net_local *nl = netdev_priv(dev); - - unsigned int len; - - /* nl->tx_buf_p == NULL here! */ - if( nl->tx_buf_p ) - netdev_err(dev, "memory leak!\n"); - - nl->outpos = 0; - nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); - - len = skb->len; - if( len < SBNI_MIN_LEN ) - len = SBNI_MIN_LEN; - - nl->tx_buf_p = skb; - nl->tx_frameno = DIV_ROUND_UP(len, nl->maxframe); - nl->framelen = len < nl->maxframe ? 
len : nl->maxframe; - - outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 ); -#ifdef CONFIG_SBNI_MULTILINE - netif_trans_update(nl->master); -#else - netif_trans_update(dev); -#endif -} - - -static void -drop_xmit_queue( struct net_device *dev ) -{ - struct net_local *nl = netdev_priv(dev); - - if( nl->tx_buf_p ) { - dev_kfree_skb_any( nl->tx_buf_p ); - nl->tx_buf_p = NULL; -#ifdef CONFIG_SBNI_MULTILINE - nl->master->stats.tx_errors++; - nl->master->stats.tx_carrier_errors++; -#else - dev->stats.tx_errors++; - dev->stats.tx_carrier_errors++; -#endif - } - - nl->tx_frameno = 0; - nl->framelen = 0; - nl->outpos = 0; - nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); -#ifdef CONFIG_SBNI_MULTILINE - netif_start_queue( nl->master ); - netif_trans_update(nl->master); -#else - netif_start_queue( dev ); - netif_trans_update(dev); -#endif -} - - -static void -send_frame_header( struct net_device *dev, u32 *crc_p ) -{ - struct net_local *nl = netdev_priv(dev); - - u32 crc = *crc_p; - u32 len_field = nl->framelen + 6; /* CRC + frameno + reserved */ - u8 value; - - if( nl->state & FL_NEED_RESEND ) - len_field |= FRAME_RETRY; /* non-first attempt... */ - - if( nl->outpos == 0 ) - len_field |= FRAME_FIRST; - - len_field |= (nl->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD; - outb( SBNI_SIG, dev->base_addr + DAT ); - - value = (u8) len_field; - outb( value, dev->base_addr + DAT ); - crc = CRC32( value, crc ); - value = (u8) (len_field >> 8); - outb( value, dev->base_addr + DAT ); - crc = CRC32( value, crc ); - - outb( nl->tx_frameno, dev->base_addr + DAT ); - crc = CRC32( nl->tx_frameno, crc ); - outb( 0, dev->base_addr + DAT ); - crc = CRC32( 0, crc ); - *crc_p = crc; -} - - -/* - * if frame tail not needed (incorrect number or received twice), - * it won't store, but CRC will be calculated - */ - -static int -skip_tail( unsigned int ioaddr, unsigned int tail_len, u32 crc ) -{ - while( tail_len-- ) - crc = CRC32( inb( ioaddr + DAT ), crc ); - - return crc == CRC32_REMAINDER; -} - - -/* - * Preliminary checks if frame header is correct, calculates its CRC - * and split it to simple fields - */ - -static int -check_fhdr( u32 ioaddr, u32 *framelen, u32 *frameno, u32 *ack, - u32 *is_first, u32 *crc_p ) -{ - u32 crc = *crc_p; - u8 value; - - if( inb( ioaddr + DAT ) != SBNI_SIG ) - return 0; - - value = inb( ioaddr + DAT ); - *framelen = (u32)value; - crc = CRC32( value, crc ); - value = inb( ioaddr + DAT ); - *framelen |= ((u32)value) << 8; - crc = CRC32( value, crc ); - - *ack = *framelen & FRAME_ACK_MASK; - *is_first = (*framelen & FRAME_FIRST) != 0; - - if( (*framelen &= FRAME_LEN_MASK) < 6 || - *framelen > SBNI_MAX_FRAME - 3 ) - return 0; - - value = inb( ioaddr + DAT ); - *frameno = (u32)value; - crc = CRC32( value, crc ); - - crc = CRC32( inb( ioaddr + DAT ), crc ); /* reserved byte */ - *framelen -= 2; - - *crc_p = crc; - return 1; -} - - -static struct sk_buff * -get_rx_buf( struct net_device *dev ) -{ - /* +2 is to compensate for the alignment fixup below */ - struct sk_buff *skb = dev_alloc_skb( ETHER_MAX_LEN + 2 ); - if( !skb ) - return NULL; - - skb_reserve( skb, 2 ); /* Align IP on longword boundaries */ - return skb; -} - - -static void -indicate_pkt( struct net_device *dev ) -{ - struct net_local *nl = netdev_priv(dev); - struct sk_buff *skb = nl->rx_buf_p; - - skb_put( skb, nl->inppos ); - -#ifdef CONFIG_SBNI_MULTILINE - skb->protocol = eth_type_trans( skb, nl->master ); - netif_rx( skb ); - ++nl->master->stats.rx_packets; - nl->master->stats.rx_bytes += nl->inppos; 
-#else - skb->protocol = eth_type_trans( skb, dev ); - netif_rx( skb ); - ++dev->stats.rx_packets; - dev->stats.rx_bytes += nl->inppos; -#endif - nl->rx_buf_p = NULL; /* protocol driver will clear this sk_buff */ -} - - -/* -------------------------------------------------------------------------- */ - -/* - * Routine checks periodically wire activity and regenerates marker if - * connect was inactive for a long time. - */ - -static void -sbni_watchdog(struct timer_list *t) -{ - struct net_local *nl = from_timer(nl, t, watchdog); - struct net_device *dev = nl->watchdog_dev; - unsigned long flags; - unsigned char csr0; - - spin_lock_irqsave( &nl->lock, flags ); - - csr0 = inb( dev->base_addr + CSR0 ); - if( csr0 & RC_CHK ) { - - if( nl->timer_ticks ) { - if( csr0 & (RC_RDY | BU_EMP) ) - /* receiving not active */ - nl->timer_ticks--; - } else { - nl->in_stats.timeout_number++; - if( nl->delta_rxl ) - timeout_change_level( dev ); - - outb( *(u_char *)&nl->csr1 | PR_RES, - dev->base_addr + CSR1 ); - csr0 = inb( dev->base_addr + CSR0 ); - } - } else - nl->state &= ~FL_LINE_DOWN; - - outb( csr0 | RC_CHK, dev->base_addr + CSR0 ); - - mod_timer(t, jiffies + SBNI_TIMEOUT); - - spin_unlock_irqrestore( &nl->lock, flags ); -} - - -static unsigned char rxl_tab[] = { - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08, - 0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f -}; - -#define SIZE_OF_TIMEOUT_RXL_TAB 4 -static unsigned char timeout_rxl_tab[] = { - 0x03, 0x05, 0x08, 0x0b -}; - -/* -------------------------------------------------------------------------- */ - -static void -card_start( struct net_device *dev ) -{ - struct net_local *nl = netdev_priv(dev); - - nl->timer_ticks = CHANGE_LEVEL_START_TICKS; - nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); - nl->state |= FL_PREV_OK; - - nl->inppos = nl->outpos = 0; - nl->wait_frameno = 0; - nl->tx_frameno = 0; - nl->framelen = 0; - - outb( *(u_char *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 ); - outb( EN_INT, dev->base_addr + CSR0 ); -} - -/* -------------------------------------------------------------------------- */ - -/* Receive level auto-selection */ - -static void -change_level( struct net_device *dev ) -{ - struct net_local *nl = netdev_priv(dev); - - if( nl->delta_rxl == 0 ) /* do not auto-negotiate RxL */ - return; - - if( nl->cur_rxl_index == 0 ) - nl->delta_rxl = 1; - else if( nl->cur_rxl_index == 15 ) - nl->delta_rxl = -1; - else if( nl->cur_rxl_rcvd < nl->prev_rxl_rcvd ) - nl->delta_rxl = -nl->delta_rxl; - - nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index += nl->delta_rxl ]; - inb( dev->base_addr + CSR0 ); /* needs for PCI cards */ - outb( *(u8 *)&nl->csr1, dev->base_addr + CSR1 ); - - nl->prev_rxl_rcvd = nl->cur_rxl_rcvd; - nl->cur_rxl_rcvd = 0; -} - - -static void -timeout_change_level( struct net_device *dev ) -{ - struct net_local *nl = netdev_priv(dev); - - nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ]; - if( ++nl->timeout_rxl >= 4 ) - nl->timeout_rxl = 0; - - nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ]; - inb( dev->base_addr + CSR0 ); - outb( *(unsigned char *)&nl->csr1, dev->base_addr + CSR1 ); - - nl->prev_rxl_rcvd = nl->cur_rxl_rcvd; - nl->cur_rxl_rcvd = 0; -} - -/* -------------------------------------------------------------------------- */ - -/* - * Open/initialize the board. 
- */ - -static int -sbni_open( struct net_device *dev ) -{ - struct net_local *nl = netdev_priv(dev); - struct timer_list *w = &nl->watchdog; - - /* - * For double ISA adapters within "common irq" mode, we have to - * determine whether primary or secondary channel is initialized, - * and set the irq handler only in first case. - */ - if( dev->base_addr < 0x400 ) { /* ISA only */ - struct net_device **p = sbni_cards; - for( ; *p && p < sbni_cards + SBNI_MAX_NUM_CARDS; ++p ) - if( (*p)->irq == dev->irq && - ((*p)->base_addr == dev->base_addr + 4 || - (*p)->base_addr == dev->base_addr - 4) && - (*p)->flags & IFF_UP ) { - - ((struct net_local *) (netdev_priv(*p))) - ->second = dev; - netdev_notice(dev, "using shared irq with %s\n", - (*p)->name); - nl->state |= FL_SECONDARY; - goto handler_attached; - } - } - - if( request_irq(dev->irq, sbni_interrupt, IRQF_SHARED, dev->name, dev) ) { - netdev_err(dev, "unable to get IRQ %d\n", dev->irq); - return -EAGAIN; - } - -handler_attached: - - spin_lock( &nl->lock ); - memset( &dev->stats, 0, sizeof(struct net_device_stats) ); - memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) ); - - card_start( dev ); - - netif_start_queue( dev ); - - /* set timer watchdog */ - nl->watchdog_dev = dev; - timer_setup(w, sbni_watchdog, 0); - w->expires = jiffies + SBNI_TIMEOUT; - add_timer( w ); - - spin_unlock( &nl->lock ); - return 0; -} - - -static int -sbni_close( struct net_device *dev ) -{ - struct net_local *nl = netdev_priv(dev); - - if( nl->second && nl->second->flags & IFF_UP ) { - netdev_notice(dev, "Secondary channel (%s) is active!\n", - nl->second->name); - return -EBUSY; - } - -#ifdef CONFIG_SBNI_MULTILINE - if( nl->state & FL_SLAVE ) - emancipate( dev ); - else - while( nl->link ) /* it's master device! */ - emancipate( nl->link ); -#endif - - spin_lock( &nl->lock ); - - nl->second = NULL; - drop_xmit_queue( dev ); - netif_stop_queue( dev ); - - del_timer( &nl->watchdog ); - - outb( 0, dev->base_addr + CSR0 ); - - if( !(nl->state & FL_SECONDARY) ) - free_irq( dev->irq, dev ); - nl->state &= FL_SECONDARY; - - spin_unlock( &nl->lock ); - return 0; -} - - -/* - Valid combinations in CSR0 (for probing): - - VALID_DECODER 0000,0011,1011,1010 - - ; 0 ; - - TR_REQ ; 1 ; + - TR_RDY ; 2 ; - - TR_RDY TR_REQ ; 3 ; + - BU_EMP ; 4 ; + - BU_EMP TR_REQ ; 5 ; + - BU_EMP TR_RDY ; 6 ; - - BU_EMP TR_RDY TR_REQ ; 7 ; + - RC_RDY ; 8 ; + - RC_RDY TR_REQ ; 9 ; + - RC_RDY TR_RDY ; 10 ; - - RC_RDY TR_RDY TR_REQ ; 11 ; - - RC_RDY BU_EMP ; 12 ; - - RC_RDY BU_EMP TR_REQ ; 13 ; - - RC_RDY BU_EMP TR_RDY ; 14 ; - - RC_RDY BU_EMP TR_RDY TR_REQ ; 15 ; - -*/ - -#define VALID_DECODER (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200) - - -static int -sbni_card_probe( unsigned long ioaddr ) -{ - unsigned char csr0; - - csr0 = inb( ioaddr + CSR0 ); - if( csr0 != 0xff && csr0 != 0x00 ) { - csr0 &= ~EN_INT; - if( csr0 & BU_EMP ) - csr0 |= EN_INT; - - if( VALID_DECODER & (1 << (csr0 >> 4)) ) - return 0; - } - - return -ENODEV; -} - -/* -------------------------------------------------------------------------- */ - -static int -sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd ) -{ - struct net_local *nl = netdev_priv(dev); - struct sbni_flags flags; - int error = 0; - -#ifdef CONFIG_SBNI_MULTILINE - struct net_device *slave_dev; - char slave_name[ 8 ]; -#endif - - switch( cmd ) { - case SIOCDEVGETINSTATS : - if (copy_to_user( ifr->ifr_data, &nl->in_stats, - sizeof(struct sbni_in_stats) )) - error = -EFAULT; - break; - - case SIOCDEVRESINSTATS : - if (!capable(CAP_NET_ADMIN)) - 
return -EPERM; - memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) ); - break; - - case SIOCDEVGHWSTATE : - flags.mac_addr = *(u32 *)(dev->dev_addr + 3); - flags.rate = nl->csr1.rate; - flags.slow_mode = (nl->state & FL_SLOW_MODE) != 0; - flags.rxl = nl->cur_rxl_index; - flags.fixed_rxl = nl->delta_rxl == 0; - - if (copy_to_user( ifr->ifr_data, &flags, sizeof flags )) - error = -EFAULT; - break; - - case SIOCDEVSHWSTATE : - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - spin_lock( &nl->lock ); - flags = *(struct sbni_flags*) &ifr->ifr_ifru; - if( flags.fixed_rxl ) { - nl->delta_rxl = 0; - nl->cur_rxl_index = flags.rxl; - } else { - nl->delta_rxl = DEF_RXL_DELTA; - nl->cur_rxl_index = DEF_RXL; - } - - nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ]; - nl->csr1.rate = flags.rate; - outb( *(u8 *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 ); - spin_unlock( &nl->lock ); - break; - -#ifdef CONFIG_SBNI_MULTILINE - - case SIOCDEVENSLAVE : - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name )) - return -EFAULT; - slave_dev = dev_get_by_name(&init_net, slave_name ); - if( !slave_dev || !(slave_dev->flags & IFF_UP) ) { - netdev_err(dev, "trying to enslave non-active device %s\n", - slave_name); - if (slave_dev) - dev_put(slave_dev); - return -EPERM; - } - - return enslave( dev, slave_dev ); - - case SIOCDEVEMANSIPATE : - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - return emancipate( dev ); - -#endif /* CONFIG_SBNI_MULTILINE */ - - default : - return -EOPNOTSUPP; - } - - return error; -} - - -#ifdef CONFIG_SBNI_MULTILINE - -static int -enslave( struct net_device *dev, struct net_device *slave_dev ) -{ - struct net_local *nl = netdev_priv(dev); - struct net_local *snl = netdev_priv(slave_dev); - - if( nl->state & FL_SLAVE ) /* This isn't master or free device */ - return -EBUSY; - - if( snl->state & FL_SLAVE ) /* That was already enslaved */ - return -EBUSY; - - spin_lock( &nl->lock ); - spin_lock( &snl->lock ); - - /* append to list */ - snl->link = nl->link; - nl->link = slave_dev; - snl->master = dev; - snl->state |= FL_SLAVE; - - /* Summary statistics of MultiLine operation will be stored - in master's counters */ - memset( &slave_dev->stats, 0, sizeof(struct net_device_stats) ); - netif_stop_queue( slave_dev ); - netif_wake_queue( dev ); /* Now we are able to transmit */ - - spin_unlock( &snl->lock ); - spin_unlock( &nl->lock ); - netdev_notice(dev, "slave device (%s) attached\n", slave_dev->name); - return 0; -} - - -static int -emancipate( struct net_device *dev ) -{ - struct net_local *snl = netdev_priv(dev); - struct net_device *p = snl->master; - struct net_local *nl = netdev_priv(p); - - if( !(snl->state & FL_SLAVE) ) - return -EINVAL; - - spin_lock( &nl->lock ); - spin_lock( &snl->lock ); - drop_xmit_queue( dev ); - - /* exclude from list */ - for(;;) { /* must be in list */ - struct net_local *t = netdev_priv(p); - if( t->link == dev ) { - t->link = snl->link; - break; - } - p = t->link; - } - - snl->link = NULL; - snl->master = dev; - snl->state &= ~FL_SLAVE; - - netif_start_queue( dev ); - - spin_unlock( &snl->lock ); - spin_unlock( &nl->lock ); - - dev_put( dev ); - return 0; -} - -#endif - -static void -set_multicast_list( struct net_device *dev ) -{ - return; /* sbni always operate in promiscuos mode */ -} - - -#ifdef MODULE -module_param_hw_array(io, int, ioport, NULL, 0); -module_param_hw_array(irq, int, irq, NULL, 0); -module_param_array(baud, int, NULL, 0); -module_param_array(rxl, int, NULL, 0); 
-module_param_array(mac, int, NULL, 0); -module_param(skip_pci_probe, bool, 0); - -MODULE_LICENSE("GPL"); - - -int __init init_module( void ) -{ - struct net_device *dev; - int err; - - while( num < SBNI_MAX_NUM_CARDS ) { - dev = alloc_netdev(sizeof(struct net_local), "sbni%d", - NET_NAME_UNKNOWN, sbni_devsetup); - if( !dev) - break; - - sprintf( dev->name, "sbni%d", num ); - - err = sbni_init(dev); - if (err) { - free_netdev(dev); - break; - } - - if( register_netdev( dev ) ) { - release_region( dev->base_addr, SBNI_IO_EXTENT ); - free_netdev( dev ); - break; - } - } - - return *sbni_cards ? 0 : -ENODEV; -} - -void -cleanup_module(void) -{ - int i; - - for (i = 0; i < SBNI_MAX_NUM_CARDS; ++i) { - struct net_device *dev = sbni_cards[i]; - if (dev != NULL) { - unregister_netdev(dev); - release_region(dev->base_addr, SBNI_IO_EXTENT); - free_netdev(dev); - } - } -} - -#else /* MODULE */ - -static int __init -sbni_setup( char *p ) -{ - int n, parm; - - if( *p++ != '(' ) - goto bad_param; - - for( n = 0, parm = 0; *p && n < 8; ) { - (*dest[ parm ])[ n ] = simple_strtoul( p, &p, 0 ); - if( !*p || *p == ')' ) - return 1; - if( *p == ';' ) { - ++p; - ++n; - parm = 0; - } else if( *p++ != ',' ) { - break; - } else { - if( ++parm >= 5 ) - break; - } - } -bad_param: - pr_err("Error in sbni kernel parameter!\n"); - return 0; -} - -__setup( "sbni=", sbni_setup ); - -#endif /* MODULE */ - -/* -------------------------------------------------------------------------- */ - -static u32 -calc_crc32( u32 crc, u8 *p, u32 len ) -{ - while( len-- ) - crc = CRC32( *p++, crc ); - - return crc; -} - -static u32 crc32tab[] __attribute__ ((aligned(8))) = { - 0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37, - 0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E, - 0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605, - 0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C, - 0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53, - 0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A, - 0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661, - 0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278, - 0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF, - 0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6, - 0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD, - 0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4, - 0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B, - 0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82, - 0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9, - 0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0, - 0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7, - 0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE, - 0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795, - 0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C, - 0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3, - 0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA, - 0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1, - 0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8, - 0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F, - 0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76, - 0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D, - 0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344, - 0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B, - 0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12, - 0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739, - 0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320, - 0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17, - 0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E, - 0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525, - 0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C, - 0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73, - 0x2560B8D0, 0x52678846, 0xCB6ED9FC, 
0xBC69E96A, - 0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541, - 0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158, - 0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF, - 0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6, - 0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED, - 0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4, - 0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB, - 0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2, - 0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589, - 0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190, - 0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87, - 0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E, - 0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5, - 0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC, - 0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3, - 0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA, - 0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1, - 0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8, - 0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F, - 0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856, - 0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D, - 0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064, - 0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B, - 0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832, - 0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419, - 0x660951BA, 0x110E612C, 0x88073096, 0xFF000000 -}; - diff --git a/drivers/net/wan/sbni.h b/drivers/net/wan/sbni.h deleted file mode 100644 index 84264510a8ed..000000000000 --- a/drivers/net/wan/sbni.h +++ /dev/null @@ -1,147 +0,0 @@ -/* sbni.h: definitions for a Granch SBNI12 driver, version 5.0.0 - * Written 2001 Denis I.Timofeev (timofeev@granch.ru) - * This file is distributed under the GNU GPL - */ - -#ifndef SBNI_H -#define SBNI_H - -#ifdef SBNI_DEBUG -#define DP( A ) A -#else -#define DP( A ) -#endif - - -/* We don't have official vendor id yet... 
*/ -#define SBNI_PCI_VENDOR 0x55 -#define SBNI_PCI_DEVICE 0x9f - -#define ISA_MODE 0x00 -#define PCI_MODE 0x01 - -#define SBNI_IO_EXTENT 4 - -enum sbni_reg { - CSR0 = 0, - CSR1 = 1, - DAT = 2 -}; - -/* CSR0 mapping */ -enum { - BU_EMP = 0x02, - RC_CHK = 0x04, - CT_ZER = 0x08, - TR_REQ = 0x10, - TR_RDY = 0x20, - EN_INT = 0x40, - RC_RDY = 0x80 -}; - - -/* CSR1 mapping */ -#define PR_RES 0x80 - -struct sbni_csr1 { -#ifdef __LITTLE_ENDIAN_BITFIELD - u8 rxl : 5; - u8 rate : 2; - u8 : 1; -#else - u8 : 1; - u8 rate : 2; - u8 rxl : 5; -#endif -}; - -/* fields in frame header */ -#define FRAME_ACK_MASK (unsigned short)0x7000 -#define FRAME_LEN_MASK (unsigned short)0x03FF -#define FRAME_FIRST (unsigned short)0x8000 -#define FRAME_RETRY (unsigned short)0x0800 - -#define FRAME_SENT_BAD (unsigned short)0x4000 -#define FRAME_SENT_OK (unsigned short)0x3000 - - -/* state flags */ -enum { - FL_WAIT_ACK = 0x01, - FL_NEED_RESEND = 0x02, - FL_PREV_OK = 0x04, - FL_SLOW_MODE = 0x08, - FL_SECONDARY = 0x10, -#ifdef CONFIG_SBNI_MULTILINE - FL_SLAVE = 0x20, -#endif - FL_LINE_DOWN = 0x40 -}; - - -enum { - DEFAULT_IOBASEADDR = 0x210, - DEFAULT_INTERRUPTNUMBER = 5, - DEFAULT_RATE = 0, - DEFAULT_FRAME_LEN = 1012 -}; - -#define DEF_RXL_DELTA -1 -#define DEF_RXL 0xf - -#define SBNI_SIG 0x5a - -#define SBNI_MIN_LEN 60 /* Shortest Ethernet frame without FCS */ -#define SBNI_MAX_FRAME 1023 -#define ETHER_MAX_LEN 1518 - -#define SBNI_TIMEOUT (HZ/10) - -#define TR_ERROR_COUNT 32 -#define CHANGE_LEVEL_START_TICKS 4 - -#define SBNI_MAX_NUM_CARDS 16 - -/* internal SBNI-specific statistics */ -struct sbni_in_stats { - u32 all_rx_number; - u32 bad_rx_number; - u32 timeout_number; - u32 all_tx_number; - u32 resend_tx_number; -}; - -/* SBNI ioctl params */ -#define SIOCDEVGETINSTATS SIOCDEVPRIVATE -#define SIOCDEVRESINSTATS SIOCDEVPRIVATE+1 -#define SIOCDEVGHWSTATE SIOCDEVPRIVATE+2 -#define SIOCDEVSHWSTATE SIOCDEVPRIVATE+3 -#define SIOCDEVENSLAVE SIOCDEVPRIVATE+4 -#define SIOCDEVEMANSIPATE SIOCDEVPRIVATE+5 - - -/* data packet for SIOCDEVGHWSTATE/SIOCDEVSHWSTATE ioctl requests */ -struct sbni_flags { - u32 rxl : 4; - u32 rate : 2; - u32 fixed_rxl : 1; - u32 slow_mode : 1; - u32 mac_addr : 24; -}; - -/* - * CRC-32 stuff - */ -#define CRC32(c,crc) (crc32tab[((size_t)(crc) ^ (c)) & 0xff] ^ (((crc) >> 8) & 0x00FFFFFF)) - /* CRC generator 0xEDB88320 */ - /* CRC remainder 0x2144DF1C */ - /* CRC initial value 0x00000000 */ -#define CRC32_REMAINDER 0x2144DF1C -#define CRC32_INITIAL 0x00000000 - -#ifndef __initdata -#define __initdata -#endif - -#endif - diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 4403e219ca03..eddd20aab691 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c @@ -124,14 +124,6 @@ static int sealevel_close(struct net_device *d) return 0; } -static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) -{ - /* struct slvl_device *slvl=dev_to_chan(d); - * z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) - */ - return hdlc_ioctl(d, ifr, cmd); -} - /* Passed network frames, fire them downwind. 
*/ static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb, @@ -152,7 +144,7 @@ static const struct net_device_ops sealevel_ops = { .ndo_open = sealevel_open, .ndo_stop = sealevel_close, .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = sealevel_ioctl, + .ndo_siocwandev = hdlc_ioctl, }; static int slvl_setup(struct slvl_device *sv, int iobase, int irq) diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index f22e48415e6f..5a9e262188ef 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c @@ -343,20 +343,17 @@ static int wanxl_attach(struct net_device *dev, unsigned short encoding, return 0; } -static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int wanxl_ioctl(struct net_device *dev, struct if_settings *ifs) { const size_t size = sizeof(sync_serial_settings); sync_serial_settings line; struct port *port = dev_to_port(dev); - if (cmd != SIOCWANDEV) - return hdlc_ioctl(dev, ifr, cmd); - - switch (ifr->ifr_settings.type) { + switch (ifs->type) { case IF_GET_IFACE: - ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ + ifs->type = IF_IFACE_SYNC_SERIAL; + if (ifs->size < size) { + ifs->size = size; /* data size wanted */ return -ENOBUFS; } memset(&line, 0, sizeof(line)); @@ -364,7 +361,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) line.clock_rate = 0; line.loopback = 0; - if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size)) + if (copy_to_user(ifs->ifs_ifsu.sync, &line, size)) return -EFAULT; return 0; @@ -374,7 +371,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (dev->flags & IFF_UP) return -EBUSY; - if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync, + if (copy_from_user(&line, ifs->ifs_ifsu.sync, size)) return -EFAULT; @@ -389,7 +386,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return 0; default: - return hdlc_ioctl(dev, ifr, cmd); + return hdlc_ioctl(dev, ifs); } } @@ -545,7 +542,7 @@ static const struct net_device_ops wanxl_ops = { .ndo_open = wanxl_open, .ndo_stop = wanxl_close, .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = wanxl_ioctl, + .ndo_siocwandev = wanxl_ioctl, .ndo_get_stats = wanxl_get_stats, }; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 71878ab35b93..4d4e2f91e15c 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3393,19 +3393,12 @@ static int ath10k_pci_claim(struct ath10k *ar) } /* Target expects 32 bit DMA. Enforce it. */ - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret); goto err_region; } - ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (ret) { - ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n", - ret); - goto err_region; - } - pci_set_master(pdev); /* Arrange for access to Target SoC registers. */ diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 603d2f93ac18..9a224817630a 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -1406,11 +1406,6 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar, * Firmware rate's control to be skipped for this? 
*/ - if (flags == WMI_RATE_PREAMBLE_HE && mcs > 11) { - ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs); - return; - } - if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) { ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs); return; diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 646ad79f309c..5abb38cc3b55 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -933,20 +933,14 @@ static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev) goto disable_device; } - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK)); + ret = dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(ATH11K_PCI_DMA_MASK)); if (ret) { ath11k_err(ab, "failed to set pci dma mask to %d: %d\n", ATH11K_PCI_DMA_MASK, ret); goto release_region; } - ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK)); - if (ret) { - ath11k_err(ab, "failed to set pci consistent dma mask to %d: %d\n", - ATH11K_PCI_DMA_MASK, ret); - goto release_region; - } - pci_set_master(pdev); ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM); diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c index 43b4ae86e5fb..86b8cb975b1a 100644 --- a/drivers/net/wireless/ath/ath5k/pci.c +++ b/drivers/net/wireless/ath/ath5k/pci.c @@ -191,7 +191,7 @@ ath5k_pci_probe(struct pci_dev *pdev, } /* XXX 32-bit addressing only */ - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { dev_err(&pdev->dev, "32-bit DMA not available\n"); goto err_dis; diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index b137e7f34397..bd1ef6334997 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -2504,8 +2504,10 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx) goto free_data_skb; for (index = 0; index < num_pri_streams; index++) { - if (WARN_ON(!data_sync_bufs[index].skb)) + if (WARN_ON(!data_sync_bufs[index].skb)) { + ret = -ENOMEM; goto free_data_skb; + } ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev, data_sync_bufs[index]. 
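
The ath DMA-mask hunks above are one and the same conversion: ath10k, ath11k and ath9k collapse the legacy pci_set_dma_mask()/pci_set_consistent_dma_mask() pair into a single dma_set_mask_and_coherent() call on the generic struct device (ath5k, which never set a coherent mask, switches to plain dma_set_mask()). A minimal sketch of the resulting probe-time idiom, assuming a 32-bit-only device; the helper name and bare error path are illustrative, not taken from any of these drivers:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical probe helper showing the consolidated idiom. */
static int example_pci_dma_setup(struct pci_dev *pdev)
{
	int ret;

	/* One call sets both the streaming and the coherent DMA mask;
	 * a nonzero return means 32-bit DMA is not available.
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	pci_set_master(pdev);
	return 0;
}
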
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index b4885a700296..b0a4ca3559fd 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3351,7 +3351,8 @@ found: "Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n", cptr, code, reference, length, major, minor); if ((!AR_SREV_9485(ah) && length >= 1024) || - (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) { + (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485) || + (length > cptr)) { ath_dbg(common, EEPROM, "Skipping bad header\n"); cptr -= COMP_HDR_LEN; continue; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 2ca3b86714a9..172081ffe477 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1621,7 +1621,6 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah) ath9k_hw_gpio_request_out(ah, i, NULL, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i))); - ath9k_hw_gpio_free(ah, i); } } @@ -2728,14 +2727,17 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type) static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out, const char *label) { + int err; + if (ah->caps.gpio_requested & BIT(gpio)) return; - /* may be requested by BSP, free anyway */ - gpio_free(gpio); - - if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label)) + err = gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label); + if (err) { + ath_err(ath9k_hw_common(ah), "request GPIO%d failed:%d\n", + gpio, err); return; + } ah->caps.gpio_requested |= BIT(gpio); } diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index cff9af3af38d..a074e23013c5 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -896,18 +896,12 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (pcim_enable_device(pdev)) return -EIO; - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { pr_err("32-bit DMA not available\n"); return ret; } - ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (ret) { - pr_err("32-bit DMA consistent DMA enable failed\n"); - return ret; - } - /* * Cache line size is used to size and align various * structures used to communicate with the hardware. diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index d202f2128df2..ec913ec991f3 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -408,13 +408,14 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n", ch); - if (wcn->sw_scan_opchannel == ch) { + if (wcn->sw_scan_opchannel == ch && wcn->sw_scan_channel) { /* If channel is the initial operating channel, we may * want to receive/transmit regular data packets, then * simply stop the scan session and exit PS mode. */ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif); + wcn->sw_scan_channel = 0; } else if (wcn->sw_scan) { /* A scan is ongoing, do not change the operating * channel, but start a scan session on the channel. 
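
The wcn36xx hunk above and the two below introduce a sw_scan_channel field that records which channel a software-scan session is currently open on (0 meaning none), so the driver only finishes a session that was actually started. Condensed into one place, and assuming the surrounding driver state, the channel-switch bookkeeping in wcn36xx_config() becomes:

	/* Sketch only: condensed from the hunks around this point. */
	if (wcn->sw_scan_opchannel == ch && wcn->sw_scan_channel) {
		/* Back on the operating channel: close the scan session
		 * so regular data packets can flow again.
		 */
		wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
					wcn->sw_scan_vif);
		wcn->sw_scan_channel = 0;
	} else if (wcn->sw_scan) {
		/* Off-channel during a scan: open a session on the new
		 * channel and remember it.
		 */
		wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN,
				      wcn->sw_scan_vif);
		wcn36xx_smd_start_scan(wcn, ch);
		wcn->sw_scan_channel = ch;
	} else {
		wcn36xx_change_opchannel(wcn, ch);
	}
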
@@ -422,6 +423,7 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif); wcn36xx_smd_start_scan(wcn, ch); + wcn->sw_scan_channel = ch; } else { wcn36xx_change_opchannel(wcn, ch); } @@ -702,6 +704,7 @@ static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw, wcn->sw_scan = true; wcn->sw_scan_vif = vif; + wcn->sw_scan_channel = 0; if (vif_priv->sta_assoc) wcn->sw_scan_opchannel = WCN36XX_HW_CHANNEL(wcn); else @@ -1500,6 +1503,13 @@ static int wcn36xx_probe(struct platform_device *pdev) goto out_wq; } + wcn->nv_file = WLAN_NV_FILE; + ret = of_property_read_string(wcn->dev->parent->of_node, "firmware-name", &wcn->nv_file); + if (ret < 0 && ret != -EINVAL) { + wcn36xx_err("failed to read \"firmware-name\" property: %d\n", ret); + goto out_wq; + } + wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process, hw); if (IS_ERR(wcn->smd_channel)) { wcn36xx_err("failed to open WLAN_CTRL channel\n"); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 0e3be17d8cea..57fa857b290b 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -504,10 +504,10 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn) u16 fm_offset = 0; if (!wcn->nv) { - ret = request_firmware(&wcn->nv, WLAN_NV_FILE, wcn->dev); + ret = request_firmware(&wcn->nv, wcn->nv_file, wcn->dev); if (ret) { wcn36xx_err("Failed to load nv file %s: %d\n", - WLAN_NV_FILE, ret); + wcn->nv_file, ret); goto out; } } diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 1b831157ede1..cab196bb38cd 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -287,6 +287,10 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) status.rate_idx = 0; } + if (ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) + status.boottime_ns = ktime_get_boottime_ns(); + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); if (ieee80211_is_beacon(hdr->frame_control)) { diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 6121d8a5641a..add6e527e833 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -199,6 +199,7 @@ struct wcn36xx { struct device *dev; struct list_head vif_list; + const char *nv_file; const struct firmware *nv; u8 fw_revision; @@ -246,6 +247,7 @@ struct wcn36xx { struct cfg80211_scan_request *scan_req; bool sw_scan; u8 sw_scan_opchannel; + u8 sw_scan_channel; struct ieee80211_vif *sw_scan_vif; struct mutex scan_lock; bool scan_aborted; diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c index e481674485c2..29a9f17c2df0 100644 --- a/drivers/net/wireless/ath/wil6210/ethtool.c +++ b/drivers/net/wireless/ath/wil6210/ethtool.c @@ -11,8 +11,11 @@ #include "wil6210.h" -static int wil_ethtoolops_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *cp) +static int +wil_ethtoolops_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *cp, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct wil6210_priv *wil = ndev_to_wil(ndev); u32 tx_itr_en, tx_itr_val = 0; @@ -45,8 +48,11 @@ out: return ret; } -static int wil_ethtoolops_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *cp) +static int 
+wil_ethtoolops_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *cp, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct wil6210_priv *wil = ndev_to_wil(ndev); struct wireless_dev *wdev = ndev->ieee80211_ptr; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile index 9b15bc3f6054..13c13504a6e8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile @@ -23,7 +23,8 @@ brcmfmac-objs += \ feature.o \ btcoex.o \ vendor.o \ - pno.o + pno.o \ + xtlv.o brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \ bcdc.o \ fwsignal.o diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 633d0ab19031..ac02244a6fdf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -128,7 +128,8 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev) if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) { /* assign GPIO to SDIO core */ - addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol); + addr = brcmf_chip_enum_base(sdiodev->func1->device); + addr = CORE_CC_REG(addr, gpiocontrol); gpiocontrol = brcmf_sdiod_readl(sdiodev, addr, &ret); gpiocontrol |= 0x2; brcmf_sdiod_writel(sdiodev, addr, gpiocontrol, &ret); @@ -990,6 +991,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359), { /* end: all zeroes */ } }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index cedba56fc448..f7b96cd69242 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -1829,6 +1829,14 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE; } break; + case WLAN_AKM_SUITE_FT_OVER_SAE: + val = WPA3_AUTH_SAE_PSK | WPA2_AUTH_FT; + profile->is_ft = true; + if (sme->crypto.sae_pwd) { + brcmf_dbg(INFO, "using SAE offload\n"); + profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE; + } + break; default: bphy_err(drvr, "invalid cipher group (%d)\n", sme->crypto.cipher_group); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index 45037decba40..1ee49f9e325d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -139,6 +139,8 @@ struct sbconfig { u32 sbidhigh; /* identification */ }; +#define INVALID_RAMBASE ((u32)(~0)) + /* bankidx and bankinfo reg defines corerev >= 8 */ #define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000 #define SOCRAM_BANKINFO_SZMASK 0x0000007f @@ -527,7 +529,7 @@ static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci) int idx = 1; list_for_each_entry(core, &ci->cores, list) { - brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n", + brcmf_dbg(INFO, " [%-2d] core 0x%x:%-3d base 0x%08x wrap 0x%08x\n", idx++, core->pub.id, core->pub.rev, core->pub.base, core->wrapbase); @@ -727,11 +729,13 @@ 
static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci) case BRCM_CC_4364_CHIP_ID: case CY_CC_4373_CHIP_ID: return 0x160000; + case CY_CC_43752_CHIP_ID: + return 0x170000; default: brcmf_err("unknown chip: %s\n", ci->pub.name); break; } - return 0; + return INVALID_RAMBASE; } int brcmf_chip_get_raminfo(struct brcmf_chip *pub) @@ -746,7 +750,7 @@ int brcmf_chip_get_raminfo(struct brcmf_chip *pub) mem_core = container_of(mem, struct brcmf_core_priv, pub); ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core); ci->pub.rambase = brcmf_chip_tcm_rambase(ci); - if (!ci->pub.rambase) { + if (ci->pub.rambase == INVALID_RAMBASE) { brcmf_err("RAM base not provided with ARM CR4 core\n"); return -EINVAL; } @@ -757,7 +761,7 @@ int brcmf_chip_get_raminfo(struct brcmf_chip *pub) pub); ci->pub.ramsize = brcmf_chip_sysmem_ramsize(mem_core); ci->pub.rambase = brcmf_chip_tcm_rambase(ci); - if (!ci->pub.rambase) { + if (ci->pub.rambase == INVALID_RAMBASE) { brcmf_err("RAM base not provided with ARM CA7 core\n"); return -EINVAL; } @@ -894,7 +898,8 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci) u32 base, wrap; int err; - eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr)); + eromaddr = ci->ops->read32(ci->ctx, + CORE_CC_REG(ci->pub.enum_base, eromptr)); while (desc_type != DMP_DESC_EOT) { val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type); @@ -942,6 +947,11 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci) return 0; } +u32 brcmf_chip_enum_base(u16 devid) +{ + return SI_ENUM_BASE_DEFAULT; +} + static int brcmf_chip_recognition(struct brcmf_chip_priv *ci) { struct brcmf_core *core; @@ -954,7 +964,8 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci) * For different chiptypes or old sdio hosts w/o chipcommon, * other ways of recognition should be added here. 
*/ - regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid)); + regdata = ci->ops->read32(ci->ctx, + CORE_CC_REG(ci->pub.enum_base, chipid)); ci->pub.chip = regdata & CID_ID_MASK; ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT; socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT; @@ -974,7 +985,7 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci) ci->resetcore = brcmf_chip_sb_resetcore; core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON, - SI_ENUM_BASE, 0); + SI_ENUM_BASE_DEFAULT, 0); brcmf_chip_sb_corerev(ci, core); core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV, BCM4329_CORE_BUS_BASE, 0); @@ -1088,7 +1099,7 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip) return ret; } -struct brcmf_chip *brcmf_chip_attach(void *ctx, +struct brcmf_chip *brcmf_chip_attach(void *ctx, u16 devid, const struct brcmf_buscore_ops *ops) { struct brcmf_chip_priv *chip; @@ -1113,6 +1124,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx, chip->num_cores = 0; chip->ops = ops; chip->ctx = ctx; + chip->pub.enum_base = brcmf_chip_enum_base(devid); err = ops->prepare(ctx); if (err < 0) @@ -1411,6 +1423,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub) reg = chip->ops->read32(chip->ctx, addr); return (reg & CC_SR_CTL0_ENABLE_MASK) != 0; case BRCM_CC_4359_CHIP_ID: + case CY_CC_43752_CHIP_ID: case CY_CC_43012_CHIP_ID: addr = CORE_CC_REG(pmu->base, retention_ctl); reg = chip->ops->read32(chip->ctx, addr); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h index 8fa38658e727..d69f101f5834 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h @@ -15,6 +15,7 @@ * * @chip: chip identifier. * @chiprev: chip revision. + * @enum_base: base address of core enumeration space. * @cc_caps: chipcommon core capabilities. * @cc_caps_ext: chipcommon core extended capabilities. * @pmucaps: PMU capabilities. 
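
The brcmfmac chip.c/chip.h changes in this series thread a bus device id into brcmf_chip_attach() so the core-enumeration base can be chosen per device rather than hard-coded to SI_ENUM_BASE (renamed SI_ENUM_BASE_DEFAULT); brcmf_chip_enum_base() still returns the default for every id, but register accesses already go through the per-chip field. The call sites, drawn from the SDIO and PCIe hunks elsewhere in this diff (error handling elided):

	/* SDIO bus: keyed on the SDIO function's device id. */
	bus->ci = brcmf_chip_attach(sdiodev, sdiodev->func1->device,
				    &brcmf_sdio_buscore_ops);

	/* PCIe bus: keyed on the PCI device id. */
	devinfo->ci = brcmf_chip_attach(devinfo, pdev->device,
					&brcmf_pcie_buscore_ops);

	/* Register reads are then made relative to the per-chip base. */
	regdata = ci->ops->read32(ci->ctx,
				  CORE_CC_REG(ci->pub.enum_base, chipid));
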
@@ -27,6 +28,7 @@ struct brcmf_chip { u32 chip; u32 chiprev; + u32 enum_base; u32 cc_caps; u32 cc_caps_ext; u32 pmucaps; @@ -70,7 +72,7 @@ struct brcmf_buscore_ops { }; int brcmf_chip_get_raminfo(struct brcmf_chip *pub); -struct brcmf_chip *brcmf_chip_attach(void *ctx, +struct brcmf_chip *brcmf_chip_attach(void *ctx, u16 devid, const struct brcmf_buscore_ops *ops); void brcmf_chip_detach(struct brcmf_chip *chip); struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid); @@ -85,5 +87,6 @@ void brcmf_chip_set_passive(struct brcmf_chip *ci); bool brcmf_chip_set_active(struct brcmf_chip *ci, u32 rstvec); bool brcmf_chip_sr_capable(struct brcmf_chip *pub); char *brcmf_chip_name(u32 chipid, u32 chiprev, char *buf, uint len); +u32 brcmf_chip_enum_base(u16 devid); #endif /* BRCMF_AXIDMP_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index d40104b8df55..0eb13e5df517 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -431,8 +431,6 @@ struct brcmf_fw { void (*done)(struct device *dev, int err, struct brcmf_fw_request *req); }; -static void brcmf_fw_request_done(const struct firmware *fw, void *ctx); - #ifdef CONFIG_EFI /* In some cases the EFI-var stored nvram contains "ccode=ALL" or "ccode=XV" * to specify "worldwide" compatible settings, but these 2 ccode-s do not work @@ -594,28 +592,47 @@ static int brcmf_fw_complete_request(const struct firmware *fw, return (cur->flags & BRCMF_FW_REQF_OPTIONAL) ? 0 : ret; } +static char *brcm_alt_fw_path(const char *path, const char *board_type) +{ + char alt_path[BRCMF_FW_NAME_LEN]; + char suffix[5]; + + strscpy(alt_path, path, BRCMF_FW_NAME_LEN); + /* At least one character + suffix */ + if (strlen(alt_path) < 5) + return NULL; + + /* strip .txt or .bin at the end */ + strscpy(suffix, alt_path + strlen(alt_path) - 4, 5); + alt_path[strlen(alt_path) - 4] = 0; + strlcat(alt_path, ".", BRCMF_FW_NAME_LEN); + strlcat(alt_path, board_type, BRCMF_FW_NAME_LEN); + strlcat(alt_path, suffix, BRCMF_FW_NAME_LEN); + + return kstrdup(alt_path, GFP_KERNEL); +} + static int brcmf_fw_request_firmware(const struct firmware **fw, struct brcmf_fw *fwctx) { struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos]; int ret; - /* nvram files are board-specific, first try a board-specific path */ + /* Files can be board-specific, first try a board-specific path */ if (cur->type == BRCMF_FW_TYPE_NVRAM && fwctx->req->board_type) { - char alt_path[BRCMF_FW_NAME_LEN]; + char *alt_path; - strlcpy(alt_path, cur->path, BRCMF_FW_NAME_LEN); - /* strip .txt at the end */ - alt_path[strlen(alt_path) - 4] = 0; - strlcat(alt_path, ".", BRCMF_FW_NAME_LEN); - strlcat(alt_path, fwctx->req->board_type, BRCMF_FW_NAME_LEN); - strlcat(alt_path, ".txt", BRCMF_FW_NAME_LEN); + alt_path = brcm_alt_fw_path(cur->path, fwctx->req->board_type); + if (!alt_path) + goto fallback; ret = request_firmware(fw, alt_path, fwctx->dev); + kfree(alt_path); if (ret == 0) return ret; } +fallback: return request_firmware(fw, cur->path, fwctx->dev); } @@ -639,6 +656,22 @@ static void brcmf_fw_request_done(const struct firmware *fw, void *ctx) kfree(fwctx); } +static void brcmf_fw_request_done_alt_path(const struct firmware *fw, void *ctx) +{ + struct brcmf_fw *fwctx = ctx; + struct brcmf_fw_item *first = &fwctx->req->items[0]; + int ret = 0; + + /* Fall back to canonical path if board firmware not found */ + if (!fw) + ret 
= request_firmware_nowait(THIS_MODULE, true, first->path, + fwctx->dev, GFP_KERNEL, fwctx, + brcmf_fw_request_done); + + if (fw || ret < 0) + brcmf_fw_request_done(fw, ctx); +} + static bool brcmf_fw_request_is_valid(struct brcmf_fw_request *req) { struct brcmf_fw_item *item; @@ -660,6 +693,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, { struct brcmf_fw_item *first = &req->items[0]; struct brcmf_fw *fwctx; + char *alt_path; int ret; brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev)); @@ -677,9 +711,18 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, fwctx->req = req; fwctx->done = fw_cb; - ret = request_firmware_nowait(THIS_MODULE, true, first->path, - fwctx->dev, GFP_KERNEL, fwctx, - brcmf_fw_request_done); + /* First try alternative board-specific path if any */ + alt_path = brcm_alt_fw_path(first->path, fwctx->req->board_type); + if (alt_path) { + ret = request_firmware_nowait(THIS_MODULE, true, alt_path, + fwctx->dev, GFP_KERNEL, fwctx, + brcmf_fw_request_done_alt_path); + kfree(alt_path); + } else { + ret = request_firmware_nowait(THIS_MODULE, true, first->path, + fwctx->dev, GFP_KERNEL, fwctx, + brcmf_fw_request_done); + } if (ret < 0) brcmf_fw_request_done(NULL, fwctx); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c index 9ed85420f3ca..d5578ca681bb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c @@ -15,6 +15,7 @@ #include "bus.h" #include "debug.h" #include "tracepoint.h" +#include "xtlv.h" #include "fwil.h" #include "proto.h" @@ -150,7 +151,8 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len) mutex_lock(&ifp->drvr->proto_block); err = brcmf_fil_cmd_data(ifp, cmd, data, len, false); - brcmf_dbg(FIL, "ifidx=%d, cmd=%d, len=%d\n", ifp->ifidx, cmd, len); + brcmf_dbg(FIL, "ifidx=%d, cmd=%d, len=%d, err=%d\n", ifp->ifidx, cmd, + len, err); brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); @@ -260,7 +262,8 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data, bphy_err(drvr, "Creating iovar failed\n"); } - brcmf_dbg(FIL, "ifidx=%d, name=%s, len=%d\n", ifp->ifidx, name, len); + brcmf_dbg(FIL, "ifidx=%d, name=%s, len=%d, err=%d\n", ifp->ifidx, name, + len, err); brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); @@ -383,14 +386,13 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, err = -EPERM; bphy_err(drvr, "Creating bsscfg failed\n"); } - brcmf_dbg(FIL, "ifidx=%d, bsscfgidx=%d, name=%s, len=%d\n", ifp->ifidx, - ifp->bsscfgidx, name, len); + brcmf_dbg(FIL, "ifidx=%d, bsscfgidx=%d, name=%s, len=%d, err=%d\n", + ifp->ifidx, ifp->bsscfgidx, name, len, err); brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); mutex_unlock(&drvr->proto_block); return err; - } s32 @@ -414,3 +416,117 @@ brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data) *data = le32_to_cpu(data_le); return err; } + +static u32 brcmf_create_xtlv(char *name, u16 id, char *data, u32 len, + char *buf, u32 buflen) +{ + u32 iolen; + u32 nmlen; + + nmlen = strlen(name) + 1; + iolen = nmlen + brcmf_xtlv_data_size(len, BRCMF_XTLV_OPTION_ALIGN32); + + if (iolen > buflen) { + brcmf_err("buffer is too short\n"); + return 0; + } + + memcpy(buf, name, nmlen); + brcmf_xtlv_pack_header((void *)(buf + nmlen), id, len, data, + 
BRCMF_XTLV_OPTION_ALIGN32); + + return iolen; +} + +s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, char *name, u16 id, + void *data, u32 len) +{ + struct brcmf_pub *drvr = ifp->drvr; + s32 err; + u32 buflen; + + mutex_lock(&drvr->proto_block); + + brcmf_dbg(FIL, "ifidx=%d, name=%s, id=%u, len=%u\n", ifp->ifidx, name, + id, len); + brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, + min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); + + buflen = brcmf_create_xtlv(name, id, data, len, + drvr->proto_buf, sizeof(drvr->proto_buf)); + if (buflen) { + err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf, + buflen, true); + } else { + err = -EPERM; + bphy_err(drvr, "Creating xtlv failed\n"); + } + + mutex_unlock(&drvr->proto_block); + return err; +} + +s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, char *name, u16 id, + void *data, u32 len) +{ + struct brcmf_pub *drvr = ifp->drvr; + s32 err; + u32 buflen; + + mutex_lock(&drvr->proto_block); + + buflen = brcmf_create_xtlv(name, id, data, len, + drvr->proto_buf, sizeof(drvr->proto_buf)); + if (buflen) { + err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf, + buflen, false); + if (err == 0) + memcpy(data, drvr->proto_buf, len); + } else { + err = -EPERM; + bphy_err(drvr, "Creating bsscfg failed\n"); + } + brcmf_dbg(FIL, "ifidx=%d, name=%s, id=%u, len=%u, err=%d\n", + ifp->ifidx, name, id, len, err); + brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data, + min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); + + mutex_unlock(&drvr->proto_block); + return err; +} + +s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, char *name, u16 id, u32 data) +{ + __le32 data_le = cpu_to_le32(data); + + return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le, + sizeof(data_le)); +} + +s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, char *name, u16 id, u32 *data) +{ + __le32 data_le = cpu_to_le32(*data); + s32 err; + + err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le)); + if (err == 0) + *data = le32_to_cpu(data_le); + return err; +} + +s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, char *name, u16 id, u8 *data) +{ + return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data)); +} + +s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, char *name, u16 id, u16 *data) +{ + __le16 data_le = cpu_to_le16(*data); + s32 err; + + err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le)); + if (err == 0) + *data = le16_to_cpu(data_le); + return err; +} + diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h index ae4cf4372908..cb26f8c59c21 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h @@ -97,5 +97,13 @@ s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, void *data, u32 len); s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data); s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data); +s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, char *name, u16 id, + void *data, u32 len); +s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, char *name, u16 id, + void *data, u32 len); +s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, char *name, u16 id, u32 data); +s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, char *name, u16 id, u32 *data); +s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, char *name, u16 id, u8 *data); +s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, char *name, u16 id, u16 *data); #endif /* _fwil_h_ */ diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index c49dd0c36ae4..8b149996fc00 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1886,7 +1886,8 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) devinfo->pdev = pdev; pcie_bus_dev = NULL; - devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops); + devinfo->ci = brcmf_chip_attach(devinfo, pdev->device, + &brcmf_pcie_buscore_ops); if (IS_ERR(devinfo->ci)) { ret = PTR_ERR(devinfo->ci); devinfo->ci = NULL; @@ -2075,7 +2076,7 @@ cleanup: err = brcmf_pcie_probe(pdev, NULL); if (err) - brcmf_err(bus, "probe after resume failed, err=%d\n", err); + __brcmf_err(NULL, __func__, "probe after resume failed, err=%d\n", err); return err; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 97ee9e2e2e35..8effeb7a7269 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -617,6 +617,7 @@ BRCMF_FW_DEF(4339, "brcmfmac4339-sdio"); BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio"); /* Note the names are not postfixed with a1 for backward compatibility */ BRCMF_FW_CLM_DEF(43430A1, "brcmfmac43430-sdio"); +BRCMF_FW_DEF(43430B0, "brcmfmac43430b0-sdio"); BRCMF_FW_CLM_DEF(43455, "brcmfmac43455-sdio"); BRCMF_FW_DEF(43456, "brcmfmac43456-sdio"); BRCMF_FW_CLM_DEF(4354, "brcmfmac4354-sdio"); @@ -624,11 +625,15 @@ BRCMF_FW_CLM_DEF(4356, "brcmfmac4356-sdio"); BRCMF_FW_DEF(4359, "brcmfmac4359-sdio"); BRCMF_FW_CLM_DEF(4373, "brcmfmac4373-sdio"); BRCMF_FW_CLM_DEF(43012, "brcmfmac43012-sdio"); +BRCMF_FW_CLM_DEF(43752, "brcmfmac43752-sdio"); /* firmware config files */ MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.txt"); MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt"); +/* per-board firmware binaries */ +MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.bin"); + static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x0000001F, 43241B0), @@ -643,14 +648,16 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, 43362), BRCMF_FW_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339), BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000001, 43430A0), - BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1), + BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000002, 43430A1), + BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFC, 43430B0), BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0x00000200, 43456), BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455), BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354), BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359), BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373), - BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012) + BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012), + BRCMF_FW_ENTRY(CY_CC_43752_CHIP_ID, 0xFFFFFFFF, 43752) }; #define TXCTL_CREDITS 2 @@ -3416,7 +3423,8 @@ err: static bool brcmf_sdio_aos_no_decode(struct brcmf_sdio *bus) { - if (bus->ci->chip == CY_CC_43012_CHIP_ID) + if (bus->ci->chip == CY_CC_43012_CHIP_ID || + bus->ci->chip == CY_CC_43752_CHIP_ID) return true; else return false; @@ -3907,7 +3915,7 @@ static u32 
brcmf_sdio_buscore_read32(void *ctx, u32 addr) * It can be identified as 4339 by looking at the chip revision. It * is corrected here so the chip.c module has the right info. */ - if (addr == CORE_CC_REG(SI_ENUM_BASE, chipid) && + if (addr == CORE_CC_REG(SI_ENUM_BASE_DEFAULT, chipid) && (sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4339 || sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4335_4339)) { rev = (val & CID_REV_MASK) >> CID_REV_SHIFT; @@ -3943,12 +3951,15 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) int reg_addr; u32 reg_val; u32 drivestrength; + u32 enum_base; sdiodev = bus->sdiodev; sdio_claim_host(sdiodev->func1); - pr_debug("F1 signature read @0x18000000=0x%4x\n", - brcmf_sdiod_readl(sdiodev, SI_ENUM_BASE, NULL)); + enum_base = brcmf_chip_enum_base(sdiodev->func1->device); + + pr_debug("F1 signature read @0x%08x=0x%4x\n", enum_base, + brcmf_sdiod_readl(sdiodev, enum_base, NULL)); /* * Force PLL off until brcmf_chip_attach() @@ -3967,7 +3978,8 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) goto fail; } - bus->ci = brcmf_chip_attach(sdiodev, &brcmf_sdio_buscore_ops); + bus->ci = brcmf_chip_attach(sdiodev, sdiodev->func1->device, + &brcmf_sdio_buscore_ops); if (IS_ERR(bus->ci)) { brcmf_err("brcmf_chip_attach failed!\n"); bus->ci = NULL; @@ -4257,6 +4269,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, switch (sdiod->func1->device) { case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373: + case SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752: brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n", CY_4373_F2_WATERMARK); brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, @@ -4442,7 +4455,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) bus->tx_seq = SDPCM_SEQ_WRAP - 1; /* single-threaded workqueue */ - wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM, + wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM | WQ_HIGHPRI, dev_name(&sdiodev->func1->dev)); if (!wq) { brcmf_err("insufficient memory to create txworkqueue\n"); @@ -4616,4 +4629,3 @@ int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep) return ret; } - diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c new file mode 100644 index 000000000000..2f3c451148db --- /dev/null +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2019 Broadcom + */ + +#include <asm/unaligned.h> +#include <linux/string.h> +#include <linux/bug.h> + +#include "xtlv.h" + +static int brcmf_xtlv_header_size(u16 opts) +{ + int len = (int)offsetof(struct brcmf_xtlv, data); + + if (opts & BRCMF_XTLV_OPTION_IDU8) + --len; + if (opts & BRCMF_XTLV_OPTION_LENU8) + --len; + + return len; +} + +int brcmf_xtlv_data_size(int dlen, u16 opts) +{ + int hsz; + + hsz = brcmf_xtlv_header_size(opts); + if (opts & BRCMF_XTLV_OPTION_ALIGN32) + return roundup(dlen + hsz, 4); + + return dlen + hsz; +} + +void brcmf_xtlv_pack_header(struct brcmf_xtlv *xtlv, u16 id, u16 len, + const u8 *data, u16 opts) +{ + u8 *data_buf; + u16 mask = BRCMF_XTLV_OPTION_IDU8 | BRCMF_XTLV_OPTION_LENU8; + + if (!(opts & mask)) { + u8 *idp = (u8 *)xtlv; + u8 *lenp = idp + sizeof(xtlv->id); + + put_unaligned_le16(id, idp); + put_unaligned_le16(len, lenp); + data_buf = lenp + sizeof(u16); + } else if ((opts & mask) == mask) { /* u8 id and u8 len */ + u8 *idp = (u8 *)xtlv; + u8 *lenp = idp + 1; + + *idp = (u8)id; + *lenp = (u8)len; + data_buf = lenp + sizeof(u8); + } else if (opts & 
BRCMF_XTLV_OPTION_IDU8) { /* u8 id, u16 len */ + u8 *idp = (u8 *)xtlv; + u8 *lenp = idp + 1; + + *idp = (u8)id; + put_unaligned_le16(len, lenp); + data_buf = lenp + sizeof(u16); + } else if (opts & BRCMF_XTLV_OPTION_LENU8) { /* u16 id, u8 len */ + u8 *idp = (u8 *)xtlv; + u8 *lenp = idp + sizeof(u16); + + put_unaligned_le16(id, idp); + *lenp = (u8)len; + data_buf = lenp + sizeof(u8); + } else { + WARN(true, "Unexpected xtlv option"); + return; + } + + if (opts & BRCMF_XTLV_OPTION_LENU8) { + WARN_ON(len > 0x00ff); + len &= 0xff; + } + + if (data) + memcpy(data_buf, data, len); +} + diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h new file mode 100644 index 000000000000..e1930ce1b642 --- /dev/null +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2019 Broadcom + */ +#ifndef __BRCMF_XTLV_H +#define __BRCMF_XTLV_H + +#include <linux/types.h> +#include <linux/bits.h> + +/* bcm type(id), length, value with w/16 bit id/len. The structure below + * is nominal, and is used to support variable length id and type. See + * xtlv options below. + */ +struct brcmf_xtlv { + u16 id; + u16 len; + u8 data[0]; +}; + +enum brcmf_xtlv_option { + BRCMF_XTLV_OPTION_ALIGN32 = BIT(0), + BRCMF_XTLV_OPTION_IDU8 = BIT(1), + BRCMF_XTLV_OPTION_LENU8 = BIT(2), +}; + +int brcmf_xtlv_data_size(int dlen, u16 opts); +void brcmf_xtlv_pack_header(struct brcmf_xtlv *xtlv, u16 id, u16 len, + const u8 *data, u16 opts); + +#endif /* __BRCMF_XTLV_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c index 26de1bd7fee9..8ddfc3d06687 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c @@ -704,7 +704,7 @@ static void brcms_c_write_inits(struct brcms_hardware *wlc_hw, static void brcms_c_write_mhf(struct brcms_hardware *wlc_hw, u16 *mhfs) { u8 idx; - u16 addr[] = { + static const u16 addr[] = { M_HOST_FLAGS1, M_HOST_FLAGS2, M_HOST_FLAGS3, M_HOST_FLAGS4, M_HOST_FLAGS5 }; diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h index 00309b272a0e..9d81320164ce 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h @@ -52,6 +52,7 @@ #define BRCM_CC_4371_CHIP_ID 0x4371 #define CY_CC_4373_CHIP_ID 0x4373 #define CY_CC_43012_CHIP_ID 43012 +#define CY_CC_43752_CHIP_ID 43752 /* USB Device IDs */ #define BRCM_USB_43143_DEVICE_ID 0xbd1e diff --git a/drivers/net/wireless/broadcom/brcm80211/include/soc.h b/drivers/net/wireless/broadcom/brcm80211/include/soc.h index 92d942b44f2c..824921191366 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/soc.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/soc.h @@ -6,7 +6,7 @@ #ifndef _BRCM_SOC_H #define _BRCM_SOC_H -#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */ +#define SI_ENUM_BASE_DEFAULT 0x18000000 /* Common core control flags */ #define SICF_BIST_EN 0x8000 diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index fd37d4d2983b..65dd8cff1b01 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -1144,7 +1144,7 @@ static int waitbusy(struct airo_info *ai); static irqreturn_t airo_interrupt(int irq, void* dev_id); static int 
airo_thread(void *data); static void timer_func(struct net_device *dev); -static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int airo_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *, int cmd); static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev); #ifdef CISCO_EXT static int readrids(struct net_device *dev, aironet_ioctl *comp); @@ -2664,7 +2664,7 @@ static const struct net_device_ops airo11_netdev_ops = { .ndo_start_xmit = airo_start_xmit11, .ndo_get_stats = airo_get_stats, .ndo_set_mac_address = airo_set_mac_address, - .ndo_do_ioctl = airo_ioctl, + .ndo_siocdevprivate = airo_siocdevprivate, }; static void wifi_setup(struct net_device *dev) @@ -2764,7 +2764,7 @@ static const struct net_device_ops airo_netdev_ops = { .ndo_get_stats = airo_get_stats, .ndo_set_rx_mode = airo_set_multicast_list, .ndo_set_mac_address = airo_set_mac_address, - .ndo_do_ioctl = airo_ioctl, + .ndo_siocdevprivate = airo_siocdevprivate, .ndo_validate_addr = eth_validate_addr, }; @@ -2775,7 +2775,7 @@ static const struct net_device_ops mpi_netdev_ops = { .ndo_get_stats = airo_get_stats, .ndo_set_rx_mode = airo_set_multicast_list, .ndo_set_mac_address = airo_set_mac_address, - .ndo_do_ioctl = airo_ioctl, + .ndo_siocdevprivate = airo_siocdevprivate, .ndo_validate_addr = eth_validate_addr, }; @@ -7661,7 +7661,8 @@ static const struct iw_handler_def airo_handler_def = * Javier Achirica did a great job of merging code from the unnamed CISCO * developer that added support for flashing the card. */ -static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int airo_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd) { int rc = 0; struct airo_info *ai = dev->ml_priv; @@ -7678,7 +7679,7 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { int val = AIROMAGIC; aironet_ioctl com; - if (copy_from_user(&com, rq->ifr_data, sizeof(com))) + if (copy_from_user(&com, data, sizeof(com))) rc = -EFAULT; else if (copy_to_user(com.data, (char *)&val, sizeof(val))) rc = -EFAULT; @@ -7694,7 +7695,7 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) */ { aironet_ioctl com; - if (copy_from_user(&com, rq->ifr_data, sizeof(com))) { + if (copy_from_user(&com, data, sizeof(com))) { rc = -EFAULT; break; } diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c index 5a2a723e480b..7a684b76f39b 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c @@ -927,7 +927,8 @@ static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 }; static int libipw_verify_qos_info(struct libipw_qos_information_element *info_element, int sub_type) { - + if (info_element->elementID != QOS_ELEMENT_ID) + return -1; if (info_element->qui_subtype != sub_type) return -1; if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN)) @@ -943,57 +944,34 @@ static int libipw_verify_qos_info(struct libipw_qos_information_element /* * Parse a QoS parameter element */ -static int libipw_read_qos_param_element(struct libipw_qos_parameter_info - *element_param, struct libipw_info_element - *info_element) +static int libipw_read_qos_param_element( + struct libipw_qos_parameter_info *element_param, + struct libipw_info_element *info_element) { - int ret = 0; - u16 size = sizeof(struct libipw_qos_parameter_info) - 2; + size_t size = sizeof(*element_param); - if ((info_element == NULL) || (element_param == NULL)) 
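
/*
 * Editor's note on the rewritten validation below: it relies on the
 * standard 802.11 information-element layout, sketched here:
 *
 *	struct libipw_info_element {
 *		u8 id;
 *		u8 len;		// payload bytes only, 2-byte id/len header excluded
 *		u8 data[];
 *	} __packed;
 *
 * With size = sizeof(*element_param) covering header plus payload, a
 * well-formed element satisfies info_element->len == size - 2, and a
 * single memcpy() of size bytes then replaces the old field-by-field copy.
 */
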
+ if (!element_param || !info_element || info_element->len != size - 2) return -1; - if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) { - memcpy(element_param->info_element.qui, info_element->data, - info_element->len); - element_param->info_element.elementID = info_element->id; - element_param->info_element.length = info_element->len; - } else - ret = -1; - if (ret == 0) - ret = libipw_verify_qos_info(&element_param->info_element, - QOS_OUI_PARAM_SUB_TYPE); - return ret; + memcpy(element_param, info_element, size); + return libipw_verify_qos_info(&element_param->info_element, + QOS_OUI_PARAM_SUB_TYPE); } /* * Parse a QoS information element */ -static int libipw_read_qos_info_element(struct - libipw_qos_information_element - *element_info, struct libipw_info_element - *info_element) +static int libipw_read_qos_info_element( + struct libipw_qos_information_element *element_info, + struct libipw_info_element *info_element) { - int ret = 0; - u16 size = sizeof(struct libipw_qos_information_element) - 2; + size_t size = sizeof(struct libipw_qos_information_element) - 2; - if (element_info == NULL) + if (!element_info || !info_element || info_element->len != size - 2) return -1; - if (info_element == NULL) - return -1; - - if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) { - memcpy(element_info->qui, info_element->data, - info_element->len); - element_info->elementID = info_element->id; - element_info->length = info_element->len; - } else - ret = -1; - if (ret == 0) - ret = libipw_verify_qos_info(element_info, - QOS_OUI_INFO_SUB_TYPE); - return ret; + memcpy(element_info, info_element, size); + return libipw_verify_qos_info(element_info, QOS_OUI_INFO_SUB_TYPE); } /* diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c index d9baa2fa603b..36d1e6b2568d 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c @@ -179,8 +179,8 @@ static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size, { struct libipw_txb *txb; int i; - txb = kmalloc(sizeof(struct libipw_txb) + (sizeof(u8 *) * nr_frags), - gfp_mask); + + txb = kmalloc(struct_size(txb, fragments, nr_frags), gfp_mask); if (!txb) return NULL; diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 6ff2674f8466..45abb25b65a9 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -571,20 +571,18 @@ il3945_tx_skb(struct il_priv *il, /* Physical address of this Tx command's header (not MAC header!), * within command buffer array. */ - txcmd_phys = - pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen, - PCI_DMA_TODEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys))) + txcmd_phys = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, firstlen, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, txcmd_phys))) goto drop_unlock; /* Set up TFD's 2nd entry to point directly to remainder of skb, * if any (802.11 null frames have no payload). 
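
 *
 * Editor's note: the hunks in this file are a mechanical move from the
 * deprecated PCI DMA wrappers to the generic DMA API; both forms below
 * are equivalent, long-standing kernel interfaces:
 *
 *	addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *
 *	pci_unmap_page(pdev, ...)         ->  dma_unmap_page(&pdev->dev, ...)
 *	pci_dma_mapping_error(pdev, a)    ->  dma_mapping_error(&pdev->dev, a)
 *	PCI_DMA_FROMDEVICE                ->  DMA_FROM_DEVICE
 *	PCI_DMA_BIDIRECTIONAL             ->  DMA_BIDIRECTIONAL
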
*/ secondlen = skb->len - hdr_len; if (secondlen > 0) { - phys_addr = - pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen, - PCI_DMA_TODEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) + phys_addr = dma_map_single(&il->pci_dev->dev, skb->data + hdr_len, + secondlen, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) goto drop_unlock; } @@ -1015,11 +1013,11 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority) /* Get physical address of RB/SKB */ page_dma = - pci_map_page(il->pci_dev, page, 0, + dma_map_page(&il->pci_dev->dev, page, 0, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) { + if (unlikely(dma_mapping_error(&il->pci_dev->dev, page_dma))) { __free_pages(page, il->hw_params.rx_page_order); break; } @@ -1028,9 +1026,9 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority) if (list_empty(&rxq->rx_used)) { spin_unlock_irqrestore(&rxq->lock, flags); - pci_unmap_page(il->pci_dev, page_dma, + dma_unmap_page(&il->pci_dev->dev, page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); __free_pages(page, il->hw_params.rx_page_order); return; } @@ -1062,9 +1060,10 @@ il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) /* In the reset function, these buffers may have been allocated * to an SKB, so we need to unmap and free potential storage */ if (rxq->pool[i].page != NULL) { - pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, + dma_unmap_page(&il->pci_dev->dev, + rxq->pool[i].page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); __il_free_pages(il, rxq->pool[i].page); rxq->pool[i].page = NULL; } @@ -1111,9 +1110,10 @@ il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) int i; for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { if (rxq->pool[i].page != NULL) { - pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, + dma_unmap_page(&il->pci_dev->dev, + rxq->pool[i].page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); __il_free_pages(il, rxq->pool[i].page); rxq->pool[i].page = NULL; } @@ -1213,9 +1213,9 @@ il3945_rx_handle(struct il_priv *il) rxq->queue[i] = NULL; - pci_unmap_page(il->pci_dev, rxb->page_dma, + dma_unmap_page(&il->pci_dev->dev, rxb->page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); pkt = rxb_addr(rxb); len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; @@ -1260,11 +1260,11 @@ il3945_rx_handle(struct il_priv *il) spin_lock_irqsave(&rxq->lock, flags); if (rxb->page != NULL) { rxb->page_dma = - pci_map_page(il->pci_dev, rxb->page, 0, - PAGE_SIZE << il->hw_params. 
- rx_page_order, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, - rxb->page_dma))) { + dma_map_page(&il->pci_dev->dev, rxb->page, 0, + PAGE_SIZE << il->hw_params.rx_page_order, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, + rxb->page_dma))) { __il_free_pages(il, rxb->page); rxb->page = NULL; list_add_tail(&rxb->list, &rxq->rx_used); @@ -3616,9 +3616,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { IL_WARN("No suitable DMA available.\n"); goto out_pci_disable_device; diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index 0597d828bee1..a773939b8c2a 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c @@ -652,16 +652,16 @@ il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq) /* Unmap tx_cmd */ if (counter) - pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping), + dma_unmap_single(&dev->dev, + dma_unmap_addr(&txq->meta[idx], mapping), dma_unmap_len(&txq->meta[idx], len), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); /* unmap chunks if any */ for (i = 1; i < counter; i++) - pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr), - le32_to_cpu(tfd->tbs[i].len), - PCI_DMA_TODEVICE); + dma_unmap_single(&dev->dev, le32_to_cpu(tfd->tbs[i].addr), + le32_to_cpu(tfd->tbs[i].len), DMA_TO_DEVICE); /* free SKB */ if (txq->skbs) { diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 341d6a2bc690..0223532fd56a 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -94,9 +94,10 @@ il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) /* In the reset function, these buffers may have been allocated * to an SKB, so we need to unmap and free potential storage */ if (rxq->pool[i].page != NULL) { - pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, + dma_unmap_page(&il->pci_dev->dev, + rxq->pool[i].page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); __il_free_pages(il, rxq->pool[i].page); rxq->pool[i].page = NULL; } @@ -342,11 +343,10 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority) } /* Get physical address of the RB */ - page_dma = - pci_map_page(il->pci_dev, page, 0, - PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) { + page_dma = dma_map_page(&il->pci_dev->dev, page, 0, + PAGE_SIZE << il->hw_params.rx_page_order, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, page_dma))) { __free_pages(page, il->hw_params.rx_page_order); break; } @@ -355,9 +355,9 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority) if (list_empty(&rxq->rx_used)) { spin_unlock_irqrestore(&rxq->lock, flags); - pci_unmap_page(il->pci_dev, page_dma, + dma_unmap_page(&il->pci_dev->dev, page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); __free_pages(page, il->hw_params.rx_page_order); return; } @@ -409,9 +409,10 @@ il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) int i; for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { if (rxq->pool[i].page != NULL) { - pci_unmap_page(il->pci_dev, 
rxq->pool[i].page_dma, + dma_unmap_page(&il->pci_dev->dev, + rxq->pool[i].page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); __il_free_pages(il, rxq->pool[i].page); rxq->pool[i].page = NULL; } @@ -1815,20 +1816,18 @@ il4965_tx_skb(struct il_priv *il, /* Physical address of this Tx command's header (not MAC header!), * within command buffer array. */ - txcmd_phys = - pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen, - PCI_DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys))) + txcmd_phys = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, firstlen, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, txcmd_phys))) goto drop_unlock; /* Set up TFD's 2nd entry to point directly to remainder of skb, * if any (802.11 null frames have no payload). */ secondlen = skb->len - hdr_len; if (secondlen > 0) { - phys_addr = - pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen, - PCI_DMA_TODEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) + phys_addr = dma_map_single(&il->pci_dev->dev, skb->data + hdr_len, + secondlen, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) goto drop_unlock; } @@ -1853,8 +1852,8 @@ il4965_tx_skb(struct il_priv *il, offsetof(struct il_tx_cmd, scratch); /* take back ownership of DMA buffer to enable update */ - pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen, - PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_cpu(&il->pci_dev->dev, txcmd_phys, firstlen, + DMA_BIDIRECTIONAL); tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys); @@ -1869,8 +1868,8 @@ il4965_tx_skb(struct il_priv *il, if (info->flags & IEEE80211_TX_CTL_AMPDU) il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len)); - pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen, - PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(&il->pci_dev->dev, txcmd_phys, firstlen, + DMA_BIDIRECTIONAL); /* Tell device the write idx *just past* this latest filled TFD */ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); @@ -3929,15 +3928,15 @@ il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq) /* Unmap tx_cmd */ if (num_tbs) - pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping), + dma_unmap_single(&dev->dev, + dma_unmap_addr(&txq->meta[idx], mapping), dma_unmap_len(&txq->meta[idx], len), - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); /* Unmap chunks, if any. */ for (i = 1; i < num_tbs; i++) - pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i), - il4965_tfd_tb_get_len(tfd, i), - PCI_DMA_TODEVICE); + dma_unmap_single(&dev->dev, il4965_tfd_tb_get_addr(tfd, i), + il4965_tfd_tb_get_len(tfd, i), DMA_TO_DEVICE); /* free SKB */ if (txq->skbs) { @@ -4243,9 +4242,9 @@ il4965_rx_handle(struct il_priv *il) rxq->queue[i] = NULL; - pci_unmap_page(il->pci_dev, rxb->page_dma, + dma_unmap_page(&il->pci_dev->dev, rxb->page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); pkt = rxb_addr(rxb); len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; @@ -4290,12 +4289,12 @@ il4965_rx_handle(struct il_priv *il) spin_lock_irqsave(&rxq->lock, flags); if (rxb->page != NULL) { rxb->page_dma = - pci_map_page(il->pci_dev, rxb->page, 0, - PAGE_SIZE << il->hw_params. 
- rx_page_order, PCI_DMA_FROMDEVICE); + dma_map_page(&il->pci_dev->dev, rxb->page, 0, + PAGE_SIZE << il->hw_params.rx_page_order, + DMA_FROM_DEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, - rxb->page_dma))) { + if (unlikely(dma_mapping_error(&il->pci_dev->dev, + rxb->page_dma))) { __il_free_pages(il, rxb->page); rxb->page = NULL; list_add_tail(&rxb->list, &rxq->rx_used); @@ -6514,14 +6513,9 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = - pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); /* both attempts failed: */ if (err) { IL_WARN("No suitable DMA available.\n"); diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 219fed91cac5..683b632981ed 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -2819,10 +2819,10 @@ il_cmd_queue_unmap(struct il_priv *il) i = il_get_cmd_idx(q, q->read_ptr, 0); if (txq->meta[i].flags & CMD_MAPPED) { - pci_unmap_single(il->pci_dev, + dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(&txq->meta[i], mapping), dma_unmap_len(&txq->meta[i], len), - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); txq->meta[i].flags = 0; } @@ -2831,10 +2831,10 @@ il_cmd_queue_unmap(struct il_priv *il) i = q->n_win; if (txq->meta[i].flags & CMD_MAPPED) { - pci_unmap_single(il->pci_dev, + dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(&txq->meta[i], mapping), dma_unmap_len(&txq->meta[i], len), - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); txq->meta[i].flags = 0; } } @@ -3197,10 +3197,9 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) } #endif - phys_addr = - pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size, - PCI_DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) { + phys_addr = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, fix_size, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) { idx = -ENOMEM; goto out; } @@ -3298,8 +3297,8 @@ il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb) txq->time_stamp = jiffies; - pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping), - dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(meta, mapping), + dma_unmap_len(meta, len), DMA_BIDIRECTIONAL); /* Input error checking is done when commands are added to queue. 
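
 *
 * Editor's note: the probe-path change above folds the old
 * pci_set_dma_mask()/pci_set_consistent_dma_mask() pair into one
 * dma_set_mask_and_coherent() call; 4965 keeps its two-step fallback,
 * roughly:
 *
 *	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
 *	if (err)	// no 36-bit support, retry with a 32-bit mask
 *		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (err)
 *		goto out_pci_disable_device;	// both attempts failed
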
*/ if (meta->flags & CMD_WANT_SKB) { diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 7f1faa9d97b4..52d1d391f4c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -9,7 +9,7 @@ #include "iwl-prph.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 64 +#define IWL_22000_UCODE_API_MAX 65 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 @@ -154,7 +154,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .apmg_not_supported = true, \ .trans.mq_rx_supported = true, \ .vht_mu_mimo_supported = true, \ - .mac_addr_from_csr = true, \ + .mac_addr_from_csr = 0x380, \ .ht_params = &iwl_22000_ht_params, \ .nvm_ver = IWL_22000_NVM_VERSION, \ .trans.use_tfh = true, \ @@ -215,6 +215,67 @@ static const struct iwl_ht_params iwl_22000_ht_params = { }, \ } +#define IWL_DEVICE_BZ_COMMON \ + .ucode_api_max = IWL_22000_UCODE_API_MAX, \ + .ucode_api_min = IWL_22000_UCODE_API_MIN, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = 10, \ + .non_shared_ant = ANT_B, \ + .dccm_offset = IWL_22000_DCCM_OFFSET, \ + .dccm_len = IWL_22000_DCCM_LEN, \ + .dccm2_offset = IWL_22000_DCCM2_OFFSET, \ + .dccm2_len = IWL_22000_DCCM2_LEN, \ + .smem_offset = IWL_22000_SMEM_OFFSET, \ + .smem_len = IWL_22000_SMEM_LEN, \ + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ + .apmg_not_supported = true, \ + .trans.mq_rx_supported = true, \ + .vht_mu_mimo_supported = true, \ + .mac_addr_from_csr = 0x30, \ + .ht_params = &iwl_22000_ht_params, \ + .nvm_ver = IWL_22000_NVM_VERSION, \ + .trans.use_tfh = true, \ + .trans.rf_id = true, \ + .trans.gen2 = true, \ + .nvm_type = IWL_NVM_EXT, \ + .dbgc_supported = true, \ + .min_umac_error_event_table = 0x400000, \ + .d3_debug_data_base_addr = 0x401000, \ + .d3_debug_data_length = 60 * 1024, \ + .mon_smem_regs = { \ + .write_ptr = { \ + .addr = LDBG_M2S_BUF_WPTR, \ + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = LDBG_M2S_BUF_WRAP_CNT, \ + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ + }, \ + } + +#define IWL_DEVICE_BZ \ + IWL_DEVICE_BZ_COMMON, \ + .trans.umac_prph_offset = 0x300000, \ + .trans.device_family = IWL_DEVICE_FAMILY_BZ, \ + .trans.base_params = &iwl_ax210_base_params, \ + .min_txq_size = 128, \ + .gp2_reg_addr = 0xd02c68, \ + .min_256_ba_txq_size = 1024, \ + .mon_dram_regs = { \ + .write_ptr = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = DBGC_DBGBUF_WRAP_AROUND, \ + .mask = 0xffffffff, \ + }, \ + .cur_frag = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ + }, \ + } + const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = { .mq_rx_supported = true, .use_tfh = true, @@ -373,7 +434,7 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { }; const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { - .device_family = IWL_DEVICE_FAMILY_AX210, + .device_family = IWL_DEVICE_FAMILY_BZ, .base_params = &iwl_ax210_base_params, .mq_rx_supported = true, .use_tfh = true, @@ -394,6 +455,7 @@ const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz"; const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz"; const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz"; +const char iwl_bz_name[] = "Intel(R) TBD Bz device"; const char iwl_ax200_killer_1650w_name[] = "Killer(R) Wi-Fi 6 
AX1650w 160MHz Wireless Network Adapter (200D2W)"; @@ -763,28 +825,28 @@ const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = { const struct iwl_cfg iwl_cfg_bz_a0_hr_b0 = { .fw_name_pre = IWL_BZ_A_HR_B_FW_PRE, .uhb_supported = true, - IWL_DEVICE_AX210, + IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bz_a0_gf_a0 = { .fw_name_pre = IWL_BZ_A_GF_A_FW_PRE, .uhb_supported = true, - IWL_DEVICE_AX210, + IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0 = { .fw_name_pre = IWL_BZ_A_GF4_A_FW_PRE, .uhb_supported = true, - IWL_DEVICE_AX210, + IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bz_a0_mr_a0 = { .fw_name_pre = IWL_BZ_A_MR_A_FW_PRE, .uhb_supported = true, - IWL_DEVICE_AX210, + IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index 871533beff30..7a7ca06d46c1 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -89,7 +89,7 @@ static const struct iwl_tt_params iwl9000_tt_params = { .apmg_not_supported = true, \ .num_rbds = 512, \ .vht_mu_mimo_supported = true, \ - .mac_addr_from_csr = true, \ + .mac_addr_from_csr = 0x380, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x800000, \ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index c01523f64bfc..cc7b69fd14d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** * - * Copyright(c) 2003 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2014, 2018 - 2021 Intel Corporation. All rights reserved. 
* Copyright(c) 2015 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well @@ -1950,7 +1950,7 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) } } -static void iwl_nic_error(struct iwl_op_mode *op_mode) +static void iwl_nic_error(struct iwl_op_mode *op_mode, bool sync) { struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index 80475c7a6fba..3cd7b423c588 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -318,7 +318,7 @@ iwlagn_accumulative_statistics(struct iwl_priv *priv, (__le32 *)&priv->delta_stats._name, \ (__le32 *)&priv->max_delta_stats._name, \ (__le32 *)&priv->accum_stats._name, \ - sizeof(*_name)); + sizeof(*_name)) ACCUM(common); ACCUM(rx_non_phy); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 34933f133a0a..1efac0b2a94d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -264,7 +264,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, goto out_free; } - enabled = !!wifi_pkg->package.elements[0].integer.value; + enabled = !!wifi_pkg->package.elements[1].integer.value; if (!enabled) { *block_list_size = -1; @@ -273,15 +273,15 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, goto out_free; } - if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || - wifi_pkg->package.elements[1].integer.value > + if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER || + wifi_pkg->package.elements[2].integer.value > APCI_WTAS_BLACK_LIST_MAX) { IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n", wifi_pkg->package.elements[1].integer.value); ret = -EINVAL; goto out_free; } - *block_list_size = wifi_pkg->package.elements[1].integer.value; + *block_list_size = wifi_pkg->package.elements[2].integer.value; IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *block_list_size); if (*block_list_size > APCI_WTAS_BLACK_LIST_MAX) { @@ -294,15 +294,15 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, for (i = 0; i < *block_list_size; i++) { u32 country; - if (wifi_pkg->package.elements[2 + i].type != + if (wifi_pkg->package.elements[3 + i].type != ACPI_TYPE_INTEGER) { IWL_DEBUG_RADIO(fwrt, - "TAS invalid array elem %d\n", 2 + i); + "TAS invalid array elem %d\n", 3 + i); ret = -EINVAL; goto out_free; } - country = wifi_pkg->package.elements[2 + i].integer.value; + country = wifi_pkg->package.elements[3 + i].integer.value; block_list_array[i] = cpu_to_le32(country); IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country); } @@ -412,20 +412,35 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv); static int iwl_sar_set_profile(union acpi_object *table, struct iwl_sar_profile *profile, - bool enabled) + bool enabled, u8 num_chains, u8 num_sub_bands) { - int i; - - profile->enabled = enabled; - - for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) { - if (table[i].type != ACPI_TYPE_INTEGER || - table[i].integer.value > U8_MAX) - return -EINVAL; + int i, j, idx = 0; - profile->table[i] = table[i].integer.value; + /* + * The table from ACPI is flat, but we store it in a + * structured array. 
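
 *
 * Editor's note, worked example: a rev-0 WRDS table is 2 chains x 5
 * sub-bands = 10 integers in chain-major order, so the cursor idx maps
 * table[0..4] to chains[0].subbands[0..4] and table[5..9] to
 * chains[1].subbands[0..4]; the loop always walks the rev-2 geometry
 * (4 chains x 11 sub-bands) and zero-fills every position that an
 * older table revision does not supply.
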
+ */ + for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) { + for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) { + /* if we don't have the values, use the default */ + if (i >= num_chains || j >= num_sub_bands) { + profile->chains[i].subbands[j] = 0; + } else { + if (table[idx].type != ACPI_TYPE_INTEGER || + table[idx].integer.value > U8_MAX) + return -EINVAL; + + profile->chains[i].subbands[j] = + table[idx].integer.value; + + idx++; + } + } } + /* Only if all values were valid can the profile be enabled */ + profile->enabled = enabled; + return 0; } @@ -433,10 +448,10 @@ static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt, __le16 *per_chain, u32 n_subbands, int prof_a, int prof_b) { - int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b }; - int i, j, idx; + int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b }; + int i, j; - for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) { + for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) { struct iwl_sar_profile *prof; /* don't allow SAR to be disabled (profile 0 means disable) */ @@ -467,11 +482,10 @@ static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt, i, profs[i]); IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i); for (j = 0; j < n_subbands; j++) { - idx = i * ACPI_SAR_NUM_SUB_BANDS + j; per_chain[i * n_subbands + j] = - cpu_to_le16(prof->table[idx]); + cpu_to_le16(prof->chains[i].subbands[j]); IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n", - j, prof->table[idx]); + j, prof->chains[i].subbands[j]); } } @@ -486,7 +500,7 @@ int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, for (i = 0; i < n_tables; i++) { ret = iwl_sar_fill_table(fwrt, - &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAIN_LIMITS], + &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0], n_subbands, prof_a, prof_b); if (ret) break; @@ -501,28 +515,71 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) union acpi_object *wifi_pkg, *table, *data; bool enabled; int ret, tbl_rev; + u8 num_chains, num_sub_bands; data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); + /* start by trying to read revision 2 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, - ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev); - if (IS_ERR(wifi_pkg)) { - ret = PTR_ERR(wifi_pkg); - goto out_free; + ACPI_WRDS_WIFI_DATA_SIZE_REV2, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 2) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_chains = ACPI_SAR_NUM_CHAINS_REV2; + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2; + + goto read_table; } - if (tbl_rev != 0) { - ret = -EINVAL; - goto out_free; + /* then try revision 1 */ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_WRDS_WIFI_DATA_SIZE_REV1, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 1) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_chains = ACPI_SAR_NUM_CHAINS_REV1; + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1; + + goto read_table; + } + + /* then finally revision 0 */ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_WRDS_WIFI_DATA_SIZE_REV0, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 0) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_chains = ACPI_SAR_NUM_CHAINS_REV0; + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0; + + goto read_table; } + ret = PTR_ERR(wifi_pkg); + goto out_free; + +read_table: if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } + IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev); + enabled = !!(wifi_pkg->package.elements[1].integer.value); /* position of the 
actual table */ @@ -531,7 +588,8 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) /* The profile from WRDS is officially profile 1, but goes * into sar_profiles[0] (because we don't have a profile 0). */ - ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled); + ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled, + num_chains, num_sub_bands); out_free: kfree(data); return ret; @@ -544,23 +602,64 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) bool enabled; int i, n_profiles, tbl_rev, pos; int ret = 0; + u8 num_chains, num_sub_bands; data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD); if (IS_ERR(data)) return PTR_ERR(data); + /* start by trying to read revision 2 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, - ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev); - if (IS_ERR(wifi_pkg)) { - ret = PTR_ERR(wifi_pkg); - goto out_free; + ACPI_EWRD_WIFI_DATA_SIZE_REV2, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 2) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_chains = ACPI_SAR_NUM_CHAINS_REV2; + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2; + + goto read_table; } - if (tbl_rev != 0) { - ret = -EINVAL; - goto out_free; + /* then try revision 1 */ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_EWRD_WIFI_DATA_SIZE_REV1, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 1) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_chains = ACPI_SAR_NUM_CHAINS_REV1; + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1; + + goto read_table; + } + + /* then finally revision 0 */ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_EWRD_WIFI_DATA_SIZE_REV0, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 0) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_chains = ACPI_SAR_NUM_CHAINS_REV0; + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0; + + goto read_table; } + ret = PTR_ERR(wifi_pkg); + goto out_free; + +read_table: if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) { ret = -EINVAL; @@ -589,13 +688,13 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) * have profile 0). So in the array we start from 1. 
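
 *
 * Editor's note: concretely, EWRD carries the profiles after profile 1
 * (WRDS already supplied profile 1 into sar_profiles[0]), so pass i
 * fills sar_profiles[i + 1], and the table cursor now advances by
 * num_chains * num_sub_bands entries per profile (10 for rev 0, 22 for
 * rev 1, 44 for rev 2) instead of the old fixed ACPI_SAR_TABLE_SIZE.
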
*/ ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos], - &fwrt->sar_profiles[i + 1], - enabled); + &fwrt->sar_profiles[i + 1], enabled, + num_chains, num_sub_bands); if (ret < 0) break; /* go to the next table */ - pos += ACPI_SAR_TABLE_SIZE; + pos += num_chains * num_sub_bands; } out_free: @@ -607,41 +706,93 @@ IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table); int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data; - int i, j, ret, tbl_rev; - int idx = 1; + int i, j, k, ret, tbl_rev; + int idx = 1; /* start from one to skip the domain */ + u8 num_bands; data = iwl_acpi_get_object(fwrt->dev, ACPI_WGDS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); + /* start by trying to read revision 2 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, - ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev); + ACPI_WGDS_WIFI_DATA_SIZE_REV2, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 2) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } - if (IS_ERR(wifi_pkg)) { - ret = PTR_ERR(wifi_pkg); - goto out_free; + num_bands = ACPI_GEO_NUM_BANDS_REV2; + + goto read_table; } - if (tbl_rev > 1) { - ret = -EINVAL; - goto out_free; + /* then try revision 0 (which is the same as 1) */ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_WGDS_WIFI_DATA_SIZE_REV0, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 0 && tbl_rev != 1) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_bands = ACPI_GEO_NUM_BANDS_REV0; + + goto read_table; } + ret = PTR_ERR(wifi_pkg); + goto out_free; + +read_table: fwrt->geo_rev = tbl_rev; for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) { - for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) { + for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) { union acpi_object *entry; - entry = &wifi_pkg->package.elements[idx++]; - if (entry->type != ACPI_TYPE_INTEGER || - entry->integer.value > U8_MAX) { - ret = -EINVAL; - goto out_free; + /* + * num_bands is either 2 or 3, if it's only 2 then + * fill the third band (6 GHz) with the values from + * 5 GHz (second band) + */ + if (j >= num_bands) { + fwrt->geo_profiles[i].bands[j].max = + fwrt->geo_profiles[i].bands[1].max; + } else { + entry = &wifi_pkg->package.elements[idx++]; + if (entry->type != ACPI_TYPE_INTEGER || + entry->integer.value > U8_MAX) { + ret = -EINVAL; + goto out_free; + } + + fwrt->geo_profiles[i].bands[j].max = + entry->integer.value; } - fwrt->geo_profiles[i].values[j] = entry->integer.value; + for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) { + /* same here as above */ + if (j >= num_bands) { + fwrt->geo_profiles[i].bands[j].chains[k] = + fwrt->geo_profiles[i].bands[1].chains[k]; + } else { + entry = &wifi_pkg->package.elements[idx++]; + if (entry->type != ACPI_TYPE_INTEGER || + entry->integer.value > U8_MAX) { + ret = -EINVAL; + goto out_free; + } + + fwrt->geo_profiles[i].bands[j].chains[k] = + entry->integer.value; + } + } } } + ret = 0; out_free: kfree(data); @@ -673,43 +824,26 @@ IWL_EXPORT_SYMBOL(iwl_sar_geo_support); int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, struct iwl_per_chain_offset *table, u32 n_bands) { - int ret, i, j; + int i, j; if (!iwl_sar_geo_support(fwrt)) return -EOPNOTSUPP; - ret = iwl_sar_get_wgds_table(fwrt); - if (ret < 0) { - IWL_DEBUG_RADIO(fwrt, - "Geo SAR BIOS table invalid or unavailable. 
(%d)\n", - ret); - /* we don't fail if the table is not available */ - return -ENOENT; - } - for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) { for (j = 0; j < n_bands; j++) { struct iwl_per_chain_offset *chain = &table[i * n_bands + j]; - u8 *value; - - if (j * ACPI_GEO_PER_CHAIN_SIZE >= - ARRAY_SIZE(fwrt->geo_profiles[0].values)) - /* - * Currently we only store lb an hb values, and - * don't have any special ones for uhb. So leave - * those empty for the time being - */ - break; - - value = &fwrt->geo_profiles[i].values[j * - ACPI_GEO_PER_CHAIN_SIZE]; - chain->max_tx_power = cpu_to_le16(value[0]); - chain->chain_a = value[1]; - chain->chain_b = value[2]; + + chain->max_tx_power = + cpu_to_le16(fwrt->geo_profiles[i].bands[j].max); + chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0]; + chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1]; IWL_DEBUG_RADIO(fwrt, "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n", - i, j, value[1], value[2], value[0]); + i, j, + fwrt->geo_profiles[i].bands[j].chains[0], + fwrt->geo_profiles[i].bands[j].chains[1], + fwrt->geo_profiles[i].bands[j].max); } } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index b858e998999c..16ed0995b51e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -26,21 +26,46 @@ #define ACPI_WIFI_DOMAIN (0x07) -#define ACPI_SAR_TABLE_SIZE 10 #define ACPI_SAR_PROFILE_NUM 4 -#define ACPI_GEO_TABLE_SIZE 6 #define ACPI_NUM_GEO_PROFILES 3 #define ACPI_GEO_PER_CHAIN_SIZE 3 -#define ACPI_SAR_NUM_CHAIN_LIMITS 2 -#define ACPI_SAR_NUM_SUB_BANDS 5 -#define ACPI_SAR_NUM_TABLES 1 +#define ACPI_SAR_NUM_CHAINS_REV0 2 +#define ACPI_SAR_NUM_CHAINS_REV1 2 +#define ACPI_SAR_NUM_CHAINS_REV2 4 +#define ACPI_SAR_NUM_SUB_BANDS_REV0 5 +#define ACPI_SAR_NUM_SUB_BANDS_REV1 11 +#define ACPI_SAR_NUM_SUB_BANDS_REV2 11 + +#define ACPI_WRDS_WIFI_DATA_SIZE_REV0 (ACPI_SAR_NUM_CHAINS_REV0 * \ + ACPI_SAR_NUM_SUB_BANDS_REV0 + 2) +#define ACPI_WRDS_WIFI_DATA_SIZE_REV1 (ACPI_SAR_NUM_CHAINS_REV1 * \ + ACPI_SAR_NUM_SUB_BANDS_REV1 + 2) +#define ACPI_WRDS_WIFI_DATA_SIZE_REV2 (ACPI_SAR_NUM_CHAINS_REV2 * \ + ACPI_SAR_NUM_SUB_BANDS_REV2 + 2) +#define ACPI_EWRD_WIFI_DATA_SIZE_REV0 ((ACPI_SAR_PROFILE_NUM - 1) * \ + ACPI_SAR_NUM_CHAINS_REV0 * \ + ACPI_SAR_NUM_SUB_BANDS_REV0 + 3) +#define ACPI_EWRD_WIFI_DATA_SIZE_REV1 ((ACPI_SAR_PROFILE_NUM - 1) * \ + ACPI_SAR_NUM_CHAINS_REV1 * \ + ACPI_SAR_NUM_SUB_BANDS_REV1 + 3) +#define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \ + ACPI_SAR_NUM_CHAINS_REV2 * \ + ACPI_SAR_NUM_SUB_BANDS_REV2 + 3) + +/* revision 0 and 1 are identical, except for the semantics in the FW */ +#define ACPI_GEO_NUM_BANDS_REV0 2 +#define ACPI_GEO_NUM_BANDS_REV2 3 +#define ACPI_GEO_NUM_CHAINS 2 + +#define ACPI_WGDS_WIFI_DATA_SIZE_REV0 (ACPI_NUM_GEO_PROFILES * \ + ACPI_GEO_NUM_BANDS_REV0 * \ + ACPI_GEO_PER_CHAIN_SIZE + 1) +#define ACPI_WGDS_WIFI_DATA_SIZE_REV2 (ACPI_NUM_GEO_PROFILES * \ + ACPI_GEO_NUM_BANDS_REV2 * \ + ACPI_GEO_PER_CHAIN_SIZE + 1) -#define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2) -#define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \ - ACPI_SAR_TABLE_SIZE + 3) -#define ACPI_WGDS_WIFI_DATA_SIZE 19 #define ACPI_WRDD_WIFI_DATA_SIZE 2 #define ACPI_SPLC_WIFI_DATA_SIZE 2 #define ACPI_ECKV_WIFI_DATA_SIZE 2 @@ -51,8 +76,6 @@ #define APCI_WTAS_BLACK_LIST_MAX 16 #define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX) -#define ACPI_WGDS_TABLE_SIZE 3 - #define 
ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \ IWL_NUM_SUB_BANDS_V1) + 2) #define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \ @@ -64,13 +87,28 @@ #define ACPI_PPAG_MIN_HB -16 #define ACPI_PPAG_MAX_HB 40 +/* + * The profile for revision 2 is a superset of revision 1, which is in + * turn a superset of revision 0. So we can store all revisions + * inside revision 2, which is what we represent here. + */ +struct iwl_sar_profile_chain { + u8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2]; +}; + struct iwl_sar_profile { bool enabled; - u8 table[ACPI_SAR_TABLE_SIZE]; + struct iwl_sar_profile_chain chains[ACPI_SAR_NUM_CHAINS_REV2]; +}; + +/* Same thing as with SAR, all revisions fit in revision 2 */ +struct iwl_geo_profile_band { + u8 max; + u8 chains[ACPI_GEO_NUM_CHAINS]; }; struct iwl_geo_profile { - u8 values[ACPI_GEO_TABLE_SIZE]; + struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2]; }; enum iwl_dsm_funcs_rev_0 { @@ -234,7 +272,7 @@ static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) { - return -ENOENT; + return 1; } static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index 01580c9175f3..3e81e9369224 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -142,7 +142,7 @@ enum iwl_bt_mxbox_dw3 { "\t%s: %d%s", \ #_field, \ BT_MBOX_MSG(notif, _num, _field), \ - true ? "\n" : ", "); + true ? "\n" : ", ") enum iwl_bt_activity_grading { BT_OFF = 0, BT_ON_NO_CONNECTION = 1, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index ce060c3dfd7b..ee6b5844a871 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -550,7 +550,8 @@ enum iwl_legacy_cmds { WOWLAN_CONFIGURATION = 0xe1, /** - * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd + * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd_v4, + * &struct iwl_wowlan_rsc_tsc_params_cmd */ WOWLAN_TSC_RSC_PARAM = 0xe2, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index b2e7ef3ddc88..3ec82cae3981 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -6,6 +6,7 @@ */ #ifndef __iwl_fw_api_d3_h__ #define __iwl_fw_api_d3_h__ +#include <iwl-trans.h> /** * enum iwl_d0i3_flags - d0i3 flags @@ -389,11 +390,14 @@ struct iwl_wowlan_config_cmd { u8 reserved; } __packed; /* WOWLAN_CONFIG_API_S_VER_5 */ +#define IWL_NUM_RSC 16 +#define WOWLAN_KEY_MAX_SIZE 32 +#define WOWLAN_GTK_KEYS_NUM 2 +#define WOWLAN_IGTK_KEYS_NUM 2 + /* * WOWLAN_TSC_RSC_PARAMS */ -#define IWL_NUM_RSC 16 - struct tkip_sc { __le16 iv16; __le16 pad; @@ -425,11 +429,19 @@ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 { union iwl_all_tsc_rsc all_tsc_rsc; } __packed; /* ALL_TSC_RSC_API_S_VER_2 */ -struct iwl_wowlan_rsc_tsc_params_cmd { +struct iwl_wowlan_rsc_tsc_params_cmd_v4 { struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 params; __le32 sta_id; } __packed; /* ALL_TSC_RSC_API_S_VER_4 */ +struct iwl_wowlan_rsc_tsc_params_cmd { + __le64 ucast_rsc[IWL_MAX_TID_COUNT]; + __le64 mcast_rsc[WOWLAN_GTK_KEYS_NUM][IWL_MAX_TID_COUNT]; + __le32 sta_id; +#define IWL_MCAST_KEY_MAP_INVALID 0xff + u8 mcast_key_id_map[4]; +} __packed; /* 
ALL_TSC_RSC_API_S_VER_5 */ + #define IWL_MIC_KEY_SIZE 8 struct iwl_mic_keys { u8 tx[IWL_MIC_KEY_SIZE]; @@ -541,10 +553,6 @@ struct iwl_wowlan_gtk_status_v1 { struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc; } __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */ -#define WOWLAN_KEY_MAX_SIZE 32 -#define WOWLAN_GTK_KEYS_NUM 2 -#define WOWLAN_IGTK_KEYS_NUM 2 - /** * struct iwl_wowlan_gtk_status - GTK status * @key: GTK material diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index 5a2d9a1f7e73..d8b5870d6e9a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -33,12 +33,11 @@ struct iwl_fw_ini_hcmd { * * @version: TLV version * @domain: domain of the TLV. One of &enum iwl_fw_ini_dbg_domain - * @data: TLV data */ struct iwl_fw_ini_header { __le32 version; __le32 domain; - u8 data[]; + /* followed by the data */ } __packed; /* FW_TLV_DEBUG_HEADER_S_VER_1 */ /** @@ -130,6 +129,7 @@ struct iwl_fw_ini_region_internal_buffer { * &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX, * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR, * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG + * &IWL_FW_INI_REGION_DBGI_SRAM, &FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM, * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and * &IWL_FW_INI_REGION_RXF * @err_table: error table configuration. Used by @@ -249,7 +249,6 @@ struct iwl_fw_ini_hcmd_tlv { * @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration - * @IWL_FW_INI_ALLOCATION_ID_INTERNAL: allocation meant for Intreanl SMEM in D3 * @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids */ enum iwl_fw_ini_allocation_id { @@ -257,7 +256,6 @@ enum iwl_fw_ini_allocation_id { IWL_FW_INI_ALLOCATION_ID_DBGC1, IWL_FW_INI_ALLOCATION_ID_DBGC2, IWL_FW_INI_ALLOCATION_ID_DBGC3, - IWL_FW_INI_ALLOCATION_ID_INTERNAL, IWL_FW_INI_ALLOCATION_NUM, }; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */ @@ -298,6 +296,7 @@ enum iwl_fw_ini_buffer_location { * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory * @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config * @IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY: special device memory + * @IWL_FW_INI_REGION_DBGI_SRAM: periphery registers of DBGI SRAM * @IWL_FW_INI_REGION_NUM: number of region types */ enum iwl_fw_ini_region_type { @@ -319,6 +318,7 @@ enum iwl_fw_ini_region_type { IWL_FW_INI_REGION_DRAM_IMR, IWL_FW_INI_REGION_PCI_IOSF_CONFIG, IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY, + IWL_FW_INI_REGION_DBGI_SRAM, IWL_FW_INI_REGION_NUM }; /* FW_TLV_DEBUG_REGION_TYPE_API_E */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index 0e38eb1cd75d..6bbb8b8c91cd 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #ifndef __iwl_fw_api_location_h__ #define __iwl_fw_api_location_h__ @@ -151,6 +151,10 @@ enum iwl_tof_mcsi_enable { * is valid * @IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS: NDP parameters are valid * @IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK: LMR feedback support is valid + * 
@IWL_TOF_RESPONDER_CMD_VALID_SESSION_ID: session id flag is valid + * @IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR: the bss_color field is valid + * @IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR: the + * min_time_between_msr and max_time_between_msr fields are valid */ enum iwl_tof_responder_cmd_valid_field { IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0), @@ -169,6 +173,9 @@ enum iwl_tof_responder_cmd_valid_field { IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT = BIT(22), IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS = BIT(23), IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK = BIT(24), + IWL_TOF_RESPONDER_CMD_VALID_SESSION_ID = BIT(25), + IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR = BIT(26), + IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR = BIT(27), }; /** @@ -186,6 +193,8 @@ enum iwl_tof_responder_cmd_valid_field { * @IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT: support NDP ranging * @IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK: request for LMR feedback if the * initiator supports it + * @IWL_TOF_RESPONDER_FLAGS_SESSION_ID: send the session id in the initial FTM + * frame. */ enum iwl_tof_responder_cfg_flags { IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0), @@ -200,6 +209,7 @@ enum iwl_tof_responder_cfg_flags { IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK, IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT = BIT(24), IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK = BIT(25), + IWL_TOF_RESPONDER_FLAGS_SESSION_ID = BIT(27), }; /** @@ -297,13 +307,13 @@ struct iwl_tof_responder_config_cmd_v7 { * @r2i_ndp_params: parameters for R2I NDP. * bits 0 - 2: max number of LTF repetitions * bits 3 - 5: max number of spatial streams (supported values are < 2) - * bits 6 - 7: max number of total LTFs - * (&enum ieee80211_range_params_max_total_ltf) + * bits 6 - 7: max number of total LTFs see + * &enum ieee80211_range_params_max_total_ltf * @i2r_ndp_params: parameters for I2R NDP. * bits 0 - 2: max number of LTF repetitions * bits 3 - 5: max number of spatial streams - * bits 6 - 7: max number of total LTFs - * (&enum ieee80211_range_params_max_total_ltf) + * bits 6 - 7: max number of total LTFs see + * &enum ieee80211_range_params_max_total_ltf */ struct iwl_tof_responder_config_cmd_v8 { __le32 cmd_valid_fields; @@ -322,6 +332,58 @@ struct iwl_tof_responder_config_cmd_v8 { u8 i2r_ndp_params; } __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */ +/** + * struct iwl_tof_responder_config_cmd_v9 - ToF AP mode (for debug) + * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field + * @responder_cfg_flags: &iwl_tof_responder_cfg_flags + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. + * @bss_color: current AP bss_color + * @channel_num: current AP Channel + * @ctrl_ch_position: coding of the control channel position relative to + * the center frequency, see iwl_mvm_get_ctrl_pos() + * @sta_id: index of the AP STA when in AP mode + * @reserved1: reserved + * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug + * purposes, simulating station movement by adding various values + * to this field + * @common_calib: XVT: common calibration value + * @specific_calib: XVT: specific calibration value + * @bssid: Current AP BSSID + * @r2i_ndp_params: parameters for R2I NDP. + * bits 0 - 2: max number of LTF repetitions + * bits 3 - 5: max number of spatial streams (supported values are < 2) + * bits 6 - 7: max number of total LTFs see + * &enum ieee80211_range_params_max_total_ltf + * @i2r_ndp_params: parameters for I2R NDP. 
+ * bits 0 - 2: max number of LTF repetitions + * bits 3 - 5: max number of spatial streams + * bits 6 - 7: max number of total LTFs see + * &enum ieee80211_range_params_max_total_ltf + * @min_time_between_msr: for non trigger based NDP ranging, minimum time + * between measurements in milliseconds. + * @max_time_between_msr: for non trigger based NDP ranging, maximum time + * between measurements in milliseconds. + */ +struct iwl_tof_responder_config_cmd_v9 { + __le32 cmd_valid_fields; + __le32 responder_cfg_flags; + u8 format_bw; + u8 bss_color; + u8 channel_num; + u8 ctrl_ch_position; + u8 sta_id; + u8 reserved1; + __le16 toa_offset; + __le16 common_calib; + __le16 specific_calib; + u8 bssid[ETH_ALEN]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + __le16 min_time_between_msr; + __le16 max_time_between_msr; +} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */ + #define IWL_LCI_CIVIC_IE_MAX_SIZE 400 /** @@ -489,6 +551,10 @@ struct iwl_tof_range_req_ap_entry_v2 { * instead of fw internal values. * @IWL_INITIATOR_AP_FLAGS_PMF: request to protect the negotiation and LMR * frames with protected management frames. + * @IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK: terminate the session if + * the responder asked for LMR feedback although the initiator did not set + * the LMR feedback bit in the FTM request. If not set, the initiator will + * continue with the session and will provide the LMR feedback. */ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1), @@ -504,6 +570,7 @@ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK = BIT(12), IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13), IWL_INITIATOR_AP_FLAGS_PMF = BIT(14), + IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK = BIT(15), }; /** @@ -795,6 +862,90 @@ struct iwl_tof_range_req_ap_entry_v8 { } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_8 */ /** + * struct iwl_tof_range_req_ap_entry_v9 - AP configuration parameters + * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. + * @channel_num: AP Channel number + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency, see iwl_mvm_get_ctrl_pos(). + * @ftmr_max_retries: Max number of retries to send the FTMR in case of no + * reply from the AP. + * @bssid: AP's BSSID + * @burst_period: For EDCA based ranging: Recommended value to be sent to the + * AP. Measurement periodicity In units of 100ms. ignored if + * num_of_bursts_exp = 0. + * For non trigger based NDP ranging, the maximum time between + * measurements in units of milliseconds. + * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of + * the number of measurement iterations (min 2^0 = 1, max 2^14) + * @sta_id: the station id of the AP. Only relevant when associated to the AP, + * otherwise should be set to &IWL_MVM_INVALID_STA. + * @cipher: pairwise cipher suite for secured measurement. + * &enum iwl_location_cipher. + * @hltk: HLTK to be used for secured 11az measurement + * @tk: TK to be used for secured 11az measurement + * @calib: An array of calibration values per FTM rx bandwidth. + * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the + * calibration value that corresponds to the rx bandwidth of the FTM + * frame. + * @beacon_interval: beacon interval of the AP in TUs. Only required if + * &IWL_INITIATOR_AP_FLAGS_TB is set. 
+ * @bss_color: the BSS color of the responder. Only valid if + * &IWL_INITIATOR_AP_FLAGS_TB or &IWL_INITIATOR_AP_FLAGS_NON_TB is set. + * @rx_pn: the next expected PN for protected management frames Rx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_MVM_INVALID_STA. + * @tx_pn: the next PN to use for protected management frames Tx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_MVM_INVALID_STA. + * @r2i_ndp_params: parameters for R2I NDP ranging negotiation. + * bits 0 - 2: max LTF repetitions + * bits 3 - 5: max number of spatial streams + * bits 6 - 7: reserved + * @i2r_ndp_params: parameters for I2R NDP ranging negotiation. + * bits 0 - 2: max LTF repetitions + * bits 3 - 5: max number of spatial streams (supported values are < 2) + * bits 6 - 7: reserved + * @r2i_max_total_ltf: R2I Max Total LTFs for NDP ranging negotiation. + * One of &enum ieee80211_range_params_max_total_ltf. + * @i2r_max_total_ltf: I2R Max Total LTFs for NDP ranging negotiation. + * One of &enum ieee80211_range_params_max_total_ltf. + * @bss_color: the BSS color of the responder. Only valid if + * &IWL_INITIATOR_AP_FLAGS_NON_TB or &IWL_INITIATOR_AP_FLAGS_TB is set. + * @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6GHz + * @min_time_between_msr: For non trigger based NDP ranging, the minimum time + * between measurements in units of milliseconds + */ +struct iwl_tof_range_req_ap_entry_v9 { + __le32 initiator_ap_flags; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 ftmr_max_retries; + u8 bssid[ETH_ALEN]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + u8 sta_id; + u8 cipher; + u8 hltk[HLTK_11AZ_LEN]; + u8 tk[TK_11AZ_LEN]; + __le16 calib[IWL_TOF_BW_NUM]; + u16 beacon_interval; + u8 rx_pn[IEEE80211_CCMP_PN_LEN]; + u8 tx_pn[IEEE80211_CCMP_PN_LEN]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + u8 r2i_max_total_ltf; + u8 i2r_max_total_ltf; + u8 bss_color; + u8 band; + __le16 min_time_between_msr; +} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */ + +/** * enum iwl_tof_response_mode * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as * possible (not supported for this release) @@ -1043,6 +1194,34 @@ struct iwl_tof_range_req_cmd_v12 { struct iwl_tof_range_req_ap_entry_v8 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_12 */ +/** + * struct iwl_tof_range_req_cmd_v13 - start measurement cmd + * @initiator_flags: see flags @ iwl_tof_initiator_flags + * @request_id: A Token incremented per request. The same Token will be + * sent back in the range response + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @range_req_bssid: ranging request BSSID + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. + * Bits set to 1 shall be randomized by the UMAC + * @macaddr_template: MAC address template to use for non-randomized bits + * @req_timeout_ms: Requested timeout of the response in units of milliseconds. + * This is the session time for completing the measurement. + * @tsf_mac_id: report the measurement start time for each ap in terms of the + * TSF of this mac id. 0xff to disable TSF reporting. + * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v9. 
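
 *
 * Editor's note, a minimal and purely illustrative fill of this
 * command (values hypothetical, not taken from the driver):
 *
 *	struct iwl_tof_range_req_cmd_v13 cmd = {
 *		.request_id = 1,			// echoed in the range response
 *		.num_of_ap = 1,
 *		.req_timeout_ms = cpu_to_le32(500),	// session budget, ms
 *		.tsf_mac_id = cpu_to_le32(0xff),	// TSF reporting disabled
 *	};
 *	cmd.ap[0].initiator_ap_flags = cpu_to_le32(IWL_INITIATOR_AP_FLAGS_ASAP);
 *	cmd.ap[0].channel_num = 36;
 *	cmd.ap[0].ftmr_max_retries = 3;
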
+ */ +struct iwl_tof_range_req_cmd_v13 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + u8 macaddr_template[ETH_ALEN]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */ + /* * enum iwl_tof_range_request_status - status of the sent request * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 93084bbad534..7be7715b431d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_mac_h__ @@ -137,12 +137,14 @@ struct iwl_mac_data_ibss { * early termination detection. * @FLEXIBLE_TWT_SUPPORTED: AP supports flexible TWT schedule * @PROTECTED_TWT_SUPPORTED: AP supports protected TWT frames (with 11w) + * @BROADCAST_TWT_SUPPORTED: AP and STA support broadcast TWT */ enum iwl_mac_data_policy { TWT_SUPPORTED = BIT(0), MORE_DATA_ACK_SUPPORTED = BIT(1), FLEXIBLE_TWT_SUPPORTED = BIT(2), PROTECTED_TWT_SUPPORTED = BIT(3), + BROADCAST_TWT_SUPPORTED = BIT(4), }; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h index f06214d418aa..5204aa94e72a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h @@ -3,6 +3,7 @@ * Copyright (C) 2012-2014 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH + * Copyright (C) 2021 Intel Corporation */ #ifndef __iwl_fw_api_offload_h__ #define __iwl_fw_api_offload_h__ @@ -20,7 +21,7 @@ enum iwl_prot_offload_subcmd_ids { #define MAX_STORED_BEACON_SIZE 600 /** - * struct iwl_stored_beacon_notif - Stored beacon notification + * struct iwl_stored_beacon_notif_common - Stored beacon notif common fields * * @system_time: system time on air rise * @tsf: TSF on air rise @@ -29,9 +30,8 @@ enum iwl_prot_offload_subcmd_ids { * @channel: channel this beacon was received on * @rates: rate in ucode internal format * @byte_count: frame's byte count - * @data: beacon data, length in @byte_count */ -struct iwl_stored_beacon_notif { +struct iwl_stored_beacon_notif_common { __le32 system_time; __le64 tsf; __le32 beacon_timestamp; @@ -39,7 +39,32 @@ struct iwl_stored_beacon_notif { __le16 channel; __le32 rates; __le32 byte_count; +} __packed; + +/** + * struct iwl_stored_beacon_notif - Stored beacon notification + * + * @common: fields common for all versions + * @data: beacon data, length in @byte_count + */ +struct iwl_stored_beacon_notif_v2 { + struct iwl_stored_beacon_notif_common common; u8 data[MAX_STORED_BEACON_SIZE]; } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ +/** + * struct iwl_stored_beacon_notif_v3 - Stored beacon notification + * + * @common: fields common for all versions + * @sta_id: station for which the beacon was received + * @reserved: reserved for alignment + * @data: beacon data, length in @byte_count + */ +struct iwl_stored_beacon_notif_v3 { + struct iwl_stored_beacon_notif_common common; + u8 sta_id; + u8 reserved[3]; + u8 
data[MAX_STORED_BEACON_SIZE]; +} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_3 */ + #endif /* __iwl_fw_api_offload_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index b2605aefc290..8b200379f7c2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -874,7 +874,7 @@ struct iwl_scan_probe_params_v3 { u8 reserved; struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE]; - u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE]; + u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN]; } __packed; /* SCAN_PROBE_PARAMS_API_S_VER_3 */ /** @@ -894,7 +894,7 @@ struct iwl_scan_probe_params_v4 { __le16 reserved; struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE]; - u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE]; + u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN]; } __packed; /* SCAN_PROBE_PARAMS_API_S_VER_4 */ #define SCAN_MAX_NUM_CHANS_V3 67 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h index 12b2f2c48387..f1a3e14880e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -384,13 +384,17 @@ struct iwl_mvm_add_sta_key_cmd_v1 { * @rx_mic_key: TKIP RX unicast or multicast key * @tx_mic_key: TKIP TX key * @transmit_seq_cnt: TSC, transmit packet number + * + * Note: This is used for both v2 and v3, the difference being + * in the way the common.rx_secur_seq_cnt is used, in v2 that's + * the strange hole format, in v3 it's just a u64. 
*/ struct iwl_mvm_add_sta_key_cmd { struct iwl_mvm_add_sta_key_common common; __le64 rx_mic_key; __le64 tx_mic_key; __le64 transmit_seq_cnt; -} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */ +} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2, ADD_MODIFY_STA_KEY_API_S_VER_3 */ /** * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index df7c55e06f54..6dcafd0a3d4b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1517,6 +1517,37 @@ iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt, return sizeof(*range) + le32_to_cpu(range->range_data_size); } +static int +iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, int idx) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + struct iwl_fw_ini_error_dump_range *range = range_ptr; + __le32 *val = range->data; + u32 prph_data; + int i; + + if (!iwl_trans_grab_nic_access(fwrt->trans)) + return -EBUSY; + + range->range_data_size = reg->dev_addr.size; + iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG, + DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK); + for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) { + prph_data = iwl_read_prph(fwrt->trans, (i % 2) ? + DBGI_SRAM_TARGET_ACCESS_RDATA_MSB : + DBGI_SRAM_TARGET_ACCESS_RDATA_LSB); + if (prph_data == 0x5a5a5a5a) { + iwl_trans_release_nic_access(fwrt->trans); + return -EBUSY; + } + *val++ = cpu_to_le32(prph_data); + } + iwl_trans_release_nic_access(fwrt->trans); + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, int idx) @@ -1547,7 +1578,7 @@ iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt, dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); - return dump->ranges; + return dump->data; } /** @@ -1611,7 +1642,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, data->header.version = cpu_to_le32(IWL_INI_DUMP_VER); - return data->ranges; + return data->data; } static void * @@ -1647,7 +1678,7 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt, dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); dump->version = reg->err_table.version; - return dump->ranges; + return dump->data; } static void * @@ -1662,7 +1693,7 @@ iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt, dump->type = reg->special_mem.type; dump->version = reg->special_mem.version; - return dump->ranges; + return dump->data; } static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt, @@ -2189,6 +2220,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { .fill_mem_hdr = iwl_dump_ini_special_mem_fill_header, .fill_range = iwl_dump_ini_special_mem_iter, }, + [IWL_FW_INI_REGION_DBGI_SRAM] = { + .get_num_of_ranges = iwl_dump_ini_mem_ranges, + .get_size = iwl_dump_ini_mem_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_dbgi_sram_iter, + }, }; static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, @@ -2321,7 +2358,7 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, return; if (dump_data->monitor_only) - dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR; + dump_mask &= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR); fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask); file_len = 
le32_to_cpu(dump_file->file_len); @@ -2530,51 +2567,6 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); -int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, - struct iwl_fwrt_dump_data *dump_data) -{ - struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig; - enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point); - u32 occur, delay; - unsigned long idx; - - if (!iwl_fw_ini_trigger_on(fwrt, trig)) { - IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n", - tp_id); - return -EINVAL; - } - - delay = le32_to_cpu(trig->dump_delay); - occur = le32_to_cpu(trig->occurrences); - if (!occur) - return 0; - - trig->occurrences = cpu_to_le32(--occur); - - /* Check there is an available worker. - * ffz return value is undefined if no zero exists, - * so check against ~0UL first. - */ - if (fwrt->dump.active_wks == ~0UL) - return -EBUSY; - - idx = ffz(fwrt->dump.active_wks); - - if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM || - test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks)) - return -EBUSY; - - fwrt->dump.wks[idx].dump_data = *dump_data; - - IWL_WARN(fwrt, - "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n", - tp_id, (u32)(delay / USEC_PER_MSEC)); - - schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); - - return 0; -} - int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) @@ -2703,6 +2695,58 @@ out: clear_bit(wk_idx, &fwrt->dump.active_wks); } +int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, + struct iwl_fwrt_dump_data *dump_data, + bool sync) +{ + struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig; + enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point); + u32 occur, delay; + unsigned long idx; + + if (!iwl_fw_ini_trigger_on(fwrt, trig)) { + IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n", + tp_id); + return -EINVAL; + } + + delay = le32_to_cpu(trig->dump_delay); + occur = le32_to_cpu(trig->occurrences); + if (!occur) + return 0; + + trig->occurrences = cpu_to_le32(--occur); + + /* Check there is an available worker. + * ffz return value is undefined if no zero exists, + * so check against ~0UL first. 
+ */ + if (fwrt->dump.active_wks == ~0UL) + return -EBUSY; + + idx = ffz(fwrt->dump.active_wks); + + if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM || + test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks)) + return -EBUSY; + + fwrt->dump.wks[idx].dump_data = *dump_data; + + if (sync) + delay = 0; + + IWL_WARN(fwrt, + "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n", + tp_id, (u32)(delay / USEC_PER_MSEC)); + + schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); + + if (sync) + iwl_fw_dbg_collect_sync(fwrt, idx); + + return 0; +} + void iwl_fw_error_dump_wk(struct work_struct *work) { struct iwl_fwrt_wk_data *wks = diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index c0e84ef84f5d..8c3c890066b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -46,7 +46,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig_type); int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, - struct iwl_fwrt_dump_data *dump_data); + struct iwl_fwrt_dump_data *dump_data, + bool sync); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, struct iwl_fw_dbg_trigger_tlv *trigger); @@ -284,7 +285,7 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans, trans->dbg.umac_error_event_table = umac_error_event_table; } -static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt) +static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync) { enum iwl_fw_ini_time_point tp_id; @@ -300,7 +301,7 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt) tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT; } - iwl_dbg_tlv_time_point(fwrt, tp_id, NULL); + _iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync); } void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 9fffac903b93..521ca2bb0e92 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2014, 2018-2020 Intel Corporation + * Copyright (C) 2014, 2018-2021 Intel Corporation * Copyright (C) 2014-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -305,11 +305,12 @@ struct iwl_fw_ini_error_dump_header { /** * struct iwl_fw_ini_error_dump - ini region dump * @header: the header of this region - * @ranges: the memory ranges of this region + * @data: data of memory ranges in this region, + * see &struct iwl_fw_ini_error_dump_range */ struct iwl_fw_ini_error_dump { struct iwl_fw_ini_error_dump_header header; - struct iwl_fw_ini_error_dump_range ranges[]; + u8 data[]; } __packed; /* This bit is used to differentiate between lmac and umac rxf */ @@ -399,12 +400,13 @@ struct iwl_fw_ini_dump_info { * struct iwl_fw_ini_err_table_dump - ini error table dump * @header: header of the region * @version: error table version - * @ranges: the memory ranges of this this region + * @data: data of memory ranges in this region, + * see &struct iwl_fw_ini_error_dump_range */ struct iwl_fw_ini_err_table_dump { struct iwl_fw_ini_error_dump_header header; __le32 version; - struct iwl_fw_ini_error_dump_range ranges[]; + u8 data[]; 
} __packed; /** @@ -427,14 +429,15 @@ struct iwl_fw_error_dump_rb { * @write_ptr: write pointer position in the buffer * @cycle_cnt: cycles count * @cur_frag: current fragment in use - * @ranges: the memory ranges of this this region + * @data: data of memory ranges in this region, + * see &struct iwl_fw_ini_error_dump_range */ struct iwl_fw_ini_monitor_dump { struct iwl_fw_ini_error_dump_header header; __le32 write_ptr; __le32 cycle_cnt; __le32 cur_frag; - struct iwl_fw_ini_error_dump_range ranges[]; + u8 data[]; } __packed; /** @@ -442,13 +445,14 @@ struct iwl_fw_ini_monitor_dump { * @header: header of the region * @type: type of special memory * @version: struct special memory version - * @ranges: the memory ranges of this this region + * @data: data of memory ranges in this region, + * see &struct iwl_fw_ini_error_dump_range */ struct iwl_fw_ini_special_device_memory { struct iwl_fw_ini_error_dump_header header; __le16 type; __le16 version; - struct iwl_fw_ini_error_dump_range ranges[]; + u8 data[]; } __packed; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 9a8c7b7a0816..6c8e9f3a6af2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -414,6 +414,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56, IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57, IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58, + IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)59, IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60, /* set 2 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c index b4b1f75b9c2a..314ed90c23dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c @@ -24,7 +24,7 @@ static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_pnvm_init_complete_ntfy *pnvm_ntf = (void *)pkt->data; IWL_DEBUG_FW(trans, - "PNVM complete notification received with status %d\n", + "PNVM complete notification received with status 0x%0x\n", le32_to_cpu(pnvm_ntf->status)); return true; @@ -230,19 +230,10 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len) { const struct firmware *pnvm; - char pnvm_name[64]; + char pnvm_name[MAX_PNVM_NAME]; int ret; - /* - * The prefix unfortunately includes a hyphen at the end, so - * don't add the dot here... - */ - snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm", - trans->cfg->fw_name_pre); - - /* ...but replace the hyphen with the dot here. 
*/ - if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name)) - pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.'; + iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name)); ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev); if (ret) { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h index 61d3d4e0b7d9..203c367dd4de 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h @@ -12,7 +12,27 @@ #define MVM_UCODE_PNVM_TIMEOUT (HZ / 4) +#define MAX_PNVM_NAME 64 + int iwl_pnvm_load(struct iwl_trans *trans, struct iwl_notif_wait_data *notif_wait); +static inline +void iwl_pnvm_get_fs_name(struct iwl_trans *trans, + u8 *pnvm_name, size_t max_len) +{ + int pre_len; + + /* + * The prefix unfortunately includes a hyphen at the end, so + * don't add the dot here... + */ + snprintf(pnvm_name, max_len, "%spnvm", trans->cfg->fw_name_pre); + + /* ...but replace the hyphen with the dot here. */ + pre_len = strlen(trans->cfg->fw_name_pre); + if (pre_len < max_len && pre_len > 0) + pnvm_name[pre_len - 1] = '.'; +} + #endif /* __IWL_PNVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index bf6ee56d4d96..7eb534df5331 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -33,6 +33,7 @@ enum iwl_device_family { IWL_DEVICE_FAMILY_9000, IWL_DEVICE_FAMILY_22000, IWL_DEVICE_FAMILY_AX210, + IWL_DEVICE_FAMILY_BZ, }; /* @@ -321,7 +322,7 @@ struct iwl_fw_mon_regs { * @host_interrupt_operation_mode: device needs host interrupt operation * mode set * @nvm_hw_section_num: the ID of the HW NVM section - * @mac_addr_from_csr: read HW address from CSR registers + * @mac_addr_from_csr: read HW address from CSR registers at this offset * @features: hw features, any combination of feature_passlist * @pwr_tx_backoffs: translation table between power limits and backoffs * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response @@ -343,6 +344,8 @@ struct iwl_fw_mon_regs { * supports 256 BA aggregation * @num_rbds: number of receive buffer descriptors to use * (only used for multi-queue capable devices) + * @mac_addr_csr_base: CSR base register for MAC address access, if not set + * assume 0x380 * * We enable the driver to be backward compatible wrt. hardware features. 
* API differences in uCode shouldn't be handled here but through TLVs @@ -378,7 +381,7 @@ struct iwl_cfg { internal_wimax_coex:1, host_interrupt_operation_mode:1, high_temp:1, - mac_addr_from_csr:1, + mac_addr_from_csr:10, lp_xtal_workaround:1, disable_dummy_notification:1, apmg_not_supported:1, @@ -512,6 +515,7 @@ extern const char iwl_ax211_name[]; extern const char iwl_ax221_name[]; extern const char iwl_ax231_name[]; extern const char iwl_ax411_name[]; +extern const char iwl_bz_name[]; #if IS_ENABLED(CONFIG_IWLDVM) extern const struct iwl_cfg iwl5300_agn_cfg; extern const struct iwl_cfg iwl5100_agn_cfg; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 47e5a17c0f48..cf796403c45c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -104,6 +104,10 @@ /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) +/* Doorbell NMI (since Bz) */ +#define CSR_DOORBELL_VECTOR (CSR_BASE + 0x130) +#define CSR_DOORBELL_VECTOR_NMI BIT(1) + /* host chicken bits */ #define CSR_HOST_CHICKEN (CSR_BASE + 0x204) #define CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME BIT(19) @@ -266,6 +270,14 @@ #define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000) #define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) +/* From Bz we use these instead during init/reset flow */ +#define CSR_GP_CNTRL_REG_FLAG_MAC_INIT BIT(6) +#define CSR_GP_CNTRL_REG_FLAG_ROM_START BIT(7) +#define CSR_GP_CNTRL_REG_FLAG_MAC_STATUS BIT(20) +#define CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ BIT(21) +#define CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS BIT(28) +#define CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ BIT(29) +#define CSR_GP_CNTRL_REG_FLAG_SW_RESET BIT(31) /* HW REV */ #define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0) @@ -604,10 +616,10 @@ enum msix_hw_int_causes { * HW address related registers * *****************************************************************************/ -#define CSR_ADDR_BASE (0x380) -#define CSR_MAC_ADDR0_OTP (CSR_ADDR_BASE) -#define CSR_MAC_ADDR1_OTP (CSR_ADDR_BASE + 4) -#define CSR_MAC_ADDR0_STRAP (CSR_ADDR_BASE + 8) -#define CSR_MAC_ADDR1_STRAP (CSR_ADDR_BASE + 0xC) +#define CSR_ADDR_BASE(trans) ((trans)->cfg->mac_addr_from_csr) +#define CSR_MAC_ADDR0_OTP(trans) (CSR_ADDR_BASE(trans) + 0x00) +#define CSR_MAC_ADDR1_OTP(trans) (CSR_ADDR_BASE(trans) + 0x04) +#define CSR_MAC_ADDR0_STRAP(trans) (CSR_ADDR_BASE(trans) + 0x08) +#define CSR_MAC_ADDR1_STRAP(trans) (CSR_ADDR_BASE(trans) + 0x0c) #endif /* !__iwl_csr_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 0ddd255a8cc1..125479b5c0d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -131,8 +131,7 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans, goto err; if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH && - alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1 && - alloc_id != IWL_FW_INI_ALLOCATION_ID_INTERNAL) + alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) goto err; trans->dbg.fw_mon_cfg[alloc_id] = *alloc; @@ -435,13 +434,16 @@ static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data, void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans) { const struct firmware *fw; + const char *yoyo_bin = "iwl-debug-yoyo.bin"; int res; if (!iwlwifi_mod_params.enable_ini || trans->trans_cfg->device_family <= 
IWL_DEVICE_FAMILY_9000) return; - res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev); + res = firmware_request_nowarn(&fw, yoyo_bin, dev); + IWL_DEBUG_FW(trans, "%s %s\n", res ? "didn't load" : "loaded", yoyo_bin); + if (res) return; @@ -621,6 +623,7 @@ static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt, .id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION), .data[0] = &data, .len[0] = sizeof(data), + .flags = CMD_SEND_IN_RFKILL, }; int ret, j; @@ -683,7 +686,7 @@ static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t) }; int ret; - ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data); + ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data, false); if (!ret || ret == -EBUSY) { u32 occur = le32_to_cpu(dump_data.trig->occurrences); u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]); @@ -927,7 +930,7 @@ static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt, } static int -iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, +iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, struct list_head *active_trig_list, union iwl_dbg_tlv_tp_data *tp_data, bool (*data_check)(struct iwl_fw_runtime *fwrt, @@ -946,7 +949,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, int ret, i; if (!num_data) { - ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data); + ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync); if (ret) return ret; } @@ -955,7 +958,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, if (!data_check || data_check(fwrt, &dump_data, tp_data, le32_to_cpu(dump_data.trig->data[i]))) { - ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data); + ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync); if (ret) return ret; @@ -1043,9 +1046,10 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt) } } -void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, - enum iwl_fw_ini_time_point tp_id, - union iwl_dbg_tlv_tp_data *tp_data) +void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_time_point tp_id, + union iwl_dbg_tlv_tp_data *tp_data, + bool sync) { struct list_head *hcmd_list, *trig_list; @@ -1060,12 +1064,12 @@ void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, switch (tp_id) { case IWL_FW_INI_TIME_POINT_EARLY: iwl_dbg_tlv_init_cfg(fwrt); - iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL); + iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); break; case IWL_FW_INI_TIME_POINT_AFTER_ALIVE: iwl_dbg_tlv_apply_buffers(fwrt); iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); - iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL); + iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); break; case IWL_FW_INI_TIME_POINT_PERIODIC: iwl_dbg_tlv_set_periodic_trigs(fwrt); @@ -1075,13 +1079,13 @@ void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, case IWL_FW_INI_TIME_POINT_MISSED_BEACONS: case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); - iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, + iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, iwl_dbg_tlv_check_fw_pkt); break; default: iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); - iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL); + iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); break; } } -IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point); +IWL_EXPORT_SYMBOL(_iwl_dbg_tlv_time_point); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h index 92c720527946..c12b1fd3f479 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h 
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #ifndef __iwl_dbg_tlv_h__ #define __iwl_dbg_tlv_h__ @@ -48,9 +48,25 @@ void iwl_dbg_tlv_free(struct iwl_trans *trans); void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv, bool ext); void iwl_dbg_tlv_init(struct iwl_trans *trans); -void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, - enum iwl_fw_ini_time_point tp_id, - union iwl_dbg_tlv_tp_data *tp_data); +void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_time_point tp_id, + union iwl_dbg_tlv_tp_data *tp_data, + bool sync); + +static inline void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_time_point tp_id, + union iwl_dbg_tlv_tp_data *tp_data) +{ + _iwl_dbg_tlv_time_point(fwrt, tp_id, tp_data, false); +} + +static inline void iwl_dbg_tlv_time_point_sync(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_time_point tp_id, + union iwl_dbg_tlv_tp_data *tp_data) +{ + _iwl_dbg_tlv_time_point(fwrt, tp_id, tp_data, true); +} + void iwl_dbg_tlv_del_timers(struct iwl_trans *trans); #endif /* __iwl_dbg_tlv_h__*/ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 977dce686bdb..77124b8b235e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -78,7 +78,7 @@ enum { }; /* Protects the table contents, i.e. the ops pointer & drv list */ -static struct mutex iwlwifi_opmode_table_mtx; +static DEFINE_MUTEX(iwlwifi_opmode_table_mtx); static struct iwlwifi_opmode_table { const char *name; /* name: iwldvm, iwlmvm, etc */ const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */ @@ -1754,8 +1754,6 @@ static int __init iwl_drv_init(void) { int i, err; - mutex_init(&iwlwifi_opmode_table_mtx); - for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index 33d42e08d5b8..2517c4ae07ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2020 Intel Corporation + * Copyright (C) 2003-2014, 2018-2021 Intel Corporation * Copyright (C) 2015-2016 Intel Deutschland GmbH */ #include <linux/delay.h> @@ -213,9 +213,12 @@ void iwl_force_nmi(struct iwl_trans *trans) else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER); - else + else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, UREG_DOORBELL_TO_ISR6_NMI_BIT); + else + iwl_write32(trans, CSR_DOORBELL_VECTOR, + CSR_DOORBELL_VECTOR_NMI); } IWL_EXPORT_SYMBOL(iwl_force_nmi); @@ -398,6 +401,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf) int iwl_finish_nic_init(struct iwl_trans *trans, const struct iwl_cfg_trans_params *cfg_trans) { + u32 poll_ready; int err; if (cfg_trans->bisr_workaround) { @@ -409,7 +413,16 @@ int iwl_finish_nic_init(struct iwl_trans *trans, * Set "initialization complete" bit to move adapter from * D0U* --> D0A* (powered-up active) state. 
*/ - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_MAC_INIT); + poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; + } else { + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY; + } if (cfg_trans->device_family == IWL_DEVICE_FAMILY_8000) udelay(2); @@ -419,10 +432,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans, * device-internal resources is supported, e.g. iwl_write_prph() * and accesses to uCode SRAM. */ - err = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - 25000); + err = iwl_poll_bit(trans, CSR_GP_CNTRL, poll_ready, poll_ready, 25000); if (err < 0) IWL_DEBUG_INFO(trans, "Failed to wake NIC\n"); @@ -468,5 +478,5 @@ void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr, if (interrupts_enabled) iwl_trans_interrupts(trans, true); - iwl_trans_fw_error(trans); + iwl_trans_fw_error(trans, false); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 850648ebd61c..475f951d4b1e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -549,7 +549,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP, .mac_cap_info[3] = - IEEE80211_HE_MAC_CAP3_OMI_CONTROL, + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU | IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39, @@ -568,7 +569,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD, .phy_cap_info[2] = - IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US, + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ, .phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM | IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | @@ -595,6 +597,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED, + .phy_cap_info[10] = + IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF, }, /* * Set default Tx/Rx HE MCS NSS Support field. 
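Note on the HE capability hunks that follow: phy_cap_info[] in the HE capability element is just an array of bitmask bytes, so the patch advertises 802.11ax Rx STBC by OR-ing one more flag into byte 2 (and, where both antennas are present, Tx STBC as well). A minimal sketch of how such a capability is read back, assuming only the standard cfg80211/ieee80211 headers (the helper name is made up):

	#include <net/cfg80211.h>

	/* hypothetical helper: true if Rx STBC below 80 MHz is advertised */
	static bool sketch_has_rx_stbc(const struct ieee80211_sta_he_cap *he_cap)
	{
		return he_cap->he_cap_elem.phy_cap_info[2] &
		       IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
	}
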
@@ -634,6 +638,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
 			.phy_cap_info[1] =
 				IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
 			.phy_cap_info[2] =
+				IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
 				IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US,
 			.phy_cap_info[3] =
 				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
@@ -742,6 +747,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
 			IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
 
 	if ((tx_chains & rx_chains) == ANT_AB) {
+		iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |=
+			IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ;
 		iftype_data->he_cap.he_cap_elem.phy_cap_info[5] |=
 			IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
 			IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
@@ -958,8 +965,10 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
 static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
 					struct iwl_nvm_data *data)
 {
-	__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
-	__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
+	__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans,
+						  CSR_MAC_ADDR0_STRAP(trans)));
+	__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans,
+						  CSR_MAC_ADDR1_STRAP(trans)));
 
 	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
 
 	/*
@@ -969,8 +978,8 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
 	if (is_valid_ether_addr(data->hw_addr))
 		return;
 
-	mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
-	mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
+	mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP(trans)));
+	mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP(trans)));
 
 	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
 }
@@ -1373,6 +1382,25 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 		reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
 	}
 
+	/*
+	 * Certain firmware versions might report no valid channels
+	 * if booted in RF-kill, i.e. not all calibrations etc. are
+	 * running. We'll get out of this situation later when the
+	 * rfkill is removed and we update the regdomain again, but
+	 * since cfg80211 doesn't accept an empty regdomain, add a
+	 * dummy (unusable) rule here in this case so we can init.
+	 */
+	if (!valid_rules) {
+		valid_rules = 1;
+		rule = &regd->reg_rules[valid_rules - 1];
+		rule->freq_range.start_freq_khz = MHZ_TO_KHZ(2412);
+		rule->freq_range.end_freq_khz = MHZ_TO_KHZ(2413);
+		rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(1);
+		rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
+		rule->power_rule.max_eirp =
+			DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
+	}
+
 	regd->n_reg_rules = valid_rules;
 
 	/*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index cf9c64090014..af5f9b210f22 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -78,7 +78,7 @@ struct iwl_cfg;
  *	there are Tx packets pending in the transport layer.
  *	Must be atomic
  * @nic_error: error notification. Must be atomic and must be called with BH
- *	disabled.
+ *	disabled, unless the sync parameter is true.
  * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
  *	called with BH disabled.
  * @nic_config: configure NIC, called before firmware is started.
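The regulatory hunk above works around cfg80211's refusal to accept a regdomain with zero rules: when firmware boots in RF-kill and reports no channels, one deliberately unusable 1 MHz rule is inserted so initialization can proceed. A stand-alone sketch of the same idea, assuming a regd allocated with room for at least one rule (the function name is hypothetical):

	#include <net/regulatory.h>

	static void sketch_add_placeholder_rule(struct ieee80211_regdomain *regd)
	{
		struct ieee80211_reg_rule *rule = &regd->reg_rules[0];

		/* 1 MHz wide at the edge of channel 1: present but unusable */
		rule->freq_range.start_freq_khz = MHZ_TO_KHZ(2412);
		rule->freq_range.end_freq_khz = MHZ_TO_KHZ(2413);
		rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(1);
		rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
		rule->power_rule.max_eirp = DBM_TO_MBM(22);
		regd->n_reg_rules = 1;
	}
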
@@ -102,7 +102,7 @@ struct iwl_op_mode_ops { void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb); - void (*nic_error)(struct iwl_op_mode *op_mode); + void (*nic_error)(struct iwl_op_mode *op_mode, bool sync); void (*cmd_queue_full)(struct iwl_op_mode *op_mode); void (*nic_config)(struct iwl_op_mode *op_mode); void (*wimax_active)(struct iwl_op_mode *op_mode); @@ -181,9 +181,9 @@ static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode, op_mode->ops->free_skb(op_mode, skb); } -static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode) +static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode, bool sync) { - op_mode->ops->nic_error(op_mode); + op_mode->ops->nic_error(op_mode, sync); } static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 9a9e714bf9af..d0a7d58336a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -348,6 +348,13 @@ #define RFIC_REG_RD 0xAD0470 #define WFPM_CTRL_REG 0xA03030 #define WFPM_GP2 0xA030B4 + +/* DBGI SRAM Register details */ +#define DBGI_SRAM_TARGET_ACCESS_CFG 0x00A2E14C +#define DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK 0x10000 +#define DBGI_SRAM_TARGET_ACCESS_RDATA_LSB 0x00A2E154 +#define DBGI_SRAM_TARGET_ACCESS_RDATA_MSB 0x00A2E158 + enum { ENABLE_WFPM = BIT(31), WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 0199d7a5a648..8f0ff540f439 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -887,7 +887,7 @@ struct iwl_trans_txqs { bool bc_table_dword; u8 page_offs; u8 dev_cmd_offs; - struct __percpu iwl_tso_hdr_page * tso_hdr_page; + struct iwl_tso_hdr_page __percpu *tso_hdr_page; struct { u8 fifo; @@ -1385,14 +1385,14 @@ iwl_trans_release_nic_access(struct iwl_trans *trans) __release(nic_access); } -static inline void iwl_trans_fw_error(struct iwl_trans *trans) +static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync) { if (WARN_ON_ONCE(!trans->op_mode)) return; /* prevent double restarts due to the same erroneous FW */ if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) { - iwl_op_mode_nic_error(trans->op_mode); + iwl_op_mode_nic_error(trans->op_mode, sync); trans->state = IWL_TRANS_NO_FW; } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 1343f25f1090..9d0d01f27d92 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2013-2015 Intel Mobile Communications GmbH - * Copyright (C) 2013-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2014, 2018-2021 Intel Corporation * Copyright (C) 2015 Intel Deutschland GmbH */ #ifndef __MVM_CONSTANTS_H @@ -93,6 +93,7 @@ #define IWL_MVM_ENABLE_EBS 1 #define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE #define IWL_MVM_FTM_INITIATOR_DYNACK true +#define IWL_MVM_FTM_LMR_FEEDBACK_TERMINATE false #define IWL_MVM_FTM_R2I_MAX_REP 7 #define IWL_MVM_FTM_I2R_MAX_REP 7 #define IWL_MVM_FTM_R2I_MAX_STS 1 @@ -102,6 +103,8 @@ #define 
IWL_MVM_FTM_INITIATOR_SECURE_LTF false #define IWL_MVM_FTM_RESP_NDP_SUPPORT true #define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true +#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 5 +#define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000 #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT true #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 6a259d867d90..0e97d5e6c644 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -101,11 +101,8 @@ static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key, return ret; } -struct wowlan_key_data { - struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; - struct iwl_wowlan_tkip_params_cmd *tkip; - struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd; - bool error, use_rsc_tsc, use_tkip, configure_keys; +struct wowlan_key_reprogram_data { + bool error; int wep_key_idx; }; @@ -117,15 +114,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct wowlan_key_data *data = _data; - struct aes_sc *aes_sc, *aes_tx_sc = NULL; - struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; - struct iwl_p1k_cache *rx_p1ks; - u8 *rx_mic_key; - struct ieee80211_key_seq seq; - u32 cur_rx_iv32 = 0; - u16 p1k[IWL_P1K_SIZE]; - int ret, i; + struct wowlan_key_reprogram_data *data = _data; + int ret; switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: @@ -162,18 +152,14 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, wkc.wep_key.key_offset = data->wep_key_idx; } - if (data->configure_keys) { - mutex_lock(&mvm->mutex); - ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, - sizeof(wkc), &wkc); - data->error = ret != 0; - - mvm->ptk_ivlen = key->iv_len; - mvm->ptk_icvlen = key->icv_len; - mvm->gtk_ivlen = key->iv_len; - mvm->gtk_icvlen = key->icv_len; - mutex_unlock(&mvm->mutex); - } + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc); + data->error = ret != 0; + + mvm->ptk_ivlen = key->iv_len; + mvm->ptk_icvlen = key->icv_len; + mvm->gtk_ivlen = key->iv_len; + mvm->gtk_icvlen = key->icv_len; /* don't upload key again */ return; @@ -183,10 +169,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, return; case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: - data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); return; case WLAN_CIPHER_SUITE_AES_CMAC: - data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); /* * Ignore CMAC keys -- the WoWLAN firmware doesn't support them * but we also shouldn't abort suspend due to that. It does have @@ -196,6 +180,58 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, */ return; case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + break; + } + + mutex_lock(&mvm->mutex); + /* + * The D3 firmware hardcodes the key offset 0 as the key it + * uses to transmit packets to the AP, i.e. the PTK. + */ + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + mvm->ptk_ivlen = key->iv_len; + mvm->ptk_icvlen = key->icv_len; + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); + } else { + /* + * firmware only supports TSC/RSC for a single key, + * so if there are multiple keep overwriting them + * with new ones -- this relies on mac80211 doing + * list_add_tail(). 
+ */ + mvm->gtk_ivlen = key->iv_len; + mvm->gtk_icvlen = key->icv_len; + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); + } + mutex_unlock(&mvm->mutex); + data->error = ret != 0; +} + +struct wowlan_key_rsc_tsc_data { + struct iwl_wowlan_rsc_tsc_params_cmd_v4 *rsc_tsc; + bool have_rsc_tsc; +}; + +static void iwl_mvm_wowlan_get_rsc_tsc_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct wowlan_key_rsc_tsc_data *data = _data; + struct aes_sc *aes_sc; + struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; + struct ieee80211_key_seq seq; + int i; + + switch (key->cipher) { + default: + break; + case WLAN_CIPHER_SUITE_TKIP: if (sta) { u64 pn64; @@ -204,28 +240,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, tkip_tx_sc = &data->rsc_tsc->params.all_tsc_rsc.tkip.tsc; - rx_p1ks = data->tkip->rx_uni; - pn64 = atomic64_read(&key->tx_pn); tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); - - ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), - p1k); - iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k); - - memcpy(data->tkip->mic_keys.tx, - &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], - IWL_MIC_KEY_SIZE); - - rx_mic_key = data->tkip->mic_keys.rx_unicast; } else { tkip_sc = data->rsc_tsc->params.all_tsc_rsc.tkip.multicast_rsc; - rx_p1ks = data->tkip->rx_multi; - rx_mic_key = data->tkip->mic_keys.rx_mcast; - data->kek_kck_cmd->gtk_cipher = - cpu_to_le32(STA_KEY_FLG_TKIP); } /* @@ -237,29 +257,15 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, ieee80211_get_key_rx_seq(key, i, &seq); tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); - /* wrapping isn't allowed, AP must rekey */ - if (seq.tkip.iv32 > cur_rx_iv32) - cur_rx_iv32 = seq.tkip.iv32; } - ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, - cur_rx_iv32, p1k); - iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k); - ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, - cur_rx_iv32 + 1, p1k); - iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k); - - memcpy(rx_mic_key, - &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], - IWL_MIC_KEY_SIZE); - - data->use_tkip = true; - data->use_rsc_tsc = true; + data->have_rsc_tsc = true; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (sta) { + struct aes_sc *aes_tx_sc; u64 pn64; aes_sc = @@ -272,10 +278,6 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, } else { aes_sc = data->rsc_tsc->params.all_tsc_rsc.aes.multicast_rsc; - data->kek_kck_cmd->gtk_cipher = - key->cipher == WLAN_CIPHER_SUITE_CCMP ? 
- cpu_to_le32(STA_KEY_FLG_CCM) : - cpu_to_le32(STA_KEY_FLG_GCMP); } /* @@ -320,35 +322,301 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, ((u64)pn[0] << 40)); } } - data->use_rsc_tsc = true; + data->have_rsc_tsc = true; break; } +} - IWL_DEBUG_WOWLAN(mvm, "GTK cipher %d\n", data->kek_kck_cmd->gtk_cipher); +struct wowlan_key_rsc_v5_data { + struct iwl_wowlan_rsc_tsc_params_cmd *rsc; + bool have_rsc; + int gtks; + int gtk_ids[4]; +}; - if (data->configure_keys) { - mutex_lock(&mvm->mutex); +static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct wowlan_key_rsc_v5_data *data = _data; + struct ieee80211_key_seq seq; + __le64 *rsc; + int i; + + /* only for ciphers that can be PTK/GTK */ + switch (key->cipher) { + default: + return; + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + break; + } + + if (sta) { + rsc = data->rsc->ucast_rsc; + } else { + if (WARN_ON(data->gtks > ARRAY_SIZE(data->gtk_ids))) + return; + data->gtk_ids[data->gtks] = key->keyidx; + rsc = data->rsc->mcast_rsc[data->gtks % 2]; + if (WARN_ON(key->keyidx > + ARRAY_SIZE(data->rsc->mcast_key_id_map))) + return; + data->rsc->mcast_key_id_map[key->keyidx] = data->gtks % 2; + if (data->gtks >= 2) { + int prev = data->gtks - 2; + int prev_idx = data->gtk_ids[prev]; + + data->rsc->mcast_key_id_map[prev_idx] = + IWL_MCAST_KEY_MAP_INVALID; + } + data->gtks++; + } + + switch (key->cipher) { + default: + WARN_ON(1); + break; + case WLAN_CIPHER_SUITE_TKIP: + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 (as they need to to avoid replay attacks) + * for checking the IV in the frames. + */ + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); + + rsc[i] = cpu_to_le64(((u64)seq.tkip.iv32 << 16) | + seq.tkip.iv16); + } + + data->have_rsc = true; + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: /* - * The D3 firmware hardcodes the key offset 0 as the key it - * uses to transmit packets to the AP, i.e. the PTK. + * For non-QoS this relies on the fact that both the uCode and + * mac80211/our RX code use TID 0 for checking the PN. */ - if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { - mvm->ptk_ivlen = key->iv_len; - mvm->ptk_icvlen = key->icv_len; - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); + if (sta) { + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_key_pn *ptk_pn; + const u8 *pn; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + rcu_read_lock(); + ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]); + if (WARN_ON(!ptk_pn)) { + rcu_read_unlock(); + break; + } + + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, + mvm->trans->num_rx_queues); + rsc[i] = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } + + rcu_read_unlock(); } else { - /* - * firmware only supports TSC/RSC for a single key, - * so if there are multiple keep overwriting them - * with new ones -- this relies on mac80211 doing - * list_add_tail(). 
- */ - mvm->gtk_ivlen = key->iv_len; - mvm->gtk_icvlen = key->icv_len; - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + u8 *pn = seq.ccmp.pn; + + ieee80211_get_key_rx_seq(key, i, &seq); + rsc[i] = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } } - mutex_unlock(&mvm->mutex); - data->error = ret != 0; + data->have_rsc = true; + break; + } +} + +static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, + WOWLAN_TSC_RSC_PARAM, + IWL_FW_CMD_VER_UNKNOWN); + int ret; + + if (ver == 5) { + struct wowlan_key_rsc_v5_data data = {}; + int i; + + data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL); + if (!data.rsc) + return -ENOMEM; + + memset(data.rsc, 0xff, sizeof(*data.rsc)); + + for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++) + data.rsc->mcast_key_id_map[i] = + IWL_MCAST_KEY_MAP_INVALID; + data.rsc->sta_id = cpu_to_le32(mvmvif->ap_sta_id); + + ieee80211_iter_keys(mvm->hw, vif, + iwl_mvm_wowlan_get_rsc_v5_data, + &data); + + if (data.have_rsc) + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, + CMD_ASYNC, sizeof(*data.rsc), + data.rsc); + else + ret = 0; + kfree(data.rsc); + } else if (ver == 4 || ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) { + struct wowlan_key_rsc_tsc_data data = {}; + int size; + + data.rsc_tsc = kzalloc(sizeof(*data.rsc_tsc), GFP_KERNEL); + if (!data.rsc_tsc) + return -ENOMEM; + + if (ver == 4) { + size = sizeof(*data.rsc_tsc); + data.rsc_tsc->sta_id = cpu_to_le32(mvmvif->ap_sta_id); + } else { + /* ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN */ + size = sizeof(data.rsc_tsc->params); + } + + ieee80211_iter_keys(mvm->hw, vif, + iwl_mvm_wowlan_get_rsc_tsc_data, + &data); + + if (data.have_rsc_tsc) + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, + CMD_ASYNC, size, + data.rsc_tsc); + else + ret = 0; + kfree(data.rsc_tsc); + } else { + ret = 0; + WARN_ON_ONCE(1); + } + + return ret; +} + +struct wowlan_key_tkip_data { + struct iwl_wowlan_tkip_params_cmd tkip; + bool have_tkip_keys; +}; + +static void iwl_mvm_wowlan_get_tkip_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct wowlan_key_tkip_data *data = _data; + struct iwl_p1k_cache *rx_p1ks; + u8 *rx_mic_key; + struct ieee80211_key_seq seq; + u32 cur_rx_iv32 = 0; + u16 p1k[IWL_P1K_SIZE]; + int i; + + switch (key->cipher) { + default: + break; + case WLAN_CIPHER_SUITE_TKIP: + if (sta) { + u64 pn64; + + rx_p1ks = data->tkip.rx_uni; + + pn64 = atomic64_read(&key->tx_pn); + + ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), + p1k); + iwl_mvm_convert_p1k(p1k, data->tkip.tx.p1k); + + memcpy(data->tkip.mic_keys.tx, + &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], + IWL_MIC_KEY_SIZE); + + rx_mic_key = data->tkip.mic_keys.rx_unicast; + } else { + rx_p1ks = data->tkip.rx_multi; + rx_mic_key = data->tkip.mic_keys.rx_mcast; + } + + for (i = 0; i < IWL_NUM_RSC; i++) { + /* wrapping isn't allowed, AP must rekey */ + if (seq.tkip.iv32 > cur_rx_iv32) + cur_rx_iv32 = seq.tkip.iv32; + } + + ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, + cur_rx_iv32, p1k); + iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k); + ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, + cur_rx_iv32 + 1, p1k); + iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k); + + 
memcpy(rx_mic_key, + &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], + IWL_MIC_KEY_SIZE); + + data->have_tkip_keys = true; + break; + } +} + +struct wowlan_key_gtk_type_iter { + struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd; +}; + +static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct wowlan_key_gtk_type_iter *data = _data; + + switch (key->cipher) { + default: + return; + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); + return; + case WLAN_CIPHER_SUITE_AES_CMAC: + data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); + return; + case WLAN_CIPHER_SUITE_CCMP: + if (!sta) + data->kek_kck_cmd->gtk_cipher = + cpu_to_le32(STA_KEY_FLG_CCM); + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (!sta) + data->kek_kck_cmd->gtk_cipher = + cpu_to_le32(STA_KEY_FLG_GCMP); + break; } } @@ -713,109 +981,81 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, } static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 cmd_flags) + struct ieee80211_vif *vif) { - struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {}; - struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd = &kek_kck_cmd; - struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; bool unified = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); - struct wowlan_key_data key_data = { - .configure_keys = !unified, - .use_rsc_tsc = false, - .tkip = &tkip_cmd, - .use_tkip = false, - .kek_kck_cmd = _kek_kck_cmd, - }; + struct wowlan_key_reprogram_data key_data = {}; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; u8 cmd_ver; size_t cmd_size; - key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); - if (!key_data.rsc_tsc) - return -ENOMEM; - - /* - * if we have to configure keys, call ieee80211_iter_keys(), - * as we need non-atomic context in order to take the - * required locks. - */ - /* - * Note that currently we don't propagate cmd_flags - * to the iterator. In case of key_data.configure_keys, - * all the configured commands are SYNC, and - * iwl_mvm_wowlan_program_keys() will take care of - * locking/unlocking mvm->mutex. - */ - ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys, - &key_data); + if (!unified) { + /* + * if we have to configure keys, call ieee80211_iter_keys(), + * as we need non-atomic context in order to take the + * required locks. + */ + /* + * Note that currently we don't use CMD_ASYNC in the iterator. + * In case of key_data.configure_keys, all the configured + * commands are SYNC, and iwl_mvm_wowlan_program_keys() will + * take care of locking/unlocking mvm->mutex. 
+	 */
+		ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys,
+				    &key_data);
 
-	if (key_data.error) {
-		ret = -EIO;
-		goto out;
+		if (key_data.error)
+			return -EIO;
 	}
 
-	if (key_data.use_rsc_tsc) {
-		int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
-						WOWLAN_TSC_RSC_PARAM,
-						IWL_FW_CMD_VER_UNKNOWN);
-		int size;
-
-		if (ver == 4) {
-			size = sizeof(*key_data.rsc_tsc);
-			key_data.rsc_tsc->sta_id =
-				cpu_to_le32(mvmvif->ap_sta_id);
-
-		} else if (ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) {
-			size = sizeof(key_data.rsc_tsc->params);
-		} else {
-			ret = 0;
-			WARN_ON_ONCE(1);
-			goto out;
-		}
-
-		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM,
-					   cmd_flags,
-					   size,
-					   key_data.rsc_tsc);
-
-		if (ret)
-			goto out;
-	}
+	ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif);
+	if (ret)
+		return ret;
 
-	if (key_data.use_tkip &&
-	    !fw_has_api(&mvm->fw->ucode_capa,
+	if (!fw_has_api(&mvm->fw->ucode_capa,
 			IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
 		int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
 						WOWLAN_TKIP_PARAM,
 						IWL_FW_CMD_VER_UNKNOWN);
+		struct wowlan_key_tkip_data tkip_data = {};
 		int size;
 
 		if (ver == 2) {
-			size = sizeof(tkip_cmd);
-			key_data.tkip->sta_id =
+			size = sizeof(tkip_data.tkip);
+			tkip_data.tkip.sta_id =
 				cpu_to_le32(mvmvif->ap_sta_id);
 		} else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) {
 			size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1);
 		} else {
-			ret = -EINVAL;
 			WARN_ON_ONCE(1);
-			goto out;
+			return -EINVAL;
 		}
 
-		/* send relevant data according to CMD version */
-		ret = iwl_mvm_send_cmd_pdu(mvm,
-					   WOWLAN_TKIP_PARAM,
-					   cmd_flags, size,
-					   &tkip_cmd);
-		if (ret)
-			goto out;
+		ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_get_tkip_data,
+				    &tkip_data);
+
+		if (tkip_data.have_tkip_keys) {
+			/* send relevant data according to CMD version */
+			ret = iwl_mvm_send_cmd_pdu(mvm,
+						   WOWLAN_TKIP_PARAM,
+						   CMD_ASYNC, size,
+						   &tkip_data.tkip);
+			if (ret)
+				return ret;
+		}
 	}
 
 	/* configure rekey data only if offloaded rekey is supported (d3) */
 	if (mvmvif->rekey_data.valid) {
+		struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
+		struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd =
+			&kek_kck_cmd;
+		struct wowlan_key_gtk_type_iter gtk_type_data = {
+			.kek_kck_cmd = _kek_kck_cmd,
+		};
+
 		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
 						IWL_ALWAYS_LONG_GROUP,
 						WOWLAN_KEK_KCK_MATERIAL,
@@ -824,6 +1064,9 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
 			 cmd_ver != IWL_FW_CMD_VER_UNKNOWN))
 			return -EINVAL;
 
+		ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_gtk_type_iter,
+				    &gtk_type_data);
+
 		memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
 		       mvmvif->rekey_data.kck_len);
 		kek_kck_cmd.kck_len = cpu_to_le16(mvmvif->rekey_data.kck_len);
@@ -851,17 +1094,13 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
 		IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
 				 mvmvif->rekey_data.akm);
 
-		ret = iwl_mvm_send_cmd_pdu(mvm,
-					   WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
-					   cmd_size,
-					   _kek_kck_cmd);
+		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_KEK_KCK_MATERIAL,
+					   CMD_ASYNC, cmd_size, _kek_kck_cmd);
 		if (ret)
-			goto out;
+			return ret;
 	}
 
-	ret = 0;
-out:
-	kfree(key_data.rsc_tsc);
-	return ret;
+
+	return 0;
 }
 
 static int
@@ -893,7 +1132,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
 	 * that isn't really a problem though.
*/ mutex_unlock(&mvm->mutex); - ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC); + ret = iwl_mvm_wowlan_config_key_params(mvm, vif); mutex_lock(&mvm->mutex); if (ret) return ret; @@ -1694,9 +1933,12 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) status->gtk[0] = v7->gtk[0]; status->igtk[0] = v7->igtk[0]; - } else if (notif_ver == 9 || notif_ver == 10) { + } else if (notif_ver == 9 || notif_ver == 10 || notif_ver == 11) { struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data; + /* these three command versions have same layout and size, the + * difference is only in a few not used (reserved) fields. + */ status = iwl_mvm_parse_wowlan_status_common_v9(mvm, cmd.resp_pkt->data, len); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 95f883aba148..5dc39fbb74d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -305,7 +305,6 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file, int pos = 0; int bufsz = sizeof(buf); int tbl_idx; - u8 *value; if (!iwl_mvm_firmware_running(mvm)) return -EIO; @@ -321,16 +320,18 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file, pos = scnprintf(buf, bufsz, "SAR geographic profile disabled\n"); } else { - value = &mvm->fwrt.geo_profiles[tbl_idx - 1].values[0]; - pos += scnprintf(buf + pos, bufsz - pos, "Use geographic profile %d\n", tbl_idx); pos += scnprintf(buf + pos, bufsz - pos, "2.4GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", - value[1], value[2], value[0]); + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[0], + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[1], + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].max); pos += scnprintf(buf + pos, bufsz - pos, "5.2GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", - value[4], value[5], value[3]); + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[0], + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[1], + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].max); } mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index 59cef0d89a6d..03e5bf5cb909 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -754,6 +754,33 @@ iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm, target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF; } +static int +iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry_v8 *target) +{ + u32 flags; + int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target); + + if (ret) + return ret; + + iwl_mvm_ftm_set_ndp_params(mvm, target); + + /* + * If secure LTF is turned off, replace the flag with PMF only + */ + flags = le32_to_cpu(target->initiator_ap_flags); + if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) && + !IWL_MVM_FTM_INITIATOR_SECURE_LTF) { + flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED; + flags |= IWL_INITIATOR_AP_FLAGS_PMF; + target->initiator_ap_flags = cpu_to_le32(flags); + } + + return 0; +} + static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) @@ -773,24 +800,53 @@ static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm, for (i = 0; i < cmd.num_of_ap; i++) { 
struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i]; - u32 flags; - err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target); + err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target); if (err) return err; + } - iwl_mvm_ftm_set_ndp_params(mvm, target); - - /* - * If secure LTF is turned off, replace the flag with PMF only - */ - flags = le32_to_cpu(target->initiator_ap_flags); - if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) && - !IWL_MVM_FTM_INITIATOR_SECURE_LTF) { - flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED; - flags |= IWL_INITIATOR_AP_FLAGS_PMF; - target->initiator_ap_flags = cpu_to_le32(flags); + return iwl_mvm_ftm_send_cmd(mvm, &hcmd); +} + +static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *req) +{ + struct iwl_tof_range_req_cmd_v13 cmd; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .dataflags[0] = IWL_HCMD_DFL_DUP, + .data[0] = &cmd, + .len[0] = sizeof(cmd), + }; + u8 i; + int err; + + iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); + + for (i = 0; i < cmd.num_of_ap; i++) { + struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; + struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i]; + + err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target); + if (err) + return err; + + if (peer->ftm.trigger_based || peer->ftm.non_trigger_based) + target->bss_color = peer->ftm.bss_color; + + if (peer->ftm.non_trigger_based) { + target->min_time_between_msr = + cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR); + target->burst_period = + cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR); + } else { + target->min_time_between_msr = cpu_to_le16(0); } + + target->band = + iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band); } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); @@ -814,6 +870,9 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_FW_CMD_VER_UNKNOWN); switch (cmd_ver) { + case 13: + err = iwl_mvm_ftm_start_v13(mvm, vif, req); + break; case 12: err = iwl_mvm_ftm_start_v12(mvm, vif, req); break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c index 5a249ea97eb2..eba5433c2626 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include <net/cfg80211.h> #include <linux/etherdevice.h> @@ -77,7 +77,7 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef, static void iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm, - struct iwl_tof_responder_config_cmd_v8 *cmd) + struct iwl_tof_responder_config_cmd_v9 *cmd) { /* Up to 2 R2I STS are allowed on the responder */ u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ? @@ -104,7 +104,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, * field interpretation is different), so the same struct can be use * for all cases. 
*/ - struct iwl_tof_responder_config_cmd_v8 cmd = { + struct iwl_tof_responder_config_cmd_v9 cmd = { .channel_num = chandef->chan->hw_value, .cmd_valid_fields = cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO | @@ -115,10 +115,27 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP, TOF_RESPONDER_CONFIG_CMD, 6); int err; + int cmd_size; lockdep_assert_held(&mvm->mutex); -if (cmd_ver == 8) + /* Use a default of bss_color=1 for now */ + if (cmd_ver == 9) { + cmd.cmd_valid_fields |= + cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR | + IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR); + cmd.bss_color = 1; + cmd.min_time_between_msr = + cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR); + cmd.max_time_between_msr = + cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR); + cmd_size = sizeof(struct iwl_tof_responder_config_cmd_v9); + } else { + /* All versions up to version 8 have the same size */ + cmd_size = sizeof(struct iwl_tof_responder_config_cmd_v8); + } + + if (cmd_ver >= 8) iwl_mvm_ftm_responder_set_ndp(mvm, &cmd); if (cmd_ver >= 7) @@ -137,7 +154,7 @@ if (cmd_ver == 8) return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD, LOCATION_GROUP, 0), - 0, sizeof(cmd), &cmd); + 0, cmd_size, &cmd); } static int diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 38fd5886af2d..74404c96063b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -743,7 +743,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) /* all structs have the same common part, add it */ len += sizeof(cmd.common); - ret = iwl_sar_select_profile(&mvm->fwrt, per_chain, ACPI_SAR_NUM_TABLES, + ret = iwl_sar_select_profile(&mvm->fwrt, per_chain, + IWL_NUM_CHAIN_TABLES, n_subbands, prof_a, prof_b); /* return on error or if the profile is disabled (positive number) */ @@ -1057,16 +1058,7 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = { static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) { - int ret; - - ret = iwl_mvm_get_ppag_table(mvm); - if (ret < 0) { - IWL_DEBUG_RADIO(mvm, - "PPAG BIOS table invalid or unavailable. (%d)\n", - ret); - return 0; - } - + /* no need to read the table, done in INIT stage */ if (!dmi_check_system(dmi_ppag_approved_list)) { IWL_DEBUG_RADIO(mvm, "System vendor '%s' is not in the approved list, disabling PPAG.\n", @@ -1191,12 +1183,65 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) ret); } } + +void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) +{ + int ret; + + /* read PPAG table */ + ret = iwl_mvm_get_ppag_table(mvm); + if (ret < 0) { + IWL_DEBUG_RADIO(mvm, + "PPAG BIOS table invalid or unavailable. (%d)\n", + ret); + } + + /* read SAR tables */ + ret = iwl_sar_get_wrds_table(&mvm->fwrt); + if (ret < 0) { + IWL_DEBUG_RADIO(mvm, + "WRDS SAR BIOS table invalid or unavailable. (%d)\n", + ret); + /* + * If not available, don't fail and don't bother with EWRD and + * WGDS */ + + if (!iwl_sar_get_wgds_table(&mvm->fwrt)) { + /* + * If basic SAR is not available, we check for WGDS, + * which should *not* be available either. If it is + * available, issue an error, because we can't use SAR + * Geo without basic SAR. 
+ */ + IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n"); + } + + } else { + ret = iwl_sar_get_ewrd_table(&mvm->fwrt); + /* if EWRD is not available, we can still use + * WRDS, so don't fail */ + if (ret < 0) + IWL_DEBUG_RADIO(mvm, + "EWRD SAR BIOS table invalid or unavailable. (%d)\n", + ret); + + /* read geo SAR table */ + if (iwl_sar_geo_support(&mvm->fwrt)) { + ret = iwl_sar_get_wgds_table(&mvm->fwrt); + if (ret < 0) + IWL_DEBUG_RADIO(mvm, + "Geo SAR BIOS table invalid or unavailable. (%d)\n", + ret); + /* we don't fail if the table is not available */ + } + } +} #else /* CONFIG_ACPI */ inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { - return -ENOENT; + return 1; } inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) @@ -1231,6 +1276,10 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) { return DSM_VALUE_RFI_DISABLE; } + +void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) +{ +} #endif /* CONFIG_ACPI */ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) @@ -1286,27 +1335,6 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) static int iwl_mvm_sar_init(struct iwl_mvm *mvm) { - int ret; - - ret = iwl_sar_get_wrds_table(&mvm->fwrt); - if (ret < 0) { - IWL_DEBUG_RADIO(mvm, - "WRDS SAR BIOS table invalid or unavailable. (%d)\n", - ret); - /* - * If not available, don't fail and don't bother with EWRD. - * Return 1 to tell that we can't use WGDS either. - */ - return 1; - } - - ret = iwl_sar_get_ewrd_table(&mvm->fwrt); - /* if EWRD is not available, we can still use WRDS, so don't fail */ - if (ret < 0) - IWL_DEBUG_RADIO(mvm, - "EWRD SAR BIOS table invalid or unavailable. (%d)\n", - ret); - return iwl_mvm_sar_select_profile(mvm, 1, 1); } @@ -1542,19 +1570,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; ret = iwl_mvm_sar_init(mvm); - if (ret == 0) { + if (ret == 0) ret = iwl_mvm_sar_geo_init(mvm); - } else if (ret == -ENOENT && !iwl_sar_get_wgds_table(&mvm->fwrt)) { - /* - * If basic SAR is not available, we check for WGDS, - * which should *not* be available either. If it is - * available, issue an error, because we can't use SAR - * Geo without basic SAR. 
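Note that the #else stub above now returns 1 rather than -ENOENT: in this code a positive return from iwl_mvm_sar_select_profile() means "SAR tables unavailable or disabled" (skip geo SAR but keep initializing), while a negative value remains a hard error. A small runnable sketch of that tri-state convention, with hypothetical stand-in helpers in place of the driver functions:

#include <stdio.h>

/* hypothetical stand-ins; return values mimic the driver's convention */
static int example_select_profile(void) { return 1; /* tables absent */ }
static int example_geo_init(void) { return 0; }

static int example_sar_init(void)
{
	int ret = example_select_profile();

	if (ret < 0)	/* hard failure: abort firmware bring-up */
		return ret;
	if (ret > 0)	/* SAR disabled/unavailable: skip geo SAR */
		return 0;
	return example_geo_init();	/* basic SAR applied: do geo too */
}

int main(void)
{
	printf("sar init -> %d\n", example_sar_init());
	return 0;
}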
- */ - IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n"); - } - - if (ret < 0) + else if (ret < 0) goto error; iwl_mvm_tas_init(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index fd5e08961651..fd352b2624a6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -647,12 +647,14 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) { cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX); - if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) { + if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED); - if (vif->bss_conf.twt_protected) - ctxt_sta->data_policy |= - cpu_to_le32(PROTECTED_TWT_SUPPORTED); - } + if (vif->bss_conf.twt_protected) + ctxt_sta->data_policy |= + cpu_to_le32(PROTECTED_TWT_SUPPORTED); + if (vif->bss_conf.twt_broadcast) + ctxt_sta->data_policy |= + cpu_to_le32(BROADCAST_TWT_SUPPORTED); } @@ -1005,8 +1007,10 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, return -ENOMEM; #ifdef CONFIG_IWLWIFI_DEBUGFS - if (mvm->beacon_inject_active) + if (mvm->beacon_inject_active) { + dev_kfree_skb(beacon); return -EBUSY; + } #endif ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon); @@ -1427,14 +1431,34 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, { struct iwl_rx_packet *pkt = rxb_addr(rxb); unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); - struct iwl_stored_beacon_notif *sb = (void *)pkt->data; + struct iwl_stored_beacon_notif_common *sb = (void *)pkt->data; struct ieee80211_rx_status rx_status; struct sk_buff *skb; + u8 *data; u32 size = le32_to_cpu(sb->byte_count); + int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PROT_OFFLOAD_GROUP, + STORED_BEACON_NTF, 0); - if (size == 0 || pkt_len < struct_size(sb, data, size)) + if (size == 0) return; + /* handle per-version differences */ + if (ver <= 2) { + struct iwl_stored_beacon_notif_v2 *sb_v2 = (void *)pkt->data; + + if (pkt_len < struct_size(sb_v2, data, size)) + return; + + data = sb_v2->data; + } else { + struct iwl_stored_beacon_notif_v3 *sb_v3 = (void *)pkt->data; + + if (pkt_len < struct_size(sb_v3, data, size)) + return; + + data = sb_v3->data; + } + skb = alloc_skb(size, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); @@ -1455,7 +1479,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, rx_status.band); /* copy the data */ - skb_put_data(skb, sb->data, size); + skb_put_data(skb, data, size); memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); /* pass it as regular rx to mac80211 */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 70ebecb73c24..3a4585222d6d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -390,7 +390,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (mvm->trans->max_skb_frags) hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; - hw->queues = IEEE80211_MAX_QUEUES; + hw->queues = IEEE80211_NUM_ACS; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; 
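A few hunks up, iwl_mvm_rx_stored_beacon_notif() now selects the notification layout by firmware API version and validates the packet length before touching the flexible data[] array. A self-contained sketch of that parse-by-version pattern, with hypothetical struct names and an explicit length check in place of the kernel's struct_size() helper:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* stand-ins for iwl_stored_beacon_notif_v2/_v3: a common byte_count,
 * version-specific fields, then a flexible data[] array */
struct notif_v2 { uint32_t byte_count; uint8_t data[]; };
struct notif_v3 { uint32_t byte_count; uint32_t extra; uint8_t data[]; };

/* return the payload, or NULL if the packet is too short for the layout
 * implied by ver -- the same bound struct_size() expresses in-kernel */
static const uint8_t *notif_data(const void *pkt, size_t pkt_len, int ver,
				 uint32_t *size)
{
	if (ver <= 2) {
		const struct notif_v2 *n = pkt;

		if (pkt_len < sizeof(*n))
			return NULL;
		*size = n->byte_count;
		if (pkt_len - sizeof(*n) < *size)
			return NULL;
		return n->data;
	} else {
		const struct notif_v3 *n = pkt;

		if (pkt_len < sizeof(*n))
			return NULL;
		*size = n->byte_count;
		if (pkt_len - sizeof(*n) < *size)
			return NULL;
		return n->data;
	}
}

int main(void)
{
	struct { uint32_t byte_count; uint8_t data[4]; } pkt = { 4, "abc" };
	uint32_t size = 0;
	const uint8_t *data = notif_data(&pkt, sizeof(pkt), 2, &size);

	printf("v2 payload %s, %u bytes\n", data ? "ok" : "short", size);
	return 0;
}

Feeding it a short pkt_len makes notif_data() return NULL instead of reading past the buffer, which is exactly what the struct_size() comparison guards against in the driver.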
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | IEEE80211_RADIOTAP_MCS_HAVE_STBC; @@ -762,11 +762,11 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; - /* treat non-bufferable MMPDUs on AP interfaces as broadcast */ - if ((info->control.vif->type == NL80211_IFTYPE_AP || - info->control.vif->type == NL80211_IFTYPE_ADHOC) && - ieee80211_is_mgmt(hdr->frame_control) && - !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) + /* + * bufferable MMPDUs or MMPDUs on STA interfaces come via TXQs + * so we treat the others as broadcast + */ + if (ieee80211_is_mgmt(hdr->frame_control)) sta = NULL; /* If there is no sta, and it's not offchannel - send through AP */ @@ -2440,6 +2440,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IWL_DEBUG_MAC80211(mvm, "arp filter changed\n"); iwl_mvm_configure_bcast_filter(mvm); } + + if (changes & BSS_CHANGED_BANDWIDTH) + iwl_mvm_apply_fw_smps_request(vif); } static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, @@ -2987,16 +2990,20 @@ static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, void *_data) { struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data; + const struct cfg80211_bss_ies *ies; const struct element *elem; - elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, bss->ies->data, - bss->ies->len); + rcu_read_lock(); + ies = rcu_dereference(bss->ies); + elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data, + ies->len); if (!elem || elem->datalen < 10 || !(elem->data[10] & WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) { data->tolerated = false; } + rcu_read_unlock(); } static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, @@ -5035,22 +5042,14 @@ static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_mlme_event *mlme) { - if (mlme->data == ASSOC_EVENT && (mlme->status == MLME_DENIED || - mlme->status == MLME_TIMEOUT)) { + if ((mlme->data == ASSOC_EVENT || mlme->data == AUTH_EVENT) && + (mlme->status == MLME_DENIED || mlme->status == MLME_TIMEOUT)) { iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_ASSOC_FAILED, NULL); return; } - if (mlme->data == AUTH_EVENT && (mlme->status == MLME_DENIED || - mlme->status == MLME_TIMEOUT)) { - iwl_dbg_tlv_time_point(&mvm->fwrt, - IWL_FW_INI_TIME_POINT_EAPOL_FAILED, - NULL); - return; - } - if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) { iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_DEASSOC, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index b50942f28bb7..f877d86b038e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -431,8 +431,6 @@ struct iwl_mvm_vif { static inline struct iwl_mvm_vif * iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) { - if (!vif) - return NULL; return (void *)vif->drv_priv; } @@ -2045,6 +2043,7 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm); int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm); +void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm); #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 
7fb4e618f76e..da705fcaf0fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -416,7 +416,7 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, struct iwl_rx_packet *pkt; struct iwl_host_cmd cmd = { .id = MCC_UPDATE_CMD, - .flags = CMD_WANT_SKB, + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &mcc_update_cmd }, }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 20e8d343a950..6f60018feed1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -78,7 +78,6 @@ module_exit(iwl_mvm_exit); static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - struct iwl_trans_debug *dbg = &mvm->trans->dbg; u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; u32 reg_val = 0; u32 phy_config = iwl_mvm_get_phy_config(mvm); @@ -115,10 +114,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; - if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt) || - (iwl_trans_dbg_ini_valid(mvm->trans) && - dbg->fw_mon_cfg[IWL_FW_INI_ALLOCATION_ID_INTERNAL].buf_location) - ) + if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG; iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, @@ -214,11 +210,14 @@ void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; + enum ieee80211_smps_mode mode = IEEE80211_SMPS_AUTOMATIC; - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, - mvm->fw_static_smps_request ? 
- IEEE80211_SMPS_STATIC : - IEEE80211_SMPS_AUTOMATIC); + if (mvm->fw_static_smps_request && + vif->bss_conf.chandef.width == NL80211_CHAN_WIDTH_160 && + vif->bss_conf.he_support) + mode = IEEE80211_SMPS_STATIC; + + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode); } static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac, @@ -374,7 +373,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { struct iwl_mfu_assert_dump_notif), RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC, - struct iwl_stored_beacon_notif), + struct iwl_stored_beacon_notif_v2), RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF, iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC, struct iwl_mu_group_mgmt_notif), @@ -693,11 +692,16 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) if (ret && ret != -ERFKILL) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); + if (!ret && iwl_mvm_is_lar_supported(mvm)) { + mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; + ret = iwl_mvm_init_mcc(mvm); + } if (!iwlmvm_mod_params.init_dbg || !ret) iwl_mvm_stop_device(mvm); mutex_unlock(&mvm->mutex); + rtnl_unlock(); if (ret < 0) IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); @@ -772,6 +776,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, dbgfs_dir); + iwl_mvm_get_acpi_tables(mvm); + mvm->init_status = 0; if (iwl_mvm_has_new_rx_api(mvm)) { @@ -792,10 +798,26 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0; - mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; - mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; - mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; - mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; + if (iwl_mvm_has_new_tx_api(mvm)) { + /* + * If we have the new TX/queue allocation API initialize them + * all to invalid numbers. We'll rewrite the ones that we need + * later, but that doesn't happen for all of them all of the + * time (e.g. P2P Device is optional), and if a dynamic queue + * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then + * iwl_mvm_is_static_queue() erroneously returns true, and we + * might have things getting stuck. + */ + mvm->aux_queue = IWL_MVM_INVALID_QUEUE; + mvm->snif_queue = IWL_MVM_INVALID_QUEUE; + mvm->probe_queue = IWL_MVM_INVALID_QUEUE; + mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE; + } else { + mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; + mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; + mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; + } mvm->sf_state = SF_UNINIT; if (iwl_mvm_has_unified_ucode(mvm)) @@ -1400,7 +1422,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) * can't recover this since we're already half suspended. 
*/ if (!mvm->fw_restart && fw_error) { - iwl_fw_error_collect(&mvm->fwrt); + iwl_fw_error_collect(&mvm->fwrt, false); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; @@ -1451,7 +1473,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) } } - iwl_fw_error_collect(&mvm->fwrt); + iwl_fw_error_collect(&mvm->fwrt, false); if (fw_error && mvm->fw_restart > 0) mvm->fw_restart--; @@ -1459,13 +1481,31 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) } } -static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) +static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) iwl_mvm_dump_nic_error_log(mvm); + if (sync) { + iwl_fw_error_collect(&mvm->fwrt, true); + /* + * Currently, the only case for sync=true is during + * shutdown, so just stop in this case. If/when that + * changes, we need to be a bit smarter here. + */ + return; + } + + /* + * If the firmware crashes while we're already considering it + * to be dead then don't ask for a restart, that cannot do + * anything useful anyway. + */ + if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status)) + return; + iwl_mvm_nic_restart(mvm, true); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c index 0b818067067c..44344216a1a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c @@ -11,7 +11,7 @@ * DDR needs frequency in units of 16.666MHz, so provide FW with the * frequency values in the adjusted format. */ -const static struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = { +static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = { /* LPDDR4 */ /* frequency 3733MHz */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index c0babb8d5b5c..c12f303cf652 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -69,8 +69,8 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, /* if we are here - this for sure is either CCMP or GCMP */ if (IS_ERR_OR_NULL(sta)) { - IWL_ERR(mvm, - "expected hw-decrypted unicast frame for station\n"); + IWL_DEBUG_DROP(mvm, + "expected hw-decrypted unicast frame for station\n"); return -1; } @@ -279,7 +279,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, { struct iwl_mvm_sta *mvmsta; struct iwl_mvm_vif *mvmvif; - u8 fwkeyid = u32_get_bits(status, IWL_RX_MPDU_STATUS_KEY); u8 keyid; struct ieee80211_key_conf *key; u32 len = le16_to_cpu(desc->mpdu_len); @@ -299,6 +298,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, if (!ieee80211_is_beacon(hdr->frame_control)) return 0; + /* key mismatch - will also report !MIC_OK but we shouldn't count it */ + if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID)) + return -1; + /* good cases */ if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK && !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) @@ -309,26 +312,36 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, mvmsta = iwl_mvm_sta_from_mac80211(sta); - /* what? 
*/ - if (fwkeyid != 6 && fwkeyid != 7) - return -1; - mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - key = rcu_dereference(mvmvif->bcn_prot.keys[fwkeyid - 6]); - if (!key) - return -1; + /* + * both keys will have the same cipher and MIC length, use + * whichever one is available + */ + key = rcu_dereference(mvmvif->bcn_prot.keys[0]); + if (!key) { + key = rcu_dereference(mvmvif->bcn_prot.keys[1]); + if (!key) + return -1; + } if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2) return -1; - /* - * See if the key ID matches - if not this may be due to a - * switch and the firmware may erroneously report !MIC_OK. - */ + /* get the real key ID */ keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2]; - if (keyid != fwkeyid) - return -1; + /* and if that's the other key, look it up */ + if (keyid != key->keyidx) { + /* + * shouldn't happen since firmware checked, but be safe + * in case the MIC length is wrong too, for example + */ + if (keyid != 6 && keyid != 7) + return -1; + key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]); + if (!key) + return -1; + } /* Report status to mac80211 */ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 0368b7101222..d78e436fa8b5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1648,7 +1648,7 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm, struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i]; u32 n_aps_flag = iwl_mvm_scan_ch_n_aps_flag(vif_type, - cfg->v2.channel_num); + channels[i]->hw_value); cfg->flags = cpu_to_le32(flags | n_aps_flag); cfg->v2.channel_num = channels[i]->hw_value; @@ -1661,22 +1661,32 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm, } static int -iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm_scan_params *params, - __le32 *cmd_short_ssid, u8 *cmd_bssid, - u8 *scan_ssid_num, u8 *bssid_num) +iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm, + struct iwl_mvm_scan_params *params, + struct iwl_scan_probe_params_v4 *pp) { int j, idex_s = 0, idex_b = 0; struct cfg80211_scan_6ghz_params *scan_6ghz_params = params->scan_6ghz_params; + bool hidden_supported = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN); - if (!params->n_6ghz_params) { - for (j = 0; j < params->n_ssids; j++) { - cmd_short_ssid[idex_s++] = - cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid, - params->ssids[j].ssid_len)); - (*scan_ssid_num)++; + for (j = 0; j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE; + j++) { + if (!params->ssids[j].ssid_len) + continue; + + pp->short_ssid[idex_s] = + cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid, + params->ssids[j].ssid_len)); + + if (hidden_supported) { + pp->direct_scan[idex_s].id = WLAN_EID_SSID; + pp->direct_scan[idex_s].len = params->ssids[j].ssid_len; + memcpy(pp->direct_scan[idex_s].ssid, params->ssids[j].ssid, + params->ssids[j].ssid_len); } - return 0; + idex_s++; } /* @@ -1693,40 +1703,40 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm_scan_params *params, /* First, try to place the short SSID */ if (scan_6ghz_params[j].short_ssid_valid) { for (k = 0; k < idex_s; k++) { - if (cmd_short_ssid[k] == + if (pp->short_ssid[k] == cpu_to_le32(scan_6ghz_params[j].short_ssid)) break; } if (k == idex_s && idex_s < SCAN_SHORT_SSID_MAX_SIZE) { - cmd_short_ssid[idex_s++] = + pp->short_ssid[idex_s++] = cpu_to_le32(scan_6ghz_params[j].short_ssid); - (*scan_ssid_num)++; } } /* try to 
place BSSID for the same entry */ for (k = 0; k < idex_b; k++) { - if (!memcmp(&cmd_bssid[ETH_ALEN * k], + if (!memcmp(&pp->bssid_array[k], scan_6ghz_params[j].bssid, ETH_ALEN)) break; } if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) { - memcpy(&cmd_bssid[ETH_ALEN * idex_b++], + memcpy(&pp->bssid_array[idex_b++], scan_6ghz_params[j].bssid, ETH_ALEN); - (*bssid_num)++; } } + + pp->short_ssid_num = idex_s; + pp->bssid_num = idex_b; return 0; } /* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v6 */ static void iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, - u32 n_channels, __le32 *cmd_short_ssid, - u8 *cmd_bssid, u8 scan_ssid_num, - u8 bssid_num, + u32 n_channels, + struct iwl_scan_probe_params_v4 *pp, struct iwl_scan_channel_params_v6 *cp, enum nl80211_iftype vif_type) { @@ -1741,7 +1751,7 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0; u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries; - bool force_passive, found = false, + bool force_passive, found = false, allow_passive = true, unsolicited_probe_on_chan = false, psc_no_listen = false; cfg->v1.channel_num = params->channels[i]->hw_value; @@ -1766,9 +1776,9 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, scan_6ghz_params[j].unsolicited_probe; psc_no_listen |= scan_6ghz_params[j].psc_no_listen; - for (k = 0; k < scan_ssid_num; k++) { + for (k = 0; k < pp->short_ssid_num; k++) { if (!scan_6ghz_params[j].unsolicited_probe && - le32_to_cpu(cmd_short_ssid[k]) == + le32_to_cpu(pp->short_ssid[k]) == scan_6ghz_params[j].short_ssid) { /* Relevant short SSID bit set */ if (s_ssid_bitmap & BIT(k)) { @@ -1778,7 +1788,10 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, /* * Use short SSID only to create a new - * iteration during channel dwell. + * iteration during channel dwell or in + * case that the short SSID has a + * matching SSID, i.e., scan for hidden + * APs. 
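The short SSIDs being matched in this logic are the values computed earlier as ~crc32_le(~0, ssid, ssid_len): the 6 GHz short SSID is simply the standard CRC-32 of the SSID bytes. A userspace sketch of the same computation using zlib's crc32(), which applies the ~0 pre/post-conditioning internally so no explicit inversion is needed:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
	const char *ssid = "example-network";	/* illustrative SSID */
	unsigned long short_ssid = crc32(0L, (const unsigned char *)ssid,
					 strlen(ssid));

	printf("short SSID for \"%s\": 0x%08lx\n", ssid, short_ssid);
	return 0;
}

Built with cc short_ssid.c -lz, this prints the value the driver would store in pp->short_ssid[] (before the cpu_to_le32() conversion).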
*/ if (n_used_bssid_entries >= 3) { s_ssid_bitmap |= BIT(k); @@ -1786,6 +1799,12 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, n_used_bssid_entries -= 3; found = true; break; + } else if (pp->direct_scan[k].len) { + s_ssid_bitmap |= BIT(k); + s_max++; + found = true; + allow_passive = false; + break; } } } @@ -1793,8 +1812,8 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, if (found) continue; - for (k = 0; k < bssid_num; k++) { - if (!memcmp(&cmd_bssid[ETH_ALEN * k], + for (k = 0; k < pp->bssid_num; k++) { + if (!memcmp(&pp->bssid_array[k], scan_6ghz_params[j].bssid, ETH_ALEN)) { if (!(bssid_bitmap & BIT(k))) { @@ -1849,7 +1868,7 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, force_passive |= (unsolicited_probe_on_chan && (s_max > 1 || b_max > 3)); } - if (force_passive || + if ((allow_passive && force_passive) || (!flags && !cfg80211_channel_is_psc(params->channels[i]))) flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE; @@ -2368,32 +2387,28 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (ret) return ret; - iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params, - &bitmap_ssid); if (!params->scan_6ghz) { + iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params, + &bitmap_ssid); iwl_mvm_scan_umac_fill_ch_p_v6(mvm, params, vif, - &scan_p->channel_params, bitmap_ssid); + &scan_p->channel_params, bitmap_ssid); return 0; + } else { + pb->preq = params->preq; } + cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif); cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY; cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS; - ret = iwl_mvm_umac_scan_fill_6g_chan_list(params, pb->short_ssid, - pb->bssid_array[0], - &pb->short_ssid_num, - &pb->bssid_num); + ret = iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb); if (ret) return ret; iwl_mvm_umac_scan_cfg_channels_v6_6g(params, params->n_channels, - pb->short_ssid, - pb->bssid_array[0], - pb->short_ssid_num, - pb->bssid_num, cp, - vif->type); + pb, cp, vif->type); cp->count = params->n_channels; if (!params->n_ssids || (params->n_ssids == 1 && !params->ssids[0].ssid_len)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 9c45a64c5009..a64874c05ced 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -316,8 +316,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, } static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int queue, u8 tid, u8 flags) + u16 *queueptr, u8 tid, u8 flags) { + int queue = *queueptr; struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, @@ -326,6 +327,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (iwl_mvm_has_new_tx_api(mvm)) { iwl_trans_txq_free(mvm->trans, queue); + *queueptr = IWL_MVM_INVALID_QUEUE; return 0; } @@ -487,6 +489,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, u8 sta_id, tid; unsigned long disable_agg_tids = 0; bool same_sta; + u16 queue_tmp = queue; int ret; lockdep_assert_held(&mvm->mutex); @@ -509,7 +512,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); - ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0); + ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0); if (ret) { 
IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", @@ -1184,6 +1187,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); int queue = -1; + u16 queue_tmp; unsigned long disable_agg_tids = 0; enum iwl_mvm_agg_state queue_state; bool shared_queue = false, inc_ssn; @@ -1332,7 +1336,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, return 0; out_err: - iwl_mvm_disable_txq(mvm, sta, queue, tid, 0); + queue_tmp = queue; + iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0); return ret; } @@ -1779,7 +1784,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) continue; - iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i, + iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i, 0); mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } @@ -1987,7 +1992,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor); if (ret) { if (!iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_disable_txq(mvm, NULL, *queue, + iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0); return ret; } @@ -2060,7 +2065,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; - iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0); + iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -2077,7 +2082,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; - iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0); + iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -2173,7 +2178,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int queue; + u16 *queueptr, queue; lockdep_assert_held(&mvm->mutex); @@ -2182,10 +2187,10 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, switch (vif->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: - queue = mvm->probe_queue; + queueptr = &mvm->probe_queue; break; case NL80211_IFTYPE_P2P_DEVICE: - queue = mvm->p2p_dev_queue; + queueptr = &mvm->p2p_dev_queue; break; default: WARN(1, "Can't free bcast queue on vif type %d\n", @@ -2193,7 +2198,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, return; } - iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0); + queue = *queueptr; + iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0); if (iwl_mvm_has_new_tx_api(mvm)) return; @@ -2428,7 +2434,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true); - iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0); + iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); if (ret) @@ -3190,6 +3196,20 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, return NULL; } +static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len) +{ + int i; + + for (i = len 
- 1; i >= 0; i--) { + if (pn1[i] > pn2[i]) + return 1; + if (pn1[i] < pn2[i]) + return -1; + } + + return 0; +} + static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, u32 sta_id, struct ieee80211_key_conf *key, bool mcast, @@ -3208,6 +3228,9 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, int i, size; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS); + int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, + ADD_STA_KEY, + new_api ? 2 : 1); if (sta_id == IWL_MVM_INVALID_STA) return -EINVAL; @@ -3220,7 +3243,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); - if (new_api) { + if (api_ver >= 2) { memcpy((void *)&u.cmd.tx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], IWL_MIC_KEY_SIZE); @@ -3241,7 +3264,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_CCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); memcpy(u.cmd.common.key, key->key, key->keylen); - if (new_api) + if (api_ver >= 2) pn = atomic64_read(&key->tx_pn); break; case WLAN_CIPHER_SUITE_WEP104: @@ -3257,7 +3280,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_GCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); memcpy(u.cmd.common.key, key->key, key->keylen); - if (new_api) + if (api_ver >= 2) pn = atomic64_read(&key->tx_pn); break; default: @@ -3274,7 +3297,46 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, u.cmd.common.key_flags = key_flags; u.cmd.common.sta_id = sta_id; - if (new_api) { + if (key->cipher == WLAN_CIPHER_SUITE_TKIP) + i = 0; + else + i = -1; + + for (; i < IEEE80211_NUM_TIDS; i++) { + struct ieee80211_key_seq seq = {}; + u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn; + int rx_pn_len = 8; + /* there's a hole at 2/3 in FW format depending on version */ + int hole = api_ver >= 3 ? 
0 : 2; + + ieee80211_get_key_rx_seq(key, i, &seq); + + if (key->cipher == WLAN_CIPHER_SUITE_TKIP) { + rx_pn[0] = seq.tkip.iv16; + rx_pn[1] = seq.tkip.iv16 >> 8; + rx_pn[2 + hole] = seq.tkip.iv32; + rx_pn[3 + hole] = seq.tkip.iv32 >> 8; + rx_pn[4 + hole] = seq.tkip.iv32 >> 16; + rx_pn[5 + hole] = seq.tkip.iv32 >> 24; + } else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) { + rx_pn = seq.hw.seq; + rx_pn_len = seq.hw.seq_len; + } else { + rx_pn[0] = seq.ccmp.pn[0]; + rx_pn[1] = seq.ccmp.pn[1]; + rx_pn[2 + hole] = seq.ccmp.pn[2]; + rx_pn[3 + hole] = seq.ccmp.pn[3]; + rx_pn[4 + hole] = seq.ccmp.pn[4]; + rx_pn[5 + hole] = seq.ccmp.pn[5]; + } + + if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt, + rx_pn_len) > 0) + memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn, + rx_pn_len); + } + + if (api_ver >= 2) { u.cmd.transmit_seq_cnt = cpu_to_le64(pn); size = sizeof(u.cmd); } else { @@ -3411,7 +3473,6 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, u8 key_offset, bool mcast) { - int ret; const u8 *addr; struct ieee80211_key_seq seq; u16 p1k[5]; @@ -3433,30 +3494,19 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, return -EINVAL; } - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_TKIP: + if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) { addr = iwl_mvm_get_mac_addr(mvm, vif, sta); /* get phase 1 key from mac80211 */ ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); - ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, - seq.tkip.iv32, p1k, 0, key_offset, - mfp); - break; - case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - case WLAN_CIPHER_SUITE_GCMP: - case WLAN_CIPHER_SUITE_GCMP_256: - ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, - 0, NULL, 0, key_offset, mfp); - break; - default: - ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, - 0, NULL, 0, key_offset, mfp); + + return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, + seq.tkip.iv32, p1k, 0, key_offset, + mfp); } - return ret; + return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, + 0, NULL, 0, key_offset, mfp); } int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index d3307a11fcac..25af88a3edce 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -168,6 +168,16 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, rcu_read_unlock(); } + if (vif->bss_conf.assoc) { + /* + * When not associated, this will be called from + * iwl_mvm_event_mlme_callback_ini() + */ + iwl_dbg_tlv_time_point(&mvm->fwrt, + IWL_FW_INI_TIME_POINT_ASSOC_FAILED, + NULL); + } + iwl_mvm_connection_loss(mvm, vif, errmsg); return true; } @@ -246,6 +256,18 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, } } +static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm) +{ + /* + * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the + * roc_done_wk is already scheduled or running, so don't schedule it + * again to avoid a race where the roc_done_wk clears this bit after + * it is set here, affecting the next run of the roc_done_wk. + */ + if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) + iwl_mvm_roc_finished(mvm); +} + /* * Handles a FW notification for an event that is known to the driver. 
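iwl_mvm_p2p_roc_finished() above leans on test_and_set_bit() being a single atomic read-modify-write: only the caller that flips the bit from clear to set schedules roc_done_wk, so the work cannot be queued twice for one event. A runnable userspace sketch of the same guard, using a C11 atomic fetch-or and illustrative names:

#include <stdatomic.h>
#include <stdio.h>

#define NEED_FLUSH 0x1u

static atomic_uint status;	/* stands in for mvm->status */

static void roc_finished(void)
{
	/* atomic_fetch_or() returns the old value, like test_and_set_bit() */
	if (!(atomic_fetch_or(&status, NEED_FLUSH) & NEED_FLUSH))
		printf("scheduling roc_done work\n");
	else
		printf("already pending, not rescheduling\n");
}

int main(void)
{
	roc_finished();	/* first caller queues the work */
	roc_finished();	/* second caller sees the bit set and backs off */
	return 0;
}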
* @@ -297,8 +319,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, switch (te_data->vif->type) { case NL80211_IFTYPE_P2P_DEVICE: ieee80211_remain_on_channel_expired(mvm->hw); - set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); - iwl_mvm_roc_finished(mvm); + iwl_mvm_p2p_roc_finished(mvm); break; case NL80211_IFTYPE_STATION: /* @@ -674,8 +695,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, /* Session protection is still ongoing. Cancel it */ iwl_mvm_cancel_session_protection(mvm, mvmvif, id); if (iftype == NL80211_IFTYPE_P2P_DEVICE) { - set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); - iwl_mvm_roc_finished(mvm); + iwl_mvm_p2p_roc_finished(mvm); } } return false; @@ -842,8 +862,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, /* End TE, notify mac80211 */ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; ieee80211_remain_on_channel_expired(mvm->hw); - set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); - iwl_mvm_roc_finished(mvm); + iwl_mvm_p2p_roc_finished(mvm); } else if (le32_to_cpu(notif->start)) { if (WARN_ON(mvmvif->time_event_data.id != le32_to_cpu(notif->conf_id))) @@ -1004,14 +1023,13 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { iwl_mvm_cancel_session_protection(mvm, mvmvif, mvmvif->time_event_data.id); - set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); + iwl_mvm_p2p_roc_finished(mvm); } else { iwl_mvm_remove_aux_roc_te(mvm, mvmvif, &mvmvif->time_event_data); + iwl_mvm_roc_finished(mvm); } - iwl_mvm_roc_finished(mvm); - return; } @@ -1025,12 +1043,11 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { iwl_mvm_remove_time_event(mvm, mvmvif, te_data); - set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); + iwl_mvm_p2p_roc_finished(mvm); } else { iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); + iwl_mvm_roc_finished(mvm); } - - iwl_mvm_roc_finished(mvm); } void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 0b8a0cd3b652..8dc1b8eecb86 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1093,22 +1093,22 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_bz_a0_hr_b0, iwl_ax201_name), + iwl_cfg_bz_a0_hr_b0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_bz_a0_gf_a0, iwl_ax211_name), + iwl_cfg_bz_a0_gf_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, - iwl_cfg_bz_a0_gf4_a0, iwl_ax211_name), + iwl_cfg_bz_a0_gf4_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_bz_a0_mr_a0, iwl_ax211_name), + iwl_cfg_bz_a0_mr_a0, iwl_bz_name), /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index cc550f6ef957..a43e56c7689f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ 
b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -42,6 +42,7 @@ struct iwl_host_cmd; * struct iwl_rx_mem_buffer * @page_dma: bus address of rxb page * @page: driver's pointer to the rxb page + * @list: list entry for the membuffer * @invalid: rxb is in driver ownership - not owned by HW * @vid: index of this rxb in the global table * @offset: indicates which offset of the page (in bytes) @@ -50,10 +51,10 @@ struct iwl_host_cmd; struct iwl_rx_mem_buffer { dma_addr_t page_dma; struct page *page; - u16 vid; - bool invalid; struct list_head list; u32 offset; + u16 vid; + bool invalid; }; /** @@ -253,6 +254,13 @@ struct cont_rec { }; #endif +enum iwl_pcie_fw_reset_state { + FW_RESET_IDLE, + FW_RESET_REQUESTED, + FW_RESET_OK, + FW_RESET_ERROR, +}; + /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data @@ -404,7 +412,7 @@ struct iwl_trans_pcie { dma_addr_t base_rb_stts_dma; bool fw_reset_handshake; - bool fw_reset_done; + enum iwl_pcie_fw_reset_state fw_reset_state; wait_queue_head_t fw_reset_waitq; char rf_name[32]; @@ -670,19 +678,19 @@ static inline const char *queue_name(struct device *dev, IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; if (i == 0) - return DRV_NAME ": shared IRQ"; + return DRV_NAME ":shared_IRQ"; return devm_kasprintf(dev, GFP_KERNEL, - DRV_NAME ": queue %d", i + vec); + DRV_NAME ":queue_%d", i + vec); } if (i == 0) - return DRV_NAME ": default queue"; + return DRV_NAME ":default_queue"; if (i == trans_p->alloc_vecs - 1) - return DRV_NAME ": exception"; + return DRV_NAME ":exception"; return devm_kasprintf(dev, GFP_KERNEL, - DRV_NAME ": queue %d", i); + DRV_NAME ":queue_%d", i); } static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 4f6f4b2720f0..8e45eb38304b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -487,6 +487,9 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; + if (!trans_pcie->rx_pool) + return; + for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) { if (!trans_pcie->rx_pool[i].page) continue; @@ -1062,7 +1065,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) INIT_LIST_HEAD(&rba->rbd_empty); spin_unlock_bh(&rba->lock); - /* free all first - we might be reconfigured for a different size */ + /* free all first - we overwrite everything here */ iwl_pcie_free_rbs_pool(trans); for (i = 0; i < RX_QUEUE_SIZE; i++) @@ -1653,7 +1656,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) /* The STATUS_FW_ERROR bit is set in this function. This must happen * before we wake up the command caller, to ensure a proper cleanup. */ - iwl_trans_fw_error(trans); + iwl_trans_fw_error(trans, false); clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); wake_up(&trans->wait_command_queue); @@ -2225,7 +2228,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) "Microcode SW error detected. 
Restarting 0x%X.\n", inta_fh); isr_stats->sw++; - iwl_pcie_irq_handle_error(trans); + /* during FW reset flow report errors from there */ + if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) { + trans_pcie->fw_reset_state = FW_RESET_ERROR; + wake_up(&trans_pcie->fw_reset_waitq); + } else { + iwl_pcie_irq_handle_error(trans); + } } /* After checking FH register check HW register */ @@ -2293,7 +2302,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) { IWL_DEBUG_ISR(trans, "Reset flow completed\n"); - trans_pcie->fw_reset_done = true; + trans_pcie->fw_reset_state = FW_RESET_OK; wake_up(&trans_pcie->fw_reset_waitq); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index a34009357227..bf0c32a74ca4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -87,7 +87,12 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. */ - iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_INIT); + else + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); } static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) @@ -95,7 +100,7 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; - trans_pcie->fw_reset_done = false; + trans_pcie->fw_reset_state = FW_RESET_REQUESTED; if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, @@ -106,10 +111,15 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) /* wait 200ms */ ret = wait_event_timeout(trans_pcie->fw_reset_waitq, - trans_pcie->fw_reset_done, FW_RESET_TIMEOUT); - if (!ret) + trans_pcie->fw_reset_state != FW_RESET_REQUESTED, + FW_RESET_TIMEOUT); + if (!ret || trans_pcie->fw_reset_state == FW_RESET_ERROR) { IWL_INFO(trans, "firmware didn't ACK the reset - continue anyway\n"); + iwl_trans_fw_error(trans, true); + } + + trans_pcie->fw_reset_state = FW_RESET_IDLE; } void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) @@ -121,9 +131,21 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) if (trans_pcie->is_down) return; - if (trans_pcie->fw_reset_handshake && - trans->state >= IWL_TRANS_FW_STARTED) - iwl_trans_pcie_fw_reset_handshake(trans); + if (trans->state >= IWL_TRANS_FW_STARTED) { + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ); + iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, + 5000); + msleep(100); + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_SW_RESET); + } else if (trans_pcie->fw_reset_handshake) { + iwl_trans_pcie_fw_reset_handshake(trans); + } + } trans_pcie->is_down = true; @@ -154,9 +176,17 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) iwl_pcie_ctxt_info_free(trans); /* Make sure (redundant) we've released our request to stay awake */ - iwl_clear_bit(trans, CSR_GP_CNTRL, - 
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); + else + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_SW_RESET); + } /* Stop the device, and put it in low power state */ iwl_pcie_gen2_apm_stop(trans, false); @@ -436,7 +466,10 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, iwl_pcie_set_ltr(trans); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_ROM_START); + else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); else iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index bee6b4574226..f252680f18e8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -449,11 +449,23 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans) int ret; /* stop device's busmaster DMA activity */ - iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); - ret = iwl_poll_bit(trans, CSR_RESET, - CSR_RESET_REG_FLAG_MASTER_DISABLED, - CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ); + + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, + 100); + } else { + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + + ret = iwl_poll_bit(trans, CSR_RESET, + CSR_RESET_REG_FLAG_MASTER_DISABLED, + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + } + if (ret < 0) IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); @@ -1866,6 +1878,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + /* free all first - we might be reconfigured for a different size */ + iwl_pcie_free_rbs_pool(trans); + trans->txqs.cmd.q_id = trans_cfg->cmd_queue; trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; @@ -1992,15 +2007,24 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) { int ret; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ; + u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP; + u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN; spin_lock(&trans_pcie->reg_lock); if (trans_pcie->cmd_hold_nic_awake) goto out; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ; + mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; + poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; + } + /* this bit wakes up the NIC */ - __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) udelay(2); @@ -2024,10 +2048,7 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) * 5000 series and later 
(including 1000 series) have non-volatile SRAM, * and do not save/restore SRAM when power cycling. */ - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | - CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000); if (unlikely(ret < 0)) { u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); @@ -2947,8 +2968,8 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; struct iwl_fw_error_dump_rb *rb; - dma_unmap_page(trans->dev, rxb->page_dma, max_len, - DMA_FROM_DEVICE); + dma_sync_single_for_cpu(trans->dev, rxb->page_dma, + max_len, DMA_FROM_DEVICE); rb_len += sizeof(**data) + sizeof(*rb) + max_len; @@ -2957,10 +2978,6 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, rb = (void *)(*data)->data; rb->index = cpu_to_le32(i); memcpy(rb->data, page_address(rxb->page), max_len); - /* remap the page for the free benefit */ - rxb->page_dma = dma_map_page(trans->dev, rxb->page, - rxb->offset, max_len, - DMA_FROM_DEVICE); *data = iwl_fw_error_next_data(*data); } @@ -3489,15 +3506,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, pci_set_master(pdev); addr_size = trans->txqs.tfd.addr_size; - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size)); - if (!ret) - ret = pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(addr_size)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size)); if (ret) { - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!ret) - ret = pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(32)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); /* both attempts failed: */ if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); diff --git a/drivers/net/wireless/intersil/Kconfig b/drivers/net/wireless/intersil/Kconfig index c52d9b535623..bd6bf70ece03 100644 --- a/drivers/net/wireless/intersil/Kconfig +++ b/drivers/net/wireless/intersil/Kconfig @@ -16,24 +16,4 @@ source "drivers/net/wireless/intersil/hostap/Kconfig" source "drivers/net/wireless/intersil/orinoco/Kconfig" source "drivers/net/wireless/intersil/p54/Kconfig" -config PRISM54 - tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus (DEPRECATED)' - depends on PCI - select WIRELESS_EXT - select WEXT_SPY - select WEXT_PRIV - select FW_LOADER - help - This enables support for FullMAC PCI/Cardbus prism54 devices. This - driver is now deprecated in favor for the SoftMAC driver, p54pci. - p54pci supports FullMAC PCI/Cardbus devices as well. 
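The iwl_trans_pcie_alloc() hunk above replaces the deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask() pairs with dma_set_mask_and_coherent(), which programs the streaming and coherent masks in one call. A minimal sketch of the idiom, as a hypothetical probe-time helper (example_set_dma_masks() and its addr_size parameter are illustrative, not part of the patch):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_masks(struct pci_dev *pdev, unsigned int addr_size)
{
	/* one call covers both the streaming and the coherent mask */
	int ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));

	if (ret)	/* retry with a 32-bit mask before giving up */
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		dev_err(&pdev->dev, "No suitable DMA available\n");
	return ret;
}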
- - For more information refer to the p54 wiki: - - http://wireless.wiki.kernel.org/en/users/Drivers/p54 - - Note: You need a motherboard with DMA support to use any of these cards - - When built as module you get the module prism54 - endif # WLAN_VENDOR_INTERSIL diff --git a/drivers/net/wireless/intersil/Makefile b/drivers/net/wireless/intersil/Makefile index aa630e9c3d3d..65281d1b3d85 100644 --- a/drivers/net/wireless/intersil/Makefile +++ b/drivers/net/wireless/intersil/Makefile @@ -2,4 +2,3 @@ obj-$(CONFIG_HOSTAP) += hostap/ obj-$(CONFIG_HERMES) += orinoco/ obj-$(CONFIG_P54_COMMON) += p54/ -obj-$(CONFIG_PRISM54) += prism54/ diff --git a/drivers/net/wireless/intersil/hostap/hostap.h b/drivers/net/wireless/intersil/hostap/hostap.h index c4b81ff7d7e4..c17ab6dbbb53 100644 --- a/drivers/net/wireless/intersil/hostap/hostap.h +++ b/drivers/net/wireless/intersil/hostap/hostap.h @@ -93,6 +93,7 @@ extern const struct iw_handler_def hostap_iw_handler_def; extern const struct ethtool_ops prism2_ethtool_ops; int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); - +int hostap_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd); #endif /* HOSTAP_H */ diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c index 49766b285230..0a376f112db9 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c @@ -3941,7 +3941,8 @@ const struct iw_handler_def hostap_iw_handler_def = .get_wireless_stats = hostap_get_wireless_stats, }; - +/* Private ioctls (iwpriv) that have not yet been converted + * into new wireless extensions API */ int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct iwreq *wrq = (struct iwreq *) ifr; @@ -3953,9 +3954,6 @@ int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) local = iface->local; switch (cmd) { - /* Private ioctls (iwpriv) that have not yet been converted - * into new wireless extensions API */ - case PRISM2_IOCTL_INQUIRE: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else ret = prism2_ioctl_priv_inquire(dev, (int *) wrq->u.name); @@ -4009,11 +4007,31 @@ int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) wrq->u.ap_addr.sa_data); break; #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} +/* Private ioctls that are not used with iwpriv; + * in SIOCDEVPRIVATE range */ +int hostap_siocdevprivate(struct net_device *dev, struct ifreq *ifr, + void __user *data, int cmd) +{ + struct iwreq *wrq = (struct iwreq *)ifr; + struct hostap_interface *iface; + local_info_t *local; + int ret = 0; - /* Private ioctls that are not used with iwpriv; - * in SIOCDEVPRIVATE range */ + iface = netdev_priv(dev); + local = iface->local; + + if (in_compat_syscall()) /* not implemented yet */ + return -EOPNOTSUPP; + switch (cmd) { #ifdef PRISM2_DOWNLOAD_SUPPORT case PRISM2_IOCTL_DOWNLOAD: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c index de97b3304115..54f67b682b6a 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_main.c +++ b/drivers/net/wireless/intersil/hostap/hostap_main.c @@ -797,6 +797,7 @@ static const struct net_device_ops hostap_netdev_ops = { .ndo_open = prism2_open, .ndo_stop = prism2_close, .ndo_do_ioctl = hostap_ioctl, + .ndo_siocdevprivate = hostap_siocdevprivate, .ndo_set_mac_address = 
prism2_set_mac_address, .ndo_set_rx_mode = hostap_set_multicast_list, .ndo_tx_timeout = prism2_tx_timeout, @@ -809,6 +810,7 @@ static const struct net_device_ops hostap_mgmt_netdev_ops = { .ndo_open = prism2_open, .ndo_stop = prism2_close, .ndo_do_ioctl = hostap_ioctl, + .ndo_siocdevprivate = hostap_siocdevprivate, .ndo_set_mac_address = prism2_set_mac_address, .ndo_set_rx_mode = hostap_set_multicast_list, .ndo_tx_timeout = prism2_tx_timeout, @@ -821,6 +823,7 @@ static const struct net_device_ops hostap_master_ops = { .ndo_open = prism2_open, .ndo_stop = prism2_close, .ndo_do_ioctl = hostap_ioctl, + .ndo_siocdevprivate = hostap_siocdevprivate, .ndo_set_mac_address = prism2_set_mac_address, .ndo_set_rx_mode = hostap_set_multicast_list, .ndo_tx_timeout = prism2_tx_timeout, diff --git a/drivers/net/wireless/intersil/prism54/Makefile b/drivers/net/wireless/intersil/prism54/Makefile deleted file mode 100644 index 4f5572dffb5e..000000000000 --- a/drivers/net/wireless/intersil/prism54/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# $Id: Makefile.k26,v 1.7 2004/01/30 16:24:00 ajfa Exp $ - -prism54-objs := islpci_eth.o islpci_mgt.o \ - isl_38xx.o isl_ioctl.o islpci_dev.o \ - islpci_hotplug.o oid_mgt.o - -obj-$(CONFIG_PRISM54) += prism54.o - diff --git a/drivers/net/wireless/intersil/prism54/isl_38xx.c b/drivers/net/wireless/intersil/prism54/isl_38xx.c deleted file mode 100644 index ae964de347f7..000000000000 --- a/drivers/net/wireless/intersil/prism54/isl_38xx.c +++ /dev/null @@ -1,245 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2002 Intersil Americas Inc. - * Copyright (C) 2003-2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>_ - */ - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/delay.h> -#include <linux/ktime.h> - -#include <linux/uaccess.h> -#include <asm/io.h> - -#include "prismcompat.h" -#include "isl_38xx.h" -#include "islpci_dev.h" -#include "islpci_mgt.h" - -/****************************************************************************** - Device Interface & Control functions -******************************************************************************/ - -/** - * isl38xx_disable_interrupts - disable all interrupts - * @device: pci memory base address - * - * Instructs the device to disable all interrupt reporting by asserting - * the IRQ line. New events may still show up in the interrupt identification - * register located at offset %ISL38XX_INT_IDENT_REG. 
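The hostap hunks above move the driver's SIOCDEVPRIVATE-range commands out of ndo_do_ioctl and into the dedicated ndo_siocdevprivate callback, which receives the userspace data pointer directly. A minimal sketch of such a handler under hypothetical example_ names (a real driver, like hostap here, dispatches on cmd):

#include <linux/netdevice.h>
#include <linux/sockios.h>

static int example_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
				  void __user *data, int cmd)
{
	/* the core routes only SIOCDEVPRIVATE..SIOCDEVPRIVATE+15 here */
	if (cmd < SIOCDEVPRIVATE || cmd > SIOCDEVPRIVATE + 15)
		return -EOPNOTSUPP;
	/* decode cmd, then exchange buffers with copy_from_user()/
	 * copy_to_user() through @data */
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_siocdevprivate	= example_siocdevprivate,
};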
- */ -void -isl38xx_disable_interrupts(void __iomem *device) -{ - isl38xx_w32_flush(device, 0x00000000, ISL38XX_INT_EN_REG); - udelay(ISL38XX_WRITEIO_DELAY); -} - -void -isl38xx_handle_sleep_request(isl38xx_control_block *control_block, - int *powerstate, void __iomem *device_base) -{ - /* device requests to go into sleep mode - * check whether the transmit queues for data and management are empty */ - if (isl38xx_in_queue(control_block, ISL38XX_CB_TX_DATA_LQ)) - /* data tx queue not empty */ - return; - - if (isl38xx_in_queue(control_block, ISL38XX_CB_TX_MGMTQ)) - /* management tx queue not empty */ - return; - - /* check also whether received frames are pending */ - if (isl38xx_in_queue(control_block, ISL38XX_CB_RX_DATA_LQ)) - /* data rx queue not empty */ - return; - - if (isl38xx_in_queue(control_block, ISL38XX_CB_RX_MGMTQ)) - /* management rx queue not empty */ - return; - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, "Device going to sleep mode\n"); -#endif - - /* all queues are empty, allow the device to go into sleep mode */ - *powerstate = ISL38XX_PSM_POWERSAVE_STATE; - - /* assert the Sleep interrupt in the Device Interrupt Register */ - isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_SLEEP, - ISL38XX_DEV_INT_REG); - udelay(ISL38XX_WRITEIO_DELAY); -} - -void -isl38xx_handle_wakeup(isl38xx_control_block *control_block, - int *powerstate, void __iomem *device_base) -{ - /* device is in active state, update the powerstate flag */ - *powerstate = ISL38XX_PSM_ACTIVE_STATE; - - /* now check whether there are frames pending for the card */ - if (!isl38xx_in_queue(control_block, ISL38XX_CB_TX_DATA_LQ) - && !isl38xx_in_queue(control_block, ISL38XX_CB_TX_MGMTQ)) - return; - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_ANYTHING, "Wake up handler trigger the device\n"); -#endif - - /* either data or management transmit queue has a frame pending - * trigger the device by setting the Update bit in the Device Int reg */ - isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_UPDATE, - ISL38XX_DEV_INT_REG); - udelay(ISL38XX_WRITEIO_DELAY); -} - -void -isl38xx_trigger_device(int asleep, void __iomem *device_base) -{ - u32 reg; - -#if VERBOSE > SHOW_ERROR_MESSAGES - u32 counter = 0; - struct timespec64 current_ts64; - DEBUG(SHOW_FUNCTION_CALLS, "isl38xx trigger device\n"); -#endif - - /* check whether the device is in power save mode */ - if (asleep) { - /* device is in powersave, trigger the device for wakeup */ -#if VERBOSE > SHOW_ERROR_MESSAGES - ktime_get_real_ts64(&current_ts64); - DEBUG(SHOW_TRACING, "%lld.%09ld Device wakeup triggered\n", - (s64)current_ts64.tv_sec, current_ts64.tv_nsec); - - DEBUG(SHOW_TRACING, "%lld.%09ld Device register read %08x\n", - (s64)current_ts64.tv_sec, current_ts64.tv_nsec, - readl(device_base + ISL38XX_CTRL_STAT_REG)); -#endif - - reg = readl(device_base + ISL38XX_INT_IDENT_REG); - if (reg == 0xabadface) { -#if VERBOSE > SHOW_ERROR_MESSAGES - ktime_get_real_ts64(&current_ts64); - DEBUG(SHOW_TRACING, - "%lld.%09ld Device register abadface\n", - (s64)current_ts64.tv_sec, current_ts64.tv_nsec); -#endif - /* read the Device Status Register until Sleepmode bit is set */ - while (reg = readl(device_base + ISL38XX_CTRL_STAT_REG), - (reg & ISL38XX_CTRL_STAT_SLEEPMODE) == 0) { - udelay(ISL38XX_WRITEIO_DELAY); -#if VERBOSE > SHOW_ERROR_MESSAGES - counter++; -#endif - } - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, - "%lld.%09ld Device register read %08x\n", - (s64)current_ts64.tv_sec, current_ts64.tv_nsec, - readl(device_base + ISL38XX_CTRL_STAT_REG)); -
ktime_get_real_ts64(&current_ts64); - DEBUG(SHOW_TRACING, - "%lld.%09ld Device asleep counter %i\n", - (s64)current_ts64.tv_sec, current_ts64.tv_nsec, - counter); -#endif - } - /* assert the Wakeup interrupt in the Device Interrupt Register */ - isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_WAKEUP, - ISL38XX_DEV_INT_REG); - -#if VERBOSE > SHOW_ERROR_MESSAGES - udelay(ISL38XX_WRITEIO_DELAY); - - /* perform another read on the Device Status Register */ - reg = readl(device_base + ISL38XX_CTRL_STAT_REG); - ktime_get_real_ts64(&current_ts64); - DEBUG(SHOW_TRACING, "%lld.%09ld Device register read %08x\n", - (s64)current_ts64.tv_sec, current_ts64.tv_nsec, reg); -#endif - } else { - /* device is (still) awake */ -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, "Device is in active state\n"); -#endif - /* trigger the device by setting the Update bit in the Device Int reg */ - - isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_UPDATE, - ISL38XX_DEV_INT_REG); - } -} - -void -isl38xx_interface_reset(void __iomem *device_base, dma_addr_t host_address) -{ -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_FUNCTION_CALLS, "isl38xx_interface_reset\n"); -#endif - - /* load the address of the control block in the device */ - isl38xx_w32_flush(device_base, host_address, ISL38XX_CTRL_BLK_BASE_REG); - udelay(ISL38XX_WRITEIO_DELAY); - - /* set the reset bit in the Device Interrupt Register */ - isl38xx_w32_flush(device_base, ISL38XX_DEV_INT_RESET, ISL38XX_DEV_INT_REG); - udelay(ISL38XX_WRITEIO_DELAY); - - /* enable the interrupt for detecting initialization */ - - /* Note: Do not enable other interrupts here. We want the - * device to have come up first 100% before allowing any other - * interrupts. */ - isl38xx_w32_flush(device_base, ISL38XX_INT_IDENT_INIT, ISL38XX_INT_EN_REG); - udelay(ISL38XX_WRITEIO_DELAY); /* allow complete full reset */ -} - -void -isl38xx_enable_common_interrupts(void __iomem *device_base) -{ - u32 reg; - - reg = ISL38XX_INT_IDENT_UPDATE | ISL38XX_INT_IDENT_SLEEP | - ISL38XX_INT_IDENT_WAKEUP; - isl38xx_w32_flush(device_base, reg, ISL38XX_INT_EN_REG); - udelay(ISL38XX_WRITEIO_DELAY); -} - -int -isl38xx_in_queue(isl38xx_control_block *cb, int queue) -{ - const s32 delta = (le32_to_cpu(cb->driver_curr_frag[queue]) - - le32_to_cpu(cb->device_curr_frag[queue])); - - /* determine the amount of fragments in the queue depending on the type - * of the queue, either transmit or receive */ - - BUG_ON(delta < 0); /* driver ptr must be ahead of device ptr */ - - switch (queue) { - /* send queues */ - case ISL38XX_CB_TX_MGMTQ: - BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE); - fallthrough; - - case ISL38XX_CB_TX_DATA_LQ: - case ISL38XX_CB_TX_DATA_HQ: - BUG_ON(delta > ISL38XX_CB_TX_QSIZE); - return delta; - - /* receive queues */ - case ISL38XX_CB_RX_MGMTQ: - BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE); - return ISL38XX_CB_MGMT_QSIZE - delta; - - case ISL38XX_CB_RX_DATA_LQ: - case ISL38XX_CB_RX_DATA_HQ: - BUG_ON(delta > ISL38XX_CB_RX_QSIZE); - return ISL38XX_CB_RX_QSIZE - delta; - } - BUG(); - return 0; -} diff --git a/drivers/net/wireless/intersil/prism54/isl_38xx.h b/drivers/net/wireless/intersil/prism54/isl_38xx.h deleted file mode 100644 index 69218b8b2b23..000000000000 --- a/drivers/net/wireless/intersil/prism54/isl_38xx.h +++ /dev/null @@ -1,158 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2002 Intersil Americas Inc. 
- */ - -#ifndef _ISL_38XX_H -#define _ISL_38XX_H - -#include <asm/io.h> -#include <asm/byteorder.h> - -#define ISL38XX_CB_RX_QSIZE 8 -#define ISL38XX_CB_TX_QSIZE 32 - -/* ISL38XX Access Point Specific definitions */ -#define ISL38XX_MAX_WDS_LINKS 8 - -/* ISL38xx Client Specific definitions */ -#define ISL38XX_PSM_ACTIVE_STATE 0 -#define ISL38XX_PSM_POWERSAVE_STATE 1 - -/* ISL38XX Host Interface Definitions */ -#define ISL38XX_PCI_MEM_SIZE 0x02000 -#define ISL38XX_MEMORY_WINDOW_SIZE 0x01000 -#define ISL38XX_DEV_FIRMWARE_ADDRES 0x20000 -#define ISL38XX_WRITEIO_DELAY 10 /* in us */ -#define ISL38XX_RESET_DELAY 50 /* in ms */ -#define ISL38XX_WAIT_CYCLE 10 /* in 10ms */ -#define ISL38XX_MAX_WAIT_CYCLES 10 - -/* PCI Memory Area */ -#define ISL38XX_HARDWARE_REG 0x0000 -#define ISL38XX_CARDBUS_CIS 0x0800 -#define ISL38XX_DIRECT_MEM_WIN 0x1000 - -/* Hardware registers */ -#define ISL38XX_DEV_INT_REG 0x0000 -#define ISL38XX_INT_IDENT_REG 0x0010 -#define ISL38XX_INT_ACK_REG 0x0014 -#define ISL38XX_INT_EN_REG 0x0018 -#define ISL38XX_GEN_PURP_COM_REG_1 0x0020 -#define ISL38XX_GEN_PURP_COM_REG_2 0x0024 -#define ISL38XX_CTRL_BLK_BASE_REG ISL38XX_GEN_PURP_COM_REG_1 -#define ISL38XX_DIR_MEM_BASE_REG 0x0030 -#define ISL38XX_CTRL_STAT_REG 0x0078 - -/* High end mobos queue up pci writes, the following - * is used to "read" from after a write to force flush */ -#define ISL38XX_PCI_POSTING_FLUSH ISL38XX_INT_EN_REG - -/** - * isl38xx_w32_flush - PCI iomem write helper - * @base: (host) memory base address of the device - * @val: 32bit value (host order) to write - * @offset: byte offset into @base to write value to - * - * This helper takes care of writing a 32bit datum to the - * specified offset into the device's pci memory space, and making sure - * the pci memory buffers get flushed by performing one harmless read - * from the %ISL38XX_PCI_POSTING_FLUSH offset. 
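The isl38xx_w32_flush() kernel-doc above (the helper itself follows) captures the standard remedy for posted PCI writes: read any harmless register on the same device to force the write out of intermediate buffers. The same pattern as a generic sketch, with a hypothetical flush offset:

#include <linux/io.h>

#define EXAMPLE_FLUSH_REG	0x0018	/* any safely readable device register */

static inline void example_w32_flush(void __iomem *base, u32 val,
				     unsigned long offset)
{
	writel(val, base + offset);		/* may linger in a bridge buffer */
	(void)readl(base + EXAMPLE_FLUSH_REG);	/* read forces the write through */
}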
- */ -static inline void -isl38xx_w32_flush(void __iomem *base, u32 val, unsigned long offset) -{ - writel(val, base + offset); - (void) readl(base + ISL38XX_PCI_POSTING_FLUSH); -} - -/* Device Interrupt register bits */ -#define ISL38XX_DEV_INT_RESET 0x0001 -#define ISL38XX_DEV_INT_UPDATE 0x0002 -#define ISL38XX_DEV_INT_WAKEUP 0x0008 -#define ISL38XX_DEV_INT_SLEEP 0x0010 - -/* Interrupt Identification/Acknowledge/Enable register bits */ -#define ISL38XX_INT_IDENT_UPDATE 0x0002 -#define ISL38XX_INT_IDENT_INIT 0x0004 -#define ISL38XX_INT_IDENT_WAKEUP 0x0008 -#define ISL38XX_INT_IDENT_SLEEP 0x0010 -#define ISL38XX_INT_SOURCES 0x001E - -/* Control/Status register bits */ -/* Looks like there are other meaningful bits - 0x20004400 seen in normal operation, - 0x200044db at 'timeout waiting for mgmt response' -*/ -#define ISL38XX_CTRL_STAT_SLEEPMODE 0x00000200 -#define ISL38XX_CTRL_STAT_CLKRUN 0x00800000 -#define ISL38XX_CTRL_STAT_RESET 0x10000000 -#define ISL38XX_CTRL_STAT_RAMBOOT 0x20000000 -#define ISL38XX_CTRL_STAT_STARTHALTED 0x40000000 -#define ISL38XX_CTRL_STAT_HOST_OVERRIDE 0x80000000 - -/* Control Block definitions */ -#define ISL38XX_CB_RX_DATA_LQ 0 -#define ISL38XX_CB_TX_DATA_LQ 1 -#define ISL38XX_CB_RX_DATA_HQ 2 -#define ISL38XX_CB_TX_DATA_HQ 3 -#define ISL38XX_CB_RX_MGMTQ 4 -#define ISL38XX_CB_TX_MGMTQ 5 -#define ISL38XX_CB_QCOUNT 6 -#define ISL38XX_CB_MGMT_QSIZE 4 -#define ISL38XX_MIN_QTHRESHOLD 4 /* fragments */ - -/* Memory Manager definitions */ -#define MGMT_FRAME_SIZE 1500 /* >= size struct obj_bsslist */ -#define MGMT_TX_FRAME_COUNT 24 /* max 4 + spare 4 + 8 init */ -#define MGMT_RX_FRAME_COUNT 24 /* 4*4 + spare 8 */ -#define MGMT_FRAME_COUNT (MGMT_TX_FRAME_COUNT + MGMT_RX_FRAME_COUNT) -#define CONTROL_BLOCK_SIZE 1024 /* should be enough */ -#define PSM_FRAME_SIZE 1536 -#define PSM_MINIMAL_STATION_COUNT 64 -#define PSM_FRAME_COUNT PSM_MINIMAL_STATION_COUNT -#define PSM_BUFFER_SIZE PSM_FRAME_SIZE * PSM_FRAME_COUNT -#define MAX_TRAP_RX_QUEUE 4 -#define HOST_MEM_BLOCK CONTROL_BLOCK_SIZE + PSM_BUFFER_SIZE - -/* Fragment package definitions */ -#define FRAGMENT_FLAG_MF 0x0001 -#define MAX_FRAGMENT_SIZE 1536 - -/* In monitor mode frames have a header. I don't know exactly how big those - * frame can be but I've never seen any frame bigger than 1584... 
: - */ -#define MAX_FRAGMENT_SIZE_RX 1600 - -typedef struct { - __le32 address; /* physical address on host */ - __le16 size; /* packet size */ - __le16 flags; /* set of bit-wise flags */ -} isl38xx_fragment; - -struct isl38xx_cb { - __le32 driver_curr_frag[ISL38XX_CB_QCOUNT]; - __le32 device_curr_frag[ISL38XX_CB_QCOUNT]; - isl38xx_fragment rx_data_low[ISL38XX_CB_RX_QSIZE]; - isl38xx_fragment tx_data_low[ISL38XX_CB_TX_QSIZE]; - isl38xx_fragment rx_data_high[ISL38XX_CB_RX_QSIZE]; - isl38xx_fragment tx_data_high[ISL38XX_CB_TX_QSIZE]; - isl38xx_fragment rx_data_mgmt[ISL38XX_CB_MGMT_QSIZE]; - isl38xx_fragment tx_data_mgmt[ISL38XX_CB_MGMT_QSIZE]; -}; - -typedef struct isl38xx_cb isl38xx_control_block; - -/* determine number of entries currently in queue */ -int isl38xx_in_queue(isl38xx_control_block *cb, int queue); - -void isl38xx_disable_interrupts(void __iomem *); -void isl38xx_enable_common_interrupts(void __iomem *); - -void isl38xx_handle_sleep_request(isl38xx_control_block *, int *, - void __iomem *); -void isl38xx_handle_wakeup(isl38xx_control_block *, int *, void __iomem *); -void isl38xx_trigger_device(int, void __iomem *); -void isl38xx_interface_reset(void __iomem *, dma_addr_t); - -#endif /* _ISL_38XX_H */ diff --git a/drivers/net/wireless/intersil/prism54/isl_ioctl.c b/drivers/net/wireless/intersil/prism54/isl_ioctl.c deleted file mode 100644 index 5e5ceafe098b..000000000000 --- a/drivers/net/wireless/intersil/prism54/isl_ioctl.c +++ /dev/null @@ -1,2909 +0,0 @@ -/* - * Copyright (C) 2002 Intersil Americas Inc. - * (C) 2003,2004 Aurelien Alleaume <slts@free.fr> - * (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> - * (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
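The isl38xx_cb layout above keeps one driver_curr_frag and one device_curr_frag counter per queue, and isl38xx_in_queue() earlier in this patch derives the fill level from their difference: for TX queues the delta is the number of pending entries, for RX queues the free slots are the queue size minus the delta. The same rule as a hypothetical stand-alone helper:

#include <linux/types.h>

static int example_queue_fill(u32 driver_frag, u32 device_frag,
			      int qsize, bool is_rx)
{
	/* the driver counter always runs ahead of the device counter */
	int delta = (int)(driver_frag - device_frag);

	if (delta < 0 || delta > qsize)
		return -1;	/* corrupted counters */
	/* TX: entries still queued; RX: slots the device may still fill */
	return is_rx ? qsize - delta : delta;
}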
- * - */ - -#include <linux/capability.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/if_arp.h> -#include <linux/slab.h> -#include <linux/pci.h> -#include <linux/etherdevice.h> - -#include <linux/uaccess.h> - -#include "prismcompat.h" -#include "isl_ioctl.h" -#include "islpci_mgt.h" -#include "isl_oid.h" /* additional types and defs for isl38xx fw */ -#include "oid_mgt.h" - -#include <net/iw_handler.h> /* New driver API */ - -#define KEY_SIZE_WEP104 13 /* 104/128-bit WEP keys */ -#define KEY_SIZE_WEP40 5 /* 40/64-bit WEP keys */ -/* KEY_SIZE_TKIP should match isl_oid.h, struct obj_key.key[] size */ -#define KEY_SIZE_TKIP 32 /* TKIP keys */ - -static void prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid, - u8 *wpa_ie, size_t wpa_ie_len); -static size_t prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie); -static int prism54_set_wpa(struct net_device *, struct iw_request_info *, - __u32 *, char *); - -/* In 500 kbps */ -static const unsigned char scan_rate_list[] = { 2, 4, 11, 22, - 12, 18, 24, 36, - 48, 72, 96, 108 }; - -/** - * prism54_mib_mode_helper - MIB change mode helper function - * @priv: the &struct islpci_private object to modify - * @iw_mode: new mode (%IW_MODE_*) - * - * This is a helper function, hence it does not lock. Make sure - * caller deals with locking *if* necessary. This function sets the - * mode-dependent mib values and does the mapping of the Linux - * Wireless API modes to Device firmware modes. It also checks for - * correct valid Linux wireless modes. - */ -static int -prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode) -{ - u32 config = INL_CONFIG_MANUALRUN; - u32 mode, bsstype; - - /* For now, just catch early the Repeater and Secondary modes here */ - if (iw_mode == IW_MODE_REPEAT || iw_mode == IW_MODE_SECOND) { - printk(KERN_DEBUG - "%s(): Sorry, Repeater mode and Secondary mode " - "are not yet supported by this driver.\n", __func__); - return -EINVAL; - } - - priv->iw_mode = iw_mode; - - switch (iw_mode) { - case IW_MODE_AUTO: - mode = INL_MODE_CLIENT; - bsstype = DOT11_BSSTYPE_ANY; - break; - case IW_MODE_ADHOC: - mode = INL_MODE_CLIENT; - bsstype = DOT11_BSSTYPE_IBSS; - break; - case IW_MODE_INFRA: - mode = INL_MODE_CLIENT; - bsstype = DOT11_BSSTYPE_INFRA; - break; - case IW_MODE_MASTER: - mode = INL_MODE_AP; - bsstype = DOT11_BSSTYPE_INFRA; - break; - case IW_MODE_MONITOR: - mode = INL_MODE_PROMISCUOUS; - bsstype = DOT11_BSSTYPE_ANY; - config |= INL_CONFIG_RXANNEX; - break; - default: - return -EINVAL; - } - - if (init_wds) - config |= INL_CONFIG_WDS; - mgt_set(priv, DOT11_OID_BSSTYPE, &bsstype); - mgt_set(priv, OID_INL_CONFIG, &config); - mgt_set(priv, OID_INL_MODE, &mode); - - return 0; -} - -/* - * prism54_mib_init - fill MIB cache with defaults - * - * this function initializes the struct given as @mib with defaults, - * of which many are retrieved from the global module parameter - * variables. 
- */ -void -prism54_mib_init(islpci_private *priv) -{ - u32 channel, authen, wep, filter, dot1x, mlme, conformance, power, mode; - struct obj_buffer psm_buffer = { - .size = PSM_BUFFER_SIZE, - .addr = priv->device_psm_buffer - }; - - channel = CARD_DEFAULT_CHANNEL; - authen = CARD_DEFAULT_AUTHEN; - wep = CARD_DEFAULT_WEP; - filter = CARD_DEFAULT_FILTER; /* (0) Do not filter un-encrypted data */ - dot1x = CARD_DEFAULT_DOT1X; - mlme = CARD_DEFAULT_MLME_MODE; - conformance = CARD_DEFAULT_CONFORMANCE; - power = 127; - mode = CARD_DEFAULT_IW_MODE; - - mgt_set(priv, DOT11_OID_CHANNEL, &channel); - mgt_set(priv, DOT11_OID_AUTHENABLE, &authen); - mgt_set(priv, DOT11_OID_PRIVACYINVOKED, &wep); - mgt_set(priv, DOT11_OID_PSMBUFFER, &psm_buffer); - mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &filter); - mgt_set(priv, DOT11_OID_DOT1XENABLE, &dot1x); - mgt_set(priv, DOT11_OID_MLMEAUTOLEVEL, &mlme); - mgt_set(priv, OID_INL_DOT11D_CONFORMANCE, &conformance); - mgt_set(priv, OID_INL_OUTPUTPOWER, &power); - - /* This sets all of the mode-dependent values */ - prism54_mib_mode_helper(priv, mode); -} - -/* this will be executed outside of atomic context thanks to - * schedule_work(), thus we can as well use sleeping semaphore - * locking */ -void -prism54_update_stats(struct work_struct *work) -{ - islpci_private *priv = container_of(work, islpci_private, stats_work); - char *data; - struct obj_bss bss, *bss2; - union oid_res_t r; - - mutex_lock(&priv->stats_lock); - -/* Noise floor. - * I'm not sure if the unit is dBm. - * Note : If we are not connected, this value seems to be irrelevant. */ - - mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r); - priv->local_iwstatistics.qual.noise = r.u; - -/* Get the rssi of the link. To do this we need to retrieve a bss. */ - - /* First get the MAC address of the AP we are associated with. 
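prism54_update_stats(), whose body continues below, depends on being run from a workqueue: process context makes the sleeping mutex and the blocking management requests legal. The general deferral pattern, sketched with hypothetical example_ names:

#include <linux/workqueue.h>
#include <linux/mutex.h>

struct example_priv {
	struct work_struct stats_work;
	struct mutex stats_lock;	/* sleeping lock, safe in a work handler */
};

static void example_update_stats(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, stats_work);

	mutex_lock(&priv->stats_lock);
	/* ... slow, blocking device queries go here ... */
	mutex_unlock(&priv->stats_lock);
}

/* from (possibly atomic) callers: schedule_work(&priv->stats_work); */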
*/ - mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r); - data = r.ptr; - - /* copy this MAC to the bss */ - memcpy(bss.address, data, ETH_ALEN); - kfree(data); - - /* now ask for the corresponding bss */ - mgt_get_request(priv, DOT11_OID_BSSFIND, 0, (void *) &bss, &r); - bss2 = r.ptr; - /* report the rssi and use it to calculate - * link quality through a signal-noise - * ratio */ - priv->local_iwstatistics.qual.level = bss2->rssi; - priv->local_iwstatistics.qual.qual = - bss2->rssi - priv->iwstatistics.qual.noise; - - kfree(bss2); - - /* report that the stats are new */ - priv->local_iwstatistics.qual.updated = 0x7; - -/* Rx : unable to decrypt the MPDU */ - mgt_get_request(priv, DOT11_OID_PRIVRXFAILED, 0, NULL, &r); - priv->local_iwstatistics.discard.code = r.u; - -/* Tx : Max MAC retries num reached */ - mgt_get_request(priv, DOT11_OID_MPDUTXFAILED, 0, NULL, &r); - priv->local_iwstatistics.discard.retries = r.u; - - mutex_unlock(&priv->stats_lock); -} - -struct iw_statistics * -prism54_get_wireless_stats(struct net_device *ndev) -{ - islpci_private *priv = netdev_priv(ndev); - - /* If the stats are being updated return old data */ - if (mutex_trylock(&priv->stats_lock)) { - memcpy(&priv->iwstatistics, &priv->local_iwstatistics, - sizeof (struct iw_statistics)); - /* They won't be marked updated for the next time */ - priv->local_iwstatistics.qual.updated = 0; - mutex_unlock(&priv->stats_lock); - } else - priv->iwstatistics.qual.updated = 0; - - /* Update our wireless stats, but do not schedule to often - * (max 1 HZ) */ - if ((priv->stats_timestamp == 0) || - time_after(jiffies, priv->stats_timestamp + 1 * HZ)) { - schedule_work(&priv->stats_work); - priv->stats_timestamp = jiffies; - } - - return &priv->iwstatistics; -} - -static int -prism54_commit(struct net_device *ndev, struct iw_request_info *info, - char *cwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - - /* simply re-set the last set SSID, this should commit most stuff */ - - /* Commit in Monitor mode is not necessary, also setting essid - * in Monitor mode does not make sense and isn't allowed for this - * device's firmware */ - if (priv->iw_mode != IW_MODE_MONITOR) - return mgt_set_request(priv, DOT11_OID_SSID, 0, NULL); - return 0; -} - -static int -prism54_get_name(struct net_device *ndev, struct iw_request_info *info, - char *cwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - char *capabilities; - union oid_res_t r; - int rvalue; - - if (islpci_get_state(priv) < PRV_STATE_INIT) { - strncpy(cwrq, "NOT READY!", IFNAMSIZ); - return 0; - } - rvalue = mgt_get_request(priv, OID_INL_PHYCAPABILITIES, 0, NULL, &r); - - switch (r.u) { - case INL_PHYCAP_5000MHZ: - capabilities = "IEEE 802.11a/b/g"; - break; - case INL_PHYCAP_FAA: - capabilities = "IEEE 802.11b/g - FAA Support"; - break; - case INL_PHYCAP_2400MHZ: - default: - capabilities = "IEEE 802.11b/g"; /* Default */ - break; - } - strncpy(cwrq, capabilities, IFNAMSIZ); - return rvalue; -} - -static int -prism54_set_freq(struct net_device *ndev, struct iw_request_info *info, - struct iw_freq *fwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - int rvalue; - u32 c; - - if (fwrq->m < 1000) - /* we have a channel number */ - c = fwrq->m; - else - c = (fwrq->e == 1) ? channel_of_freq(fwrq->m / 100000) : 0; - - rvalue = c ? mgt_set_request(priv, DOT11_OID_CHANNEL, 0, &c) : -EINVAL; - - /* Call commit handler */ - return (rvalue ? 
rvalue : -EINPROGRESS); -} - -static int -prism54_get_freq(struct net_device *ndev, struct iw_request_info *info, - struct iw_freq *fwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - union oid_res_t r; - int rvalue; - - rvalue = mgt_get_request(priv, DOT11_OID_CHANNEL, 0, NULL, &r); - fwrq->i = r.u; - rvalue |= mgt_get_request(priv, DOT11_OID_FREQUENCY, 0, NULL, &r); - fwrq->m = r.u; - fwrq->e = 3; - - return rvalue; -} - -static int -prism54_set_mode(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - u32 mlmeautolevel = CARD_DEFAULT_MLME_MODE; - - /* Let's see if the user passed a valid Linux Wireless mode */ - if (*uwrq > IW_MODE_MONITOR || *uwrq < IW_MODE_AUTO) { - printk(KERN_DEBUG - "%s: %s() You passed a non-valid init_mode.\n", - priv->ndev->name, __func__); - return -EINVAL; - } - - down_write(&priv->mib_sem); - - if (prism54_mib_mode_helper(priv, *uwrq)) { - up_write(&priv->mib_sem); - return -EOPNOTSUPP; - } - - /* the ACL code needs an intermediate mlmeautolevel. The wpa stuff an - * extended one. - */ - if ((*uwrq == IW_MODE_MASTER) && (priv->acl.policy != MAC_POLICY_OPEN)) - mlmeautolevel = DOT11_MLME_INTERMEDIATE; - if (priv->wpa) - mlmeautolevel = DOT11_MLME_EXTENDED; - - mgt_set(priv, DOT11_OID_MLMEAUTOLEVEL, &mlmeautolevel); - - if (mgt_commit(priv)) { - up_write(&priv->mib_sem); - return -EIO; - } - priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) - ? priv->monitor_type : ARPHRD_ETHER; - up_write(&priv->mib_sem); - - return 0; -} - -/* Use mib cache */ -static int -prism54_get_mode(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - - BUG_ON((priv->iw_mode < IW_MODE_AUTO) || (priv->iw_mode > - IW_MODE_MONITOR)); - *uwrq = priv->iw_mode; - - return 0; -} - -/* we use DOT11_OID_EDTHRESHOLD. From what I guess the card will not try to - * emit data if (sensitivity > rssi - noise) (in dBm). - * prism54_set_sens does not seem to work. - */ - -static int -prism54_set_sens(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - u32 sens; - - /* by default the card sets this to 20. */ - sens = vwrq->disabled ? 
20 : vwrq->value; - - return mgt_set_request(priv, DOT11_OID_EDTHRESHOLD, 0, &sens); -} - -static int -prism54_get_sens(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - union oid_res_t r; - int rvalue; - - rvalue = mgt_get_request(priv, DOT11_OID_EDTHRESHOLD, 0, NULL, &r); - - vwrq->value = r.u; - vwrq->disabled = (vwrq->value == 0); - vwrq->fixed = 1; - - return rvalue; -} - -static int -prism54_get_range(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - struct iw_range *range = (struct iw_range *) extra; - islpci_private *priv = netdev_priv(ndev); - u8 *data; - int i, m, rvalue; - struct obj_frequencies *freq; - union oid_res_t r; - - memset(range, 0, sizeof (struct iw_range)); - dwrq->length = sizeof (struct iw_range); - - /* set the wireless extension version number */ - range->we_version_source = SUPPORTED_WIRELESS_EXT; - range->we_version_compiled = WIRELESS_EXT; - - /* Now the encoding capabilities */ - range->num_encoding_sizes = 3; - /* 64(40) bits WEP */ - range->encoding_size[0] = 5; - /* 128(104) bits WEP */ - range->encoding_size[1] = 13; - /* 256 bits for WPA-PSK */ - range->encoding_size[2] = 32; - /* 4 keys are allowed */ - range->max_encoding_tokens = 4; - - /* we don't know the quality range... */ - range->max_qual.level = 0; - range->max_qual.noise = 0; - range->max_qual.qual = 0; - /* these value describe an average quality. Needs more tweaking... */ - range->avg_qual.level = -80; /* -80 dBm */ - range->avg_qual.noise = 0; /* don't know what to put here */ - range->avg_qual.qual = 0; - - range->sensitivity = 200; - - /* retry limit capabilities */ - range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME; - range->retry_flags = IW_RETRY_LIMIT; - range->r_time_flags = IW_RETRY_LIFETIME; - - /* I don't know the range. Put stupid things here */ - range->min_retry = 1; - range->max_retry = 65535; - range->min_r_time = 1024; - range->max_r_time = 65535 * 1024; - - /* txpower is supported in dBm's */ - range->txpower_capa = IW_TXPOW_DBM; - - /* Event capability (kernel + driver) */ - range->event_capa[0] = (IW_EVENT_CAPA_K_0 | - IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) | - IW_EVENT_CAPA_MASK(SIOCGIWAP)); - range->event_capa[1] = IW_EVENT_CAPA_K_1; - range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM); - - range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | - IW_ENC_CAPA_CIPHER_TKIP; - - if (islpci_get_state(priv) < PRV_STATE_INIT) - return 0; - - /* Request the device for the supported frequencies - * not really relevant since some devices will report the 5 GHz band - * frequencies even if they don't support them. - */ - rvalue = - mgt_get_request(priv, DOT11_OID_SUPPORTEDFREQUENCIES, 0, NULL, &r); - freq = r.ptr; - - range->num_channels = freq->nr; - range->num_frequency = freq->nr; - - m = min(IW_MAX_FREQUENCIES, (int) freq->nr); - for (i = 0; i < m; i++) { - range->freq[i].m = freq->mhz[i]; - range->freq[i].e = 6; - range->freq[i].i = channel_of_freq(freq->mhz[i]); - } - kfree(freq); - - rvalue |= mgt_get_request(priv, DOT11_OID_SUPPORTEDRATES, 0, NULL, &r); - data = r.ptr; - - /* We got an array of char. It is NULL terminated. */ - i = 0; - while ((i < IW_MAX_BITRATES) && (*data != 0)) { - /* the result must be in bps. 
The card gives us 500Kbps */ - range->bitrate[i] = *data * 500000; - i++; - data++; - } - range->num_bitrates = i; - kfree(r.ptr); - - return rvalue; -} - -/* Set AP address*/ - -static int -prism54_set_wap(struct net_device *ndev, struct iw_request_info *info, - struct sockaddr *awrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - char bssid[6]; - int rvalue; - - if (awrq->sa_family != ARPHRD_ETHER) - return -EINVAL; - - /* prepare the structure for the set object */ - memcpy(&bssid[0], awrq->sa_data, ETH_ALEN); - - /* set the bssid -- does this make sense when in AP mode? */ - rvalue = mgt_set_request(priv, DOT11_OID_BSSID, 0, &bssid); - - return (rvalue ? rvalue : -EINPROGRESS); /* Call commit handler */ -} - -/* get AP address*/ - -static int -prism54_get_wap(struct net_device *ndev, struct iw_request_info *info, - struct sockaddr *awrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - union oid_res_t r; - int rvalue; - - rvalue = mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r); - memcpy(awrq->sa_data, r.ptr, ETH_ALEN); - awrq->sa_family = ARPHRD_ETHER; - kfree(r.ptr); - - return rvalue; -} - -static int -prism54_set_scan(struct net_device *dev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - /* hehe the device does this automagicaly */ - return 0; -} - -/* a little helper that will translate our data into a card independent - * format that the Wireless Tools will understand. This was inspired by - * the "Aironet driver for 4500 and 4800 series cards" (GPL) - */ - -static char * -prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info, - char *current_ev, char *end_buf, struct obj_bss *bss, - char noise) -{ - struct iw_event iwe; /* Temporary buffer */ - short cap; - islpci_private *priv = netdev_priv(ndev); - u8 wpa_ie[MAX_WPA_IE_LEN]; - size_t wpa_ie_len; - - /* The first entry must be the MAC address */ - memcpy(iwe.u.ap_addr.sa_data, bss->address, ETH_ALEN); - iwe.u.ap_addr.sa_family = ARPHRD_ETHER; - iwe.cmd = SIOCGIWAP; - current_ev = iwe_stream_add_event(info, current_ev, end_buf, - &iwe, IW_EV_ADDR_LEN); - - /* The following entries will be displayed in the same order we give them */ - - /* The ESSID. */ - iwe.u.data.length = bss->ssid.length; - iwe.u.data.flags = 1; - iwe.cmd = SIOCGIWESSID; - current_ev = iwe_stream_add_point(info, current_ev, end_buf, - &iwe, bss->ssid.octets); - - /* Capabilities */ -#define CAP_ESS 0x01 -#define CAP_IBSS 0x02 -#define CAP_CRYPT 0x10 - - /* Mode */ - cap = bss->capinfo; - iwe.u.mode = 0; - if (cap & CAP_ESS) - iwe.u.mode = IW_MODE_MASTER; - else if (cap & CAP_IBSS) - iwe.u.mode = IW_MODE_ADHOC; - iwe.cmd = SIOCGIWMODE; - if (iwe.u.mode) - current_ev = iwe_stream_add_event(info, current_ev, end_buf, - &iwe, IW_EV_UINT_LEN); - - /* Encryption capability */ - if (cap & CAP_CRYPT) - iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; - else - iwe.u.data.flags = IW_ENCODE_DISABLED; - iwe.u.data.length = 0; - iwe.cmd = SIOCGIWENCODE; - current_ev = iwe_stream_add_point(info, current_ev, end_buf, - &iwe, NULL); - - /* Add frequency. 
(short) bss->channel is the frequency in MHz */ - iwe.u.freq.m = bss->channel; - iwe.u.freq.e = 6; - iwe.cmd = SIOCGIWFREQ; - current_ev = iwe_stream_add_event(info, current_ev, end_buf, - &iwe, IW_EV_FREQ_LEN); - - /* Add quality statistics */ - iwe.u.qual.level = bss->rssi; - iwe.u.qual.noise = noise; - /* do a simple SNR for quality */ - iwe.u.qual.qual = bss->rssi - noise; - iwe.cmd = IWEVQUAL; - current_ev = iwe_stream_add_event(info, current_ev, end_buf, - &iwe, IW_EV_QUAL_LEN); - - /* Add WPA/RSN Information Element, if any */ - wpa_ie_len = prism54_wpa_bss_ie_get(priv, bss->address, wpa_ie); - if (wpa_ie_len > 0) { - iwe.cmd = IWEVGENIE; - iwe.u.data.length = min_t(size_t, wpa_ie_len, MAX_WPA_IE_LEN); - current_ev = iwe_stream_add_point(info, current_ev, end_buf, - &iwe, wpa_ie); - } - /* Do the bitrates */ - { - char *current_val = current_ev + iwe_stream_lcp_len(info); - int i; - int mask; - - iwe.cmd = SIOCGIWRATE; - /* Those two flags are ignored... */ - iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; - - /* Parse the bitmask */ - mask = 0x1; - for(i = 0; i < sizeof(scan_rate_list); i++) { - if(bss->rates & mask) { - iwe.u.bitrate.value = (scan_rate_list[i] * 500000); - current_val = iwe_stream_add_value( - info, current_ev, current_val, - end_buf, &iwe, IW_EV_PARAM_LEN); - } - mask <<= 1; - } - /* Check if we added any event */ - if ((current_val - current_ev) > iwe_stream_lcp_len(info)) - current_ev = current_val; - } - - return current_ev; -} - -static int -prism54_get_scan(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - int i, rvalue; - struct obj_bsslist *bsslist; - u32 noise = 0; - char *current_ev = extra; - union oid_res_t r; - - if (islpci_get_state(priv) < PRV_STATE_INIT) { - /* device is not ready, fail gently */ - dwrq->length = 0; - return 0; - } - - /* first get the noise value. We will use it to report the link quality */ - rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r); - noise = r.u; - - /* Ask the device for a list of known bss. - * The old API, using SIOCGIWAPLIST, had a hard limit of IW_MAX_AP=64. - * The new API, using SIOCGIWSCAN, is only limited by the buffer size. - * WE-14->WE-16, the buffer is limited to IW_SCAN_MAX_DATA bytes. - * Starting with WE-17, the buffer can be as big as needed. - * But the device won't repport anything if you change the value - * of IWMAX_BSS=24. 
*/ - - rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r); - bsslist = r.ptr; - - /* ok now, scan the list and translate its info */ - for (i = 0; i < (int) bsslist->nr; i++) { - current_ev = prism54_translate_bss(ndev, info, current_ev, - extra + dwrq->length, - &(bsslist->bsslist[i]), - noise); - - /* Check if there is space for one more entry */ - if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) { - /* Ask user space to try again with a bigger buffer */ - rvalue = -E2BIG; - break; - } - } - - kfree(bsslist); - dwrq->length = (current_ev - extra); - dwrq->flags = 0; /* todo */ - - return rvalue; -} - -static int -prism54_set_essid(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct obj_ssid essid; - - memset(essid.octets, 0, 33); - - /* Check if we were asked for `any' */ - if (dwrq->flags && dwrq->length) { - if (dwrq->length > 32) - return -E2BIG; - essid.length = dwrq->length; - memcpy(essid.octets, extra, dwrq->length); - } else - essid.length = 0; - - if (priv->iw_mode != IW_MODE_MONITOR) - return mgt_set_request(priv, DOT11_OID_SSID, 0, &essid); - - /* If in monitor mode, just save to mib */ - mgt_set(priv, DOT11_OID_SSID, &essid); - return 0; - -} - -static int -prism54_get_essid(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct obj_ssid *essid; - union oid_res_t r; - int rvalue; - - rvalue = mgt_get_request(priv, DOT11_OID_SSID, 0, NULL, &r); - essid = r.ptr; - - if (essid->length) { - dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */ - /* if it is too big, trunk it */ - dwrq->length = min((u8)IW_ESSID_MAX_SIZE, essid->length); - } else { - dwrq->flags = 0; - dwrq->length = 0; - } - essid->octets[dwrq->length] = '\0'; - memcpy(extra, essid->octets, dwrq->length); - kfree(essid); - - return rvalue; -} - -/* Provides no functionality, just completes the ioctl. In essence this is a - * just a cosmetic ioctl. - */ -static int -prism54_set_nick(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - - if (dwrq->length > IW_ESSID_MAX_SIZE) - return -E2BIG; - - down_write(&priv->mib_sem); - memset(priv->nickname, 0, sizeof (priv->nickname)); - memcpy(priv->nickname, extra, dwrq->length); - up_write(&priv->mib_sem); - - return 0; -} - -static int -prism54_get_nick(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - - dwrq->length = 0; - - down_read(&priv->mib_sem); - dwrq->length = strlen(priv->nickname); - memcpy(extra, priv->nickname, dwrq->length); - up_read(&priv->mib_sem); - - return 0; -} - -/* Set the allowed Bitrates */ - -static int -prism54_set_rate(struct net_device *ndev, - struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - - islpci_private *priv = netdev_priv(ndev); - u32 rate, profile; - char *data; - int ret, i; - union oid_res_t r; - - if (vwrq->value == -1) { - /* auto mode. No limit. 
*/ - profile = 1; - return mgt_set_request(priv, DOT11_OID_PROFILES, 0, &profile); - } - - ret = mgt_get_request(priv, DOT11_OID_SUPPORTEDRATES, 0, NULL, &r); - if (ret) { - kfree(r.ptr); - return ret; - } - - rate = (u32) (vwrq->value / 500000); - data = r.ptr; - i = 0; - - while (data[i]) { - if (rate && (data[i] == rate)) { - break; - } - if (vwrq->value == i) { - break; - } - data[i] |= 0x80; - i++; - } - - if (!data[i]) { - kfree(r.ptr); - return -EINVAL; - } - - data[i] |= 0x80; - data[i + 1] = 0; - - /* Now, check if we want a fixed or auto value */ - if (vwrq->fixed) { - data[0] = data[i]; - data[1] = 0; - } - -/* - i = 0; - printk("prism54 rate: "); - while(data[i]) { - printk("%u ", data[i]); - i++; - } - printk("0\n"); -*/ - profile = -1; - ret = mgt_set_request(priv, DOT11_OID_PROFILES, 0, &profile); - ret |= mgt_set_request(priv, DOT11_OID_EXTENDEDRATES, 0, data); - ret |= mgt_set_request(priv, DOT11_OID_RATES, 0, data); - - kfree(r.ptr); - - return ret; -} - -/* Get the current bit rate */ -static int -prism54_get_rate(struct net_device *ndev, - struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - int rvalue; - char *data; - union oid_res_t r; - - /* Get the current bit rate */ - if ((rvalue = mgt_get_request(priv, GEN_OID_LINKSTATE, 0, NULL, &r))) - return rvalue; - vwrq->value = r.u * 500000; - - /* request the device for the enabled rates */ - rvalue = mgt_get_request(priv, DOT11_OID_RATES, 0, NULL, &r); - if (rvalue) { - kfree(r.ptr); - return rvalue; - } - data = r.ptr; - vwrq->fixed = (data[0] != 0) && (data[1] == 0); - kfree(r.ptr); - - return 0; -} - -static int -prism54_set_rts(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - - return mgt_set_request(priv, DOT11_OID_RTSTHRESH, 0, &vwrq->value); -} - -static int -prism54_get_rts(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - union oid_res_t r; - int rvalue; - - /* get the rts threshold */ - rvalue = mgt_get_request(priv, DOT11_OID_RTSTHRESH, 0, NULL, &r); - vwrq->value = r.u; - - return rvalue; -} - -static int -prism54_set_frag(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - - return mgt_set_request(priv, DOT11_OID_FRAGTHRESH, 0, &vwrq->value); -} - -static int -prism54_get_frag(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - union oid_res_t r; - int rvalue; - - rvalue = mgt_get_request(priv, DOT11_OID_FRAGTHRESH, 0, NULL, &r); - vwrq->value = r.u; - - return rvalue; -} - -/* Here we have (min,max) = max retries for (small frames, big frames). Where - * big frame <=> bigger than the rts threshold - * small frame <=> smaller than the rts threshold - * This is not really the behavior expected by the wireless tool but it seems - * to be a common behavior in other drivers. 
- */ - -static int -prism54_set_retry(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - u32 slimit = 0, llimit = 0; /* short and long limit */ - u32 lifetime = 0; - int rvalue = 0; - - if (vwrq->disabled) - /* we cannot disable this feature */ - return -EINVAL; - - if (vwrq->flags & IW_RETRY_LIMIT) { - if (vwrq->flags & IW_RETRY_SHORT) - slimit = vwrq->value; - else if (vwrq->flags & IW_RETRY_LONG) - llimit = vwrq->value; - else { - /* we are asked to set both */ - slimit = vwrq->value; - llimit = vwrq->value; - } - } - if (vwrq->flags & IW_RETRY_LIFETIME) - /* Wireless tools use us unit while the device uses 1024 us unit */ - lifetime = vwrq->value / 1024; - - /* now set what is requested */ - if (slimit) - rvalue = - mgt_set_request(priv, DOT11_OID_SHORTRETRIES, 0, &slimit); - if (llimit) - rvalue |= - mgt_set_request(priv, DOT11_OID_LONGRETRIES, 0, &llimit); - if (lifetime) - rvalue |= - mgt_set_request(priv, DOT11_OID_MAXTXLIFETIME, 0, - &lifetime); - return rvalue; -} - -static int -prism54_get_retry(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - union oid_res_t r; - int rvalue = 0; - vwrq->disabled = 0; /* It cannot be disabled */ - - if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { - /* we are asked for the life time */ - rvalue = - mgt_get_request(priv, DOT11_OID_MAXTXLIFETIME, 0, NULL, &r); - vwrq->value = r.u * 1024; - vwrq->flags = IW_RETRY_LIFETIME; - } else if ((vwrq->flags & IW_RETRY_LONG)) { - /* we are asked for the long retry limit */ - rvalue |= - mgt_get_request(priv, DOT11_OID_LONGRETRIES, 0, NULL, &r); - vwrq->value = r.u; - vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG; - } else { - /* default. get the short retry limit */ - rvalue |= - mgt_get_request(priv, DOT11_OID_SHORTRETRIES, 0, NULL, &r); - vwrq->value = r.u; - vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_SHORT; - } - - return rvalue; -} - -static int -prism54_set_encode(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - int rvalue = 0, force = 0; - int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0; - union oid_res_t r; - - /* with the new API, it's impossible to get a NULL pointer. - * New version of iwconfig set the IW_ENCODE_NOKEY flag - * when no key is given, but older versions don't. 
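prism54_set_encode(), which continues below, infers the cipher purely from the supplied key length. That classification, restated as a hypothetical helper (KEY_SIZE_* as defined at the top of isl_ioctl.c, DOT11_PRIV_* from the firmware OID headers):

static int example_classify_key(size_t len, u8 *type, u8 *stored_len)
{
	if (len > KEY_SIZE_TKIP)		/* larger than any cipher */
		return -EINVAL;
	if (len > KEY_SIZE_WEP104) {		/* 14..32 bytes: WPA-PSK TKIP */
		*type = DOT11_PRIV_TKIP;
		*stored_len = KEY_SIZE_TKIP;
	} else if (len > KEY_SIZE_WEP40) {	/* 6..13 bytes: WEP 104/128 */
		*type = DOT11_PRIV_WEP;
		*stored_len = KEY_SIZE_WEP104;
	} else {				/* 0..5 bytes: WEP 40/64 */
		*type = DOT11_PRIV_WEP;
		*stored_len = KEY_SIZE_WEP40;
	}
	return 0;
}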
*/ - - if (dwrq->length > 0) { - /* we have a key to set */ - int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; - int current_index; - struct obj_key key = { DOT11_PRIV_WEP, 0, "" }; - - /* get the current key index */ - rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r); - current_index = r.u; - /* Verify that the key is not marked as invalid */ - if (!(dwrq->flags & IW_ENCODE_NOKEY)) { - if (dwrq->length > KEY_SIZE_TKIP) { - /* User-provided key data too big */ - return -EINVAL; - } - if (dwrq->length > KEY_SIZE_WEP104) { - /* WPA-PSK TKIP */ - key.type = DOT11_PRIV_TKIP; - key.length = KEY_SIZE_TKIP; - } else if (dwrq->length > KEY_SIZE_WEP40) { - /* WEP 104/128 */ - key.length = KEY_SIZE_WEP104; - } else { - /* WEP 40/64 */ - key.length = KEY_SIZE_WEP40; - } - memset(key.key, 0, sizeof (key.key)); - memcpy(key.key, extra, dwrq->length); - - if ((index < 0) || (index > 3)) - /* no index provided use the current one */ - index = current_index; - - /* now send the key to the card */ - rvalue |= - mgt_set_request(priv, DOT11_OID_DEFKEYX, index, - &key); - } - /* - * If a valid key is set, encryption should be enabled - * (user may turn it off later). - * This is also how "iwconfig ethX key on" works - */ - if ((index == current_index) && (key.length > 0)) - force = 1; - } else { - int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; - if ((index >= 0) && (index <= 3)) { - /* we want to set the key index */ - rvalue |= - mgt_set_request(priv, DOT11_OID_DEFKEYID, 0, - &index); - } else { - if (!(dwrq->flags & IW_ENCODE_MODE)) { - /* we cannot do anything. Complain. */ - return -EINVAL; - } - } - } - /* now read the flags */ - if (dwrq->flags & IW_ENCODE_DISABLED) { - /* Encoding disabled, - * authen = DOT11_AUTH_OS; - * invoke = 0; - * exunencrypt = 0; */ - } - if (dwrq->flags & IW_ENCODE_OPEN) - /* Encode but accept non-encoded packets. No auth */ - invoke = 1; - if ((dwrq->flags & IW_ENCODE_RESTRICTED) || force) { - /* Refuse non-encoded packets. 
Auth */ - authen = DOT11_AUTH_BOTH; - invoke = 1; - exunencrypt = 1; - } - /* do the change if requested */ - if ((dwrq->flags & IW_ENCODE_MODE) || force) { - rvalue |= - mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen); - rvalue |= - mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke); - rvalue |= - mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, - &exunencrypt); - } - return rvalue; -} - -static int -prism54_get_encode(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct obj_key *key; - u32 devindex, index = (dwrq->flags & IW_ENCODE_INDEX) - 1; - u32 authen = 0, invoke = 0, exunencrypt = 0; - int rvalue; - union oid_res_t r; - - /* first get the flags */ - rvalue = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r); - authen = r.u; - rvalue |= mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r); - invoke = r.u; - rvalue |= mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r); - exunencrypt = r.u; - - if (invoke && (authen == DOT11_AUTH_BOTH) && exunencrypt) - dwrq->flags = IW_ENCODE_RESTRICTED; - else if ((authen == DOT11_AUTH_OS) && !exunencrypt) { - if (invoke) - dwrq->flags = IW_ENCODE_OPEN; - else - dwrq->flags = IW_ENCODE_DISABLED; - } else - /* The card should not work in this state */ - dwrq->flags = 0; - - /* get the current device key index */ - rvalue |= mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r); - devindex = r.u; - /* Now get the key, return it */ - if (index == -1 || index > 3) - /* no index provided, use the current one */ - index = devindex; - rvalue |= mgt_get_request(priv, DOT11_OID_DEFKEYX, index, NULL, &r); - key = r.ptr; - dwrq->length = key->length; - memcpy(extra, key->key, dwrq->length); - kfree(key); - /* return the used key index */ - dwrq->flags |= devindex + 1; - - return rvalue; -} - -static int -prism54_get_txpower(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - union oid_res_t r; - int rvalue; - - rvalue = mgt_get_request(priv, OID_INL_OUTPUTPOWER, 0, NULL, &r); - /* intersil firmware operates in 0.25 dBm (1/4 dBm) */ - vwrq->value = (s32) r.u / 4; - vwrq->fixed = 1; - /* radio is not turned of - * btw: how is possible to turn off only the radio - */ - vwrq->disabled = 0; - - return rvalue; -} - -static int -prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info, - struct iw_param *vwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - s32 u = vwrq->value; - - /* intersil firmware operates in 0.25 dBm (1/4) */ - u *= 4; - if (vwrq->disabled) { - /* don't know how to disable radio */ - printk(KERN_DEBUG - "%s: %s() disabling radio is not yet supported.\n", - priv->ndev->name, __func__); - return -ENOTSUPP; - } else if (vwrq->fixed) - /* currently only fixed value is supported */ - return mgt_set_request(priv, OID_INL_OUTPUTPOWER, 0, &u); - else { - printk(KERN_DEBUG - "%s: %s() auto power will be implemented later.\n", - priv->ndev->name, __func__); - return -ENOTSUPP; - } -} - -static int prism54_set_genie(struct net_device *ndev, - struct iw_request_info *info, - struct iw_point *data, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - int alen, ret = 0; - struct obj_attachment *attach; - - if (data->length > MAX_WPA_IE_LEN || - (data->length && extra == NULL)) - return -EINVAL; - - memcpy(priv->wpa_ie, extra, data->length); - priv->wpa_ie_len = data->length; - - alen = 
sizeof(*attach) + priv->wpa_ie_len; - attach = kzalloc(alen, GFP_KERNEL); - if (attach == NULL) - return -ENOMEM; - -#define WLAN_FC_TYPE_MGMT 0 -#define WLAN_FC_STYPE_ASSOC_REQ 0 -#define WLAN_FC_STYPE_REASSOC_REQ 2 - - /* Note: endianness is covered by mgt_set_varlen */ - attach->type = (WLAN_FC_TYPE_MGMT << 2) | - (WLAN_FC_STYPE_ASSOC_REQ << 4); - attach->id = -1; - attach->size = priv->wpa_ie_len; - memcpy(attach->data, extra, priv->wpa_ie_len); - - ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, - priv->wpa_ie_len); - if (ret == 0) { - attach->type = (WLAN_FC_TYPE_MGMT << 2) | - (WLAN_FC_STYPE_REASSOC_REQ << 4); - - ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, - priv->wpa_ie_len); - if (ret == 0) - printk(KERN_DEBUG "%s: WPA IE Attachment was set\n", - ndev->name); - } - - kfree(attach); - return ret; -} - - -static int prism54_get_genie(struct net_device *ndev, - struct iw_request_info *info, - struct iw_point *data, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - int len = priv->wpa_ie_len; - - if (len <= 0) { - data->length = 0; - return 0; - } - - if (data->length < len) - return -E2BIG; - - data->length = len; - memcpy(extra, priv->wpa_ie, len); - - return 0; -} - -static int prism54_set_auth(struct net_device *ndev, - struct iw_request_info *info, - union iwreq_data *wrqu, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct iw_param *param = &wrqu->param; - u32 mlmelevel = 0, authen = 0, dot1x = 0; - u32 exunencrypt = 0, privinvoked = 0, wpa = 0; - u32 old_wpa; - int ret = 0; - union oid_res_t r; - - if (islpci_get_state(priv) < PRV_STATE_INIT) - return 0; - - /* first get the flags */ - down_write(&priv->mib_sem); - wpa = old_wpa = priv->wpa; - up_write(&priv->mib_sem); - ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r); - authen = r.u; - ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r); - privinvoked = r.u; - ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r); - exunencrypt = r.u; - ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r); - dot1x = r.u; - ret = mgt_get_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, NULL, &r); - mlmelevel = r.u; - - if (ret < 0) - goto out; - - switch (param->flags & IW_AUTH_INDEX) { - case IW_AUTH_CIPHER_PAIRWISE: - case IW_AUTH_CIPHER_GROUP: - case IW_AUTH_KEY_MGMT: - break; - - case IW_AUTH_WPA_ENABLED: - /* Do the same thing as IW_AUTH_WPA_VERSION */ - if (param->value) { - wpa = 1; - privinvoked = 1; /* For privacy invoked */ - exunencrypt = 1; /* Filter out all unencrypted frames */ - dot1x = 0x01; /* To enable eap filter */ - mlmelevel = DOT11_MLME_EXTENDED; - authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */ - } else { - wpa = 0; - privinvoked = 0; - exunencrypt = 0; /* Do not filter un-encrypted data */ - dot1x = 0; - mlmelevel = DOT11_MLME_AUTO; - } - break; - - case IW_AUTH_WPA_VERSION: - if (param->value & IW_AUTH_WPA_VERSION_DISABLED) { - wpa = 0; - privinvoked = 0; - exunencrypt = 0; /* Do not filter un-encrypted data */ - dot1x = 0; - mlmelevel = DOT11_MLME_AUTO; - } else { - if (param->value & IW_AUTH_WPA_VERSION_WPA) - wpa = 1; - else if (param->value & IW_AUTH_WPA_VERSION_WPA2) - wpa = 2; - privinvoked = 1; /* For privacy invoked */ - exunencrypt = 1; /* Filter out all unencrypted frames */ - dot1x = 0x01; /* To enable eap filter */ - mlmelevel = DOT11_MLME_EXTENDED; - authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */ - } - break; - - case IW_AUTH_RX_UNENCRYPTED_EAPOL: - /* dot1x should be the opposite of 
RX_UNENCRYPTED_EAPOL; - * turn off dot1x when allowing receipt of unencrypted EAPOL - * frames, turn on dot1x when receipt should be disallowed - */ - dot1x = param->value ? 0 : 0x01; - break; - - case IW_AUTH_PRIVACY_INVOKED: - privinvoked = param->value ? 1 : 0; - break; - - case IW_AUTH_DROP_UNENCRYPTED: - exunencrypt = param->value ? 1 : 0; - break; - - case IW_AUTH_80211_AUTH_ALG: - if (param->value & IW_AUTH_ALG_SHARED_KEY) { - /* Only WEP uses _SK and _BOTH */ - if (wpa > 0) { - ret = -EINVAL; - goto out; - } - authen = DOT11_AUTH_SK; - } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) { - authen = DOT11_AUTH_OS; - } else { - ret = -EINVAL; - goto out; - } - break; - - default: - return -EOPNOTSUPP; - } - - /* Set all the values */ - down_write(&priv->mib_sem); - priv->wpa = wpa; - up_write(&priv->mib_sem); - mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen); - mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &privinvoked); - mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, &exunencrypt); - mgt_set_request(priv, DOT11_OID_DOT1XENABLE, 0, &dot1x); - mgt_set_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, &mlmelevel); - -out: - return ret; -} - -static int prism54_get_auth(struct net_device *ndev, - struct iw_request_info *info, - union iwreq_data *wrqu, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct iw_param *param = &wrqu->param; - u32 wpa = 0; - int ret = 0; - union oid_res_t r; - - if (islpci_get_state(priv) < PRV_STATE_INIT) - return 0; - - /* first get the flags */ - down_write(&priv->mib_sem); - wpa = priv->wpa; - up_write(&priv->mib_sem); - - switch (param->flags & IW_AUTH_INDEX) { - case IW_AUTH_CIPHER_PAIRWISE: - case IW_AUTH_CIPHER_GROUP: - case IW_AUTH_KEY_MGMT: - /* - * wpa_supplicant will control these internally - */ - ret = -EOPNOTSUPP; - break; - - case IW_AUTH_WPA_VERSION: - switch (wpa) { - case 1: - param->value = IW_AUTH_WPA_VERSION_WPA; - break; - case 2: - param->value = IW_AUTH_WPA_VERSION_WPA2; - break; - case 0: - default: - param->value = IW_AUTH_WPA_VERSION_DISABLED; - break; - } - break; - - case IW_AUTH_DROP_UNENCRYPTED: - ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r); - if (ret >= 0) - param->value = r.u > 0 ? 1 : 0; - break; - - case IW_AUTH_80211_AUTH_ALG: - ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r); - if (ret >= 0) { - switch (r.u) { - case DOT11_AUTH_OS: - param->value = IW_AUTH_ALG_OPEN_SYSTEM; - break; - case DOT11_AUTH_BOTH: - case DOT11_AUTH_SK: - param->value = IW_AUTH_ALG_SHARED_KEY; - break; - case DOT11_AUTH_NONE: - default: - param->value = 0; - break; - } - } - break; - - case IW_AUTH_WPA_ENABLED: - param->value = wpa > 0 ? 1 : 0; - break; - - case IW_AUTH_RX_UNENCRYPTED_EAPOL: - ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r); - if (ret >= 0) - param->value = r.u > 0 ? 1 : 0; - break; - - case IW_AUTH_PRIVACY_INVOKED: - ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r); - if (ret >= 0) - param->value = r.u > 0 ? 
1 : 0; - break; - - default: - return -EOPNOTSUPP; - } - return ret; -} - -static int prism54_set_encodeext(struct net_device *ndev, - struct iw_request_info *info, - union iwreq_data *wrqu, - char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct iw_point *encoding = &wrqu->encoding; - struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; - int idx, alg = ext->alg, set_key = 1; - union oid_res_t r; - int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0; - int ret = 0; - - if (islpci_get_state(priv) < PRV_STATE_INIT) - return 0; - - /* Determine and validate the key index */ - idx = (encoding->flags & IW_ENCODE_INDEX) - 1; - if (idx) { - if (idx < 0 || idx > 3) - return -EINVAL; - } else { - ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r); - if (ret < 0) - goto out; - idx = r.u; - } - - if (encoding->flags & IW_ENCODE_DISABLED) - alg = IW_ENCODE_ALG_NONE; - - if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { - /* Only set transmit key index here, actual - * key is set below if needed. - */ - ret = mgt_set_request(priv, DOT11_OID_DEFKEYID, 0, &idx); - set_key = ext->key_len > 0 ? 1 : 0; - } - - if (set_key) { - struct obj_key key = { DOT11_PRIV_WEP, 0, "" }; - switch (alg) { - case IW_ENCODE_ALG_NONE: - break; - case IW_ENCODE_ALG_WEP: - if (ext->key_len > KEY_SIZE_WEP104) { - ret = -EINVAL; - goto out; - } - if (ext->key_len > KEY_SIZE_WEP40) - key.length = KEY_SIZE_WEP104; - else - key.length = KEY_SIZE_WEP40; - break; - case IW_ENCODE_ALG_TKIP: - if (ext->key_len > KEY_SIZE_TKIP) { - ret = -EINVAL; - goto out; - } - key.type = DOT11_PRIV_TKIP; - key.length = KEY_SIZE_TKIP; - break; - default: - return -EINVAL; - } - - if (key.length) { - memset(key.key, 0, sizeof(key.key)); - memcpy(key.key, ext->key, ext->key_len); - ret = mgt_set_request(priv, DOT11_OID_DEFKEYX, idx, - &key); - if (ret < 0) - goto out; - } - } - - /* Read the flags */ - if (encoding->flags & IW_ENCODE_DISABLED) { - /* Encoding disabled, - * authen = DOT11_AUTH_OS; - * invoke = 0; - * exunencrypt = 0; */ - } - if (encoding->flags & IW_ENCODE_OPEN) { - /* Encode but accept non-encoded packets. No auth */ - invoke = 1; - } - if (encoding->flags & IW_ENCODE_RESTRICTED) { - /* Refuse non-encoded packets. 
Auth */ - authen = DOT11_AUTH_BOTH; - invoke = 1; - exunencrypt = 1; - } - - /* do the change if requested */ - if (encoding->flags & IW_ENCODE_MODE) { - ret = mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, - &authen); - ret = mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, - &invoke); - ret = mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, - &exunencrypt); - } - -out: - return ret; -} - - -static int prism54_get_encodeext(struct net_device *ndev, - struct iw_request_info *info, - union iwreq_data *wrqu, - char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct iw_point *encoding = &wrqu->encoding; - struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; - int idx, max_key_len; - union oid_res_t r; - int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0, wpa = 0; - int ret = 0; - - if (islpci_get_state(priv) < PRV_STATE_INIT) - return 0; - - /* first get the flags */ - ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r); - authen = r.u; - ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r); - invoke = r.u; - ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r); - exunencrypt = r.u; - if (ret < 0) - goto out; - - max_key_len = encoding->length - sizeof(*ext); - if (max_key_len < 0) - return -EINVAL; - - idx = (encoding->flags & IW_ENCODE_INDEX) - 1; - if (idx) { - if (idx < 0 || idx > 3) - return -EINVAL; - } else { - ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r); - if (ret < 0) - goto out; - idx = r.u; - } - - encoding->flags = idx + 1; - memset(ext, 0, sizeof(*ext)); - - switch (authen) { - case DOT11_AUTH_BOTH: - case DOT11_AUTH_SK: - wrqu->encoding.flags |= IW_ENCODE_RESTRICTED; - fallthrough; - case DOT11_AUTH_OS: - default: - wrqu->encoding.flags |= IW_ENCODE_OPEN; - break; - } - - down_write(&priv->mib_sem); - wpa = priv->wpa; - up_write(&priv->mib_sem); - - if (authen == DOT11_AUTH_OS && !exunencrypt && !invoke && !wpa) { - /* No encryption */ - ext->alg = IW_ENCODE_ALG_NONE; - ext->key_len = 0; - wrqu->encoding.flags |= IW_ENCODE_DISABLED; - } else { - struct obj_key *key; - - ret = mgt_get_request(priv, DOT11_OID_DEFKEYX, idx, NULL, &r); - if (ret < 0) - goto out; - key = r.ptr; - if (max_key_len < key->length) { - ret = -E2BIG; - goto out; - } - memcpy(ext->key, key->key, key->length); - ext->key_len = key->length; - - switch (key->type) { - case DOT11_PRIV_TKIP: - ext->alg = IW_ENCODE_ALG_TKIP; - break; - default: - case DOT11_PRIV_WEP: - ext->alg = IW_ENCODE_ALG_WEP; - break; - } - wrqu->encoding.flags |= IW_ENCODE_ENABLED; - } - -out: - return ret; -} - - -static int -prism54_reset(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) -{ - islpci_reset(netdev_priv(ndev), 0); - - return 0; -} - -static int -prism54_get_oid(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - union oid_res_t r; - int rvalue; - enum oid_num_t n = dwrq->flags; - - rvalue = mgt_get_request(netdev_priv(ndev), n, 0, NULL, &r); - dwrq->length = mgt_response_to_str(n, &r, extra); - if ((isl_oid[n].flags & OID_FLAG_TYPE) != OID_TYPE_U32) - kfree(r.ptr); - return rvalue; -} - -static int -prism54_set_u32(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) -{ - u32 oid = uwrq[0], u = uwrq[1]; - - return mgt_set_request(netdev_priv(ndev), oid, 0, &u); -} - -static int -prism54_set_raw(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - u32 oid = dwrq->flags; - - return 
mgt_set_request(netdev_priv(ndev), oid, 0, extra); -} - -void -prism54_acl_init(struct islpci_acl *acl) -{ - mutex_init(&acl->lock); - INIT_LIST_HEAD(&acl->mac_list); - acl->size = 0; - acl->policy = MAC_POLICY_OPEN; -} - -static void -prism54_clear_mac(struct islpci_acl *acl) -{ - struct list_head *ptr, *next; - struct mac_entry *entry; - - mutex_lock(&acl->lock); - - if (acl->size == 0) { - mutex_unlock(&acl->lock); - return; - } - - for (ptr = acl->mac_list.next, next = ptr->next; - ptr != &acl->mac_list; ptr = next, next = ptr->next) { - entry = list_entry(ptr, struct mac_entry, _list); - list_del(ptr); - kfree(entry); - } - acl->size = 0; - mutex_unlock(&acl->lock); -} - -void -prism54_acl_clean(struct islpci_acl *acl) -{ - prism54_clear_mac(acl); -} - -static int -prism54_add_mac(struct net_device *ndev, struct iw_request_info *info, - struct sockaddr *awrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct islpci_acl *acl = &priv->acl; - struct mac_entry *entry; - struct sockaddr *addr = (struct sockaddr *) extra; - - if (addr->sa_family != ARPHRD_ETHER) - return -EOPNOTSUPP; - - entry = kmalloc(sizeof (struct mac_entry), GFP_KERNEL); - if (entry == NULL) - return -ENOMEM; - - memcpy(entry->addr, addr->sa_data, ETH_ALEN); - - if (mutex_lock_interruptible(&acl->lock)) { - kfree(entry); - return -ERESTARTSYS; - } - list_add_tail(&entry->_list, &acl->mac_list); - acl->size++; - mutex_unlock(&acl->lock); - - return 0; -} - -static int -prism54_del_mac(struct net_device *ndev, struct iw_request_info *info, - struct sockaddr *awrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct islpci_acl *acl = &priv->acl; - struct mac_entry *entry; - struct sockaddr *addr = (struct sockaddr *) extra; - - if (addr->sa_family != ARPHRD_ETHER) - return -EOPNOTSUPP; - - if (mutex_lock_interruptible(&acl->lock)) - return -ERESTARTSYS; - list_for_each_entry(entry, &acl->mac_list, _list) { - if (ether_addr_equal(entry->addr, addr->sa_data)) { - list_del(&entry->_list); - acl->size--; - kfree(entry); - mutex_unlock(&acl->lock); - return 0; - } - } - mutex_unlock(&acl->lock); - return -EINVAL; -} - -static int -prism54_get_mac(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct islpci_acl *acl = &priv->acl; - struct mac_entry *entry; - struct sockaddr *dst = (struct sockaddr *) extra; - - dwrq->length = 0; - - if (mutex_lock_interruptible(&acl->lock)) - return -ERESTARTSYS; - - list_for_each_entry(entry, &acl->mac_list, _list) { - memcpy(dst->sa_data, entry->addr, ETH_ALEN); - dst->sa_family = ARPHRD_ETHER; - dwrq->length++; - dst++; - } - mutex_unlock(&acl->lock); - return 0; -} - -/* Setting policy also clears the MAC acl, even if we don't change the default - * policy - */ - -static int -prism54_set_policy(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct islpci_acl *acl = &priv->acl; - u32 mlmeautolevel; - - prism54_clear_mac(acl); - - if ((*uwrq < MAC_POLICY_OPEN) || (*uwrq > MAC_POLICY_REJECT)) - return -EINVAL; - - down_write(&priv->mib_sem); - - acl->policy = *uwrq; - - /* the ACL code needs an intermediate mlmeautolevel */ - if ((priv->iw_mode == IW_MODE_MASTER) && - (acl->policy != MAC_POLICY_OPEN)) - mlmeautolevel = DOT11_MLME_INTERMEDIATE; - else - mlmeautolevel = CARD_DEFAULT_MLME_MODE; - if (priv->wpa) - mlmeautolevel = DOT11_MLME_EXTENDED; - mgt_set(priv, 
DOT11_OID_MLMEAUTOLEVEL, &mlmeautolevel); - /* restart the card with our new policy */ - if (mgt_commit(priv)) { - up_write(&priv->mib_sem); - return -EIO; - } - up_write(&priv->mib_sem); - - return 0; -} - -static int -prism54_get_policy(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct islpci_acl *acl = &priv->acl; - - *uwrq = acl->policy; - - return 0; -} - -/* Return 1 only if client should be accepted. */ - -static int -prism54_mac_accept(struct islpci_acl *acl, char *mac) -{ - struct mac_entry *entry; - int res = 0; - - if (mutex_lock_interruptible(&acl->lock)) - return -ERESTARTSYS; - - if (acl->policy == MAC_POLICY_OPEN) { - mutex_unlock(&acl->lock); - return 1; - } - - list_for_each_entry(entry, &acl->mac_list, _list) { - if (memcmp(entry->addr, mac, ETH_ALEN) == 0) { - res = 1; - break; - } - } - res = (acl->policy == MAC_POLICY_ACCEPT) ? !res : res; - mutex_unlock(&acl->lock); - - return res; -} - -static int -prism54_kick_all(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *dwrq, char *extra) -{ - struct obj_mlme *mlme; - int rvalue; - - mlme = kmalloc(sizeof (struct obj_mlme), GFP_KERNEL); - if (mlme == NULL) - return -ENOMEM; - - /* Tell the card to kick every client */ - mlme->id = 0; - rvalue = - mgt_set_request(netdev_priv(ndev), DOT11_OID_DISASSOCIATE, 0, mlme); - kfree(mlme); - - return rvalue; -} - -static int -prism54_kick_mac(struct net_device *ndev, struct iw_request_info *info, - struct sockaddr *awrq, char *extra) -{ - struct obj_mlme *mlme; - struct sockaddr *addr = (struct sockaddr *) extra; - int rvalue; - - if (addr->sa_family != ARPHRD_ETHER) - return -EOPNOTSUPP; - - mlme = kmalloc(sizeof (struct obj_mlme), GFP_KERNEL); - if (mlme == NULL) - return -ENOMEM; - - /* Tell the card to only kick the corresponding bastard */ - memcpy(mlme->address, addr->sa_data, ETH_ALEN); - mlme->id = -1; - rvalue = - mgt_set_request(netdev_priv(ndev), DOT11_OID_DISASSOCIATE, 0, mlme); - - kfree(mlme); - - return rvalue; -} - -/* Translate a TRAP oid into a wireless event. Called in islpci_mgt_receive. */ - -static void -format_event(islpci_private *priv, char *dest, const char *str, - const struct obj_mlme *mlme, u16 *length, int error) -{ - int n = snprintf(dest, IW_CUSTOM_MAX, - "%s %s %pM %s (%2.2X)", - str, - ((priv->iw_mode == IW_MODE_MASTER) ? "from" : "to"), - mlme->address, - (error ? (mlme->code ? 
" : REJECTED " : " : ACCEPTED ") - : ""), mlme->code); - WARN_ON(n >= IW_CUSTOM_MAX); - *length = n; -} - -static void -send_formatted_event(islpci_private *priv, const char *str, - const struct obj_mlme *mlme, int error) -{ - union iwreq_data wrqu; - char *memptr; - - memptr = kmalloc(IW_CUSTOM_MAX, GFP_KERNEL); - if (!memptr) - return; - wrqu.data.pointer = memptr; - wrqu.data.length = 0; - format_event(priv, memptr, str, mlme, &wrqu.data.length, - error); - wireless_send_event(priv->ndev, IWEVCUSTOM, &wrqu, memptr); - kfree(memptr); -} - -static void -send_simple_event(islpci_private *priv, const char *str) -{ - union iwreq_data wrqu; - char *memptr; - int n = strlen(str); - - memptr = kmalloc(IW_CUSTOM_MAX, GFP_KERNEL); - if (!memptr) - return; - BUG_ON(n >= IW_CUSTOM_MAX); - wrqu.data.pointer = memptr; - wrqu.data.length = n; - strcpy(memptr, str); - wireless_send_event(priv->ndev, IWEVCUSTOM, &wrqu, memptr); - kfree(memptr); -} - -static void -link_changed(struct net_device *ndev, u32 bitrate) -{ - islpci_private *priv = netdev_priv(ndev); - - if (bitrate) { - netif_carrier_on(ndev); - if (priv->iw_mode == IW_MODE_INFRA) { - union iwreq_data uwrq; - prism54_get_wap(ndev, NULL, (struct sockaddr *) &uwrq, - NULL); - wireless_send_event(ndev, SIOCGIWAP, &uwrq, NULL); - } else - send_simple_event(netdev_priv(ndev), - "Link established"); - } else { - netif_carrier_off(ndev); - send_simple_event(netdev_priv(ndev), "Link lost"); - } -} - -/* Beacon/ProbeResp payload header */ -struct ieee80211_beacon_phdr { - u8 timestamp[8]; - u16 beacon_int; - u16 capab_info; -} __packed; - -#define WLAN_EID_GENERIC 0xdd -static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 }; - -static void -prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid, - u8 *wpa_ie, size_t wpa_ie_len) -{ - struct list_head *ptr; - struct islpci_bss_wpa_ie *bss = NULL; - - if (wpa_ie_len > MAX_WPA_IE_LEN) - wpa_ie_len = MAX_WPA_IE_LEN; - - mutex_lock(&priv->wpa_lock); - - /* try to use existing entry */ - list_for_each(ptr, &priv->bss_wpa_list) { - bss = list_entry(ptr, struct islpci_bss_wpa_ie, list); - if (memcmp(bss->bssid, bssid, ETH_ALEN) == 0) { - list_move(&bss->list, &priv->bss_wpa_list); - break; - } - bss = NULL; - } - - if (bss == NULL) { - /* add a new BSS entry; if max number of entries is already - * reached, replace the least recently updated */ - if (priv->num_bss_wpa >= MAX_BSS_WPA_IE_COUNT) { - bss = list_entry(priv->bss_wpa_list.prev, - struct islpci_bss_wpa_ie, list); - list_del(&bss->list); - } else { - bss = kzalloc(sizeof (*bss), GFP_ATOMIC); - if (bss != NULL) - priv->num_bss_wpa++; - } - if (bss != NULL) { - memcpy(bss->bssid, bssid, ETH_ALEN); - list_add(&bss->list, &priv->bss_wpa_list); - } - } - - if (bss != NULL) { - memcpy(bss->wpa_ie, wpa_ie, wpa_ie_len); - bss->wpa_ie_len = wpa_ie_len; - bss->last_update = jiffies; - } else { - printk(KERN_DEBUG "Failed to add BSS WPA entry for " - "%pM\n", bssid); - } - - /* expire old entries from WPA list */ - while (priv->num_bss_wpa > 0) { - bss = list_entry(priv->bss_wpa_list.prev, - struct islpci_bss_wpa_ie, list); - if (!time_after(jiffies, bss->last_update + 60 * HZ)) - break; - - list_del(&bss->list); - priv->num_bss_wpa--; - kfree(bss); - } - - mutex_unlock(&priv->wpa_lock); -} - -static size_t -prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie) -{ - struct list_head *ptr; - struct islpci_bss_wpa_ie *bss = NULL; - size_t len = 0; - - mutex_lock(&priv->wpa_lock); - - list_for_each(ptr, &priv->bss_wpa_list) { - bss = list_entry(ptr, struct 
islpci_bss_wpa_ie, list); - if (memcmp(bss->bssid, bssid, ETH_ALEN) == 0) - break; - bss = NULL; - } - if (bss) { - len = bss->wpa_ie_len; - memcpy(wpa_ie, bss->wpa_ie, len); - } - mutex_unlock(&priv->wpa_lock); - - return len; -} - -void -prism54_wpa_bss_ie_init(islpci_private *priv) -{ - INIT_LIST_HEAD(&priv->bss_wpa_list); - mutex_init(&priv->wpa_lock); -} - -void -prism54_wpa_bss_ie_clean(islpci_private *priv) -{ - struct islpci_bss_wpa_ie *bss, *n; - - list_for_each_entry_safe(bss, n, &priv->bss_wpa_list, list) { - kfree(bss); - } -} - -static void -prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr, - u8 *payload, size_t len) -{ - struct ieee80211_beacon_phdr *hdr; - u8 *pos, *end; - - if (!priv->wpa) - return; - - hdr = (struct ieee80211_beacon_phdr *) payload; - pos = (u8 *) (hdr + 1); - end = payload + len; - while (pos < end) { - if (pos + 2 + pos[1] > end) { - printk(KERN_DEBUG "Parsing Beacon/ProbeResp failed " - "for %pM\n", addr); - return; - } - if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 && - memcmp(pos + 2, wpa_oid, 4) == 0) { - prism54_wpa_bss_ie_add(priv, addr, pos, pos[1] + 2); - return; - } - pos += 2 + pos[1]; - } -} - -static void -handle_request(islpci_private *priv, struct obj_mlme *mlme, enum oid_num_t oid) -{ - if (((mlme->state == DOT11_STATE_AUTHING) || - (mlme->state == DOT11_STATE_ASSOCING)) - && mgt_mlme_answer(priv)) { - /* Someone is requesting auth and we must respond. Just send back - * the trap with error code set accordingly. - */ - mlme->code = prism54_mac_accept(&priv->acl, - mlme->address) ? 0 : 1; - mgt_set_request(priv, oid, 0, mlme); - } -} - -static int -prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, - char *data) -{ - struct obj_mlme *mlme = (struct obj_mlme *) data; - struct obj_mlmeex *mlmeex = (struct obj_mlmeex *) data; - struct obj_mlmeex *confirm; - u8 wpa_ie[MAX_WPA_IE_LEN]; - int wpa_ie_len; - size_t len = 0; /* u16, better? */ - u8 *payload = NULL, *pos = NULL; - int ret; - - /* I think all trapable objects are listed here. - * Some oids have a EX version. The difference is that they are emitted - * in DOT11_MLME_EXTENDED mode (set with DOT11_OID_MLMEAUTOLEVEL) - * with more info. - * The few events already defined by the wireless tools are not really - * suited. We use the more flexible custom event facility. 
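The "custom event facility" referred to here is IWEVCUSTOM: the driver formats a free-form string and hands it to the wireless-extensions core, which forwards it to user space (visible with iwevent). A minimal sketch of the pattern, following send_simple_event() above and assuming only the API from <net/iw_handler.h>:

    static void report_custom_event(struct net_device *ndev, const char *str)
    {
            union iwreq_data wrqu;
            char *buf;

            buf = kmalloc(IW_CUSTOM_MAX, GFP_KERNEL);
            if (!buf)
                    return;
            /* The payload is just a NUL-terminated string of our choosing. */
            wrqu.data.length = scnprintf(buf, IW_CUSTOM_MAX, "%s", str);
            wrqu.data.pointer = buf;
            wireless_send_event(ndev, IWEVCUSTOM, &wrqu, buf);
            kfree(buf);
    }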
- */ - - if (oid >= DOT11_OID_BEACON) { - len = mlmeex->size; - payload = pos = mlmeex->data; - } - - /* I fear prism54_process_bss_data won't work with big endian data */ - if ((oid == DOT11_OID_BEACON) || (oid == DOT11_OID_PROBE)) - prism54_process_bss_data(priv, oid, mlmeex->address, - payload, len); - - mgt_le_to_cpu(isl_oid[oid].flags & OID_FLAG_TYPE, (void *) mlme); - - switch (oid) { - - case GEN_OID_LINKSTATE: - link_changed(priv->ndev, (u32) *data); - break; - - case DOT11_OID_MICFAILURE: - send_simple_event(priv, "Mic failure"); - break; - - case DOT11_OID_DEAUTHENTICATE: - send_formatted_event(priv, "DeAuthenticate request", mlme, 0); - break; - - case DOT11_OID_AUTHENTICATE: - handle_request(priv, mlme, oid); - send_formatted_event(priv, "Authenticate request", mlme, 1); - break; - - case DOT11_OID_DISASSOCIATE: - send_formatted_event(priv, "Disassociate request", mlme, 0); - break; - - case DOT11_OID_ASSOCIATE: - handle_request(priv, mlme, oid); - send_formatted_event(priv, "Associate request", mlme, 1); - break; - - case DOT11_OID_REASSOCIATE: - handle_request(priv, mlme, oid); - send_formatted_event(priv, "ReAssociate request", mlme, 1); - break; - - case DOT11_OID_BEACON: - send_formatted_event(priv, - "Received a beacon from an unknown AP", - mlme, 0); - break; - - case DOT11_OID_PROBE: - /* we received a probe from a client. */ - send_formatted_event(priv, "Received a probe from client", mlme, - 0); - break; - - /* Note : "mlme" is actually a "struct obj_mlmeex *" here, but this - * is backward compatible layout-wise with "struct obj_mlme". - */ - - case DOT11_OID_DEAUTHENTICATEEX: - send_formatted_event(priv, "DeAuthenticate request", mlme, 0); - break; - - case DOT11_OID_AUTHENTICATEEX: - handle_request(priv, mlme, oid); - send_formatted_event(priv, "Authenticate request (ex)", mlme, 1); - - if (priv->iw_mode != IW_MODE_MASTER - && mlmeex->state != DOT11_STATE_AUTHING) - break; - - confirm = kmalloc(sizeof(struct obj_mlmeex) + 6, GFP_ATOMIC); - - if (!confirm) - break; - - memcpy(&confirm->address, mlmeex->address, ETH_ALEN); - printk(KERN_DEBUG "Authenticate from: address:\t%pM\n", - mlmeex->address); - confirm->id = -1; /* or mlmeex->id ? 
*/ - confirm->state = 0; /* not used */ - confirm->code = 0; - confirm->size = 6; - confirm->data[0] = 0x00; - confirm->data[1] = 0x00; - confirm->data[2] = 0x02; - confirm->data[3] = 0x00; - confirm->data[4] = 0x00; - confirm->data[5] = 0x00; - - ret = mgt_set_varlen(priv, DOT11_OID_ASSOCIATEEX, confirm, 6); - - kfree(confirm); - if (ret) - return ret; - break; - - case DOT11_OID_DISASSOCIATEEX: - send_formatted_event(priv, "Disassociate request (ex)", mlme, 0); - break; - - case DOT11_OID_ASSOCIATEEX: - handle_request(priv, mlme, oid); - send_formatted_event(priv, "Associate request (ex)", mlme, 1); - - if (priv->iw_mode != IW_MODE_MASTER - && mlmeex->state != DOT11_STATE_ASSOCING) - break; - - confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC); - - if (!confirm) - break; - - memcpy(&confirm->address, mlmeex->address, ETH_ALEN); - - confirm->id = ((struct obj_mlmeex *)mlme)->id; - confirm->state = 0; /* not used */ - confirm->code = 0; - - wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie); - - if (!wpa_ie_len) { - printk(KERN_DEBUG "No WPA IE found from address:\t%pM\n", - mlmeex->address); - kfree(confirm); - break; - } - - confirm->size = wpa_ie_len; - memcpy(&confirm->data, wpa_ie, wpa_ie_len); - - mgt_set_varlen(priv, oid, confirm, wpa_ie_len); - - kfree(confirm); - - break; - - case DOT11_OID_REASSOCIATEEX: - handle_request(priv, mlme, oid); - send_formatted_event(priv, "Reassociate request (ex)", mlme, 1); - - if (priv->iw_mode != IW_MODE_MASTER - && mlmeex->state != DOT11_STATE_ASSOCING) - break; - - confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC); - - if (!confirm) - break; - - memcpy(&confirm->address, mlmeex->address, ETH_ALEN); - - confirm->id = mlmeex->id; - confirm->state = 0; /* not used */ - confirm->code = 0; - - wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie); - - if (!wpa_ie_len) { - printk(KERN_DEBUG "No WPA IE found from address:\t%pM\n", - mlmeex->address); - kfree(confirm); - break; - } - - confirm->size = wpa_ie_len; - memcpy(&confirm->data, wpa_ie, wpa_ie_len); - - mgt_set_varlen(priv, oid, confirm, wpa_ie_len); - - kfree(confirm); - - break; - - default: - return -EINVAL; - } - - return 0; -} - -/* - * Process a device trap. This is called via schedule_work(), outside of - * interrupt context, no locks held. 
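This is the standard kernel bottom-half deferral: the interrupt path queues a work item, and the worker later runs in process context where it may sleep, recovering its containing object via container_of(). Reduced to a self-contained sketch (struct and function names invented for the example; the real islpci_mgmtframe carries more state):

    #include <linux/workqueue.h>
    #include <linux/netdevice.h>

    struct my_frame {
            struct net_device *ndev;
            struct work_struct ws;
    };

    static void my_frame_work(struct work_struct *work)
    {
            /* Recover the frame that embeds this work item. */
            struct my_frame *frame = container_of(work, struct my_frame, ws);

            /* ... process frame->ndev; process context, may sleep ... */
    }

    static void my_frame_defer(struct my_frame *frame)
    {
            INIT_WORK(&frame->ws, my_frame_work);
            schedule_work(&frame->ws);      /* safe from hard-IRQ context */
    }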
- */ -void -prism54_process_trap(struct work_struct *work) -{ - struct islpci_mgmtframe *frame = - container_of(work, struct islpci_mgmtframe, ws); - struct net_device *ndev = frame->ndev; - enum oid_num_t n = mgt_oidtonum(frame->header->oid); - - if (n != OID_NUM_LAST) - prism54_process_trap_helper(netdev_priv(ndev), n, frame->data); - islpci_mgt_release(frame); -} - -int -prism54_set_mac_address(struct net_device *ndev, void *addr) -{ - islpci_private *priv = netdev_priv(ndev); - int ret; - - if (ndev->addr_len != 6) - return -EINVAL; - ret = mgt_set_request(priv, GEN_OID_MACADDRESS, 0, - &((struct sockaddr *) addr)->sa_data); - if (!ret) - memcpy(priv->ndev->dev_addr, - &((struct sockaddr *) addr)->sa_data, ETH_ALEN); - - return ret; -} - -#define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12 - -static int -prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - u32 mlme, authen, dot1x, filter, wep; - - if (islpci_get_state(priv) < PRV_STATE_INIT) - return 0; - - wep = 1; /* For privacy invoked */ - filter = 1; /* Filter out all unencrypted frames */ - dot1x = 0x01; /* To enable eap filter */ - mlme = DOT11_MLME_EXTENDED; - authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */ - - down_write(&priv->mib_sem); - priv->wpa = *uwrq; - - switch (priv->wpa) { - default: - case 0: /* Clears/disables WPA and friends */ - wep = 0; - filter = 0; /* Do not filter un-encrypted data */ - dot1x = 0; - mlme = DOT11_MLME_AUTO; - printk("%s: Disabling WPA\n", ndev->name); - break; - case 2: - case 1: /* WPA */ - printk("%s: Enabling WPA\n", ndev->name); - break; - } - up_write(&priv->mib_sem); - - mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen); - mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &wep); - mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, &filter); - mgt_set_request(priv, DOT11_OID_DOT1XENABLE, 0, &dot1x); - mgt_set_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, &mlme); - - return 0; -} - -static int -prism54_get_wpa(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - *uwrq = priv->wpa; - return 0; -} - -static int -prism54_set_prismhdr(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - priv->monitor_type = - (*uwrq ? 
ARPHRD_IEEE80211_PRISM : ARPHRD_IEEE80211); - if (priv->iw_mode == IW_MODE_MONITOR) - priv->ndev->type = priv->monitor_type; - - return 0; -} - -static int -prism54_get_prismhdr(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - *uwrq = (priv->monitor_type == ARPHRD_IEEE80211_PRISM); - return 0; -} - -static int -prism54_debug_oid(struct net_device *ndev, struct iw_request_info *info, - __u32 * uwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - - priv->priv_oid = *uwrq; - printk("%s: oid 0x%08X\n", ndev->name, *uwrq); - - return 0; -} - -static int -prism54_debug_get_oid(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *data, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct islpci_mgmtframe *response; - int ret = -EIO; - - printk("%s: get_oid 0x%08X\n", ndev->name, priv->priv_oid); - data->length = 0; - - if (islpci_get_state(priv) >= PRV_STATE_INIT) { - ret = - islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET, - priv->priv_oid, extra, 256, - &response); - printk("%s: ret: %i\n", ndev->name, ret); - if (ret || !response - || response->header->operation == PIMFOR_OP_ERROR) { - if (response) { - islpci_mgt_release(response); - } - printk("%s: EIO\n", ndev->name); - ret = -EIO; - } - if (!ret) { - data->length = response->header->length; - memcpy(extra, response->data, data->length); - islpci_mgt_release(response); - printk("%s: len: %i\n", ndev->name, data->length); - } - } - - return ret; -} - -static int -prism54_debug_set_oid(struct net_device *ndev, struct iw_request_info *info, - struct iw_point *data, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - struct islpci_mgmtframe *response; - int ret = 0, response_op = PIMFOR_OP_ERROR; - - printk("%s: set_oid 0x%08X\tlen: %d\n", ndev->name, priv->priv_oid, - data->length); - - if (islpci_get_state(priv) >= PRV_STATE_INIT) { - ret = - islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET, - priv->priv_oid, extra, data->length, - &response); - printk("%s: ret: %i\n", ndev->name, ret); - if (ret || !response - || response->header->operation == PIMFOR_OP_ERROR) { - if (response) { - islpci_mgt_release(response); - } - printk("%s: EIO\n", ndev->name); - ret = -EIO; - } - if (!ret) { - response_op = response->header->operation; - printk("%s: response_op: %i\n", ndev->name, - response_op); - islpci_mgt_release(response); - } - } - - return (ret ? 
ret : -EINPROGRESS); -} - -static int -prism54_set_spy(struct net_device *ndev, - struct iw_request_info *info, - union iwreq_data *uwrq, char *extra) -{ - islpci_private *priv = netdev_priv(ndev); - u32 u; - enum oid_num_t oid = OID_INL_CONFIG; - - down_write(&priv->mib_sem); - mgt_get(priv, OID_INL_CONFIG, &u); - - if ((uwrq->data.length == 0) && (priv->spy_data.spy_number > 0)) - /* disable spy */ - u &= ~INL_CONFIG_RXANNEX; - else if ((uwrq->data.length > 0) && (priv->spy_data.spy_number == 0)) - /* enable spy */ - u |= INL_CONFIG_RXANNEX; - - mgt_set(priv, OID_INL_CONFIG, &u); - mgt_commit_list(priv, &oid, 1); - up_write(&priv->mib_sem); - - return iw_handler_set_spy(ndev, info, uwrq, extra); -} - -static const iw_handler prism54_handler[] = { - (iw_handler) prism54_commit, /* SIOCSIWCOMMIT */ - (iw_handler) prism54_get_name, /* SIOCGIWNAME */ - (iw_handler) NULL, /* SIOCSIWNWID */ - (iw_handler) NULL, /* SIOCGIWNWID */ - (iw_handler) prism54_set_freq, /* SIOCSIWFREQ */ - (iw_handler) prism54_get_freq, /* SIOCGIWFREQ */ - (iw_handler) prism54_set_mode, /* SIOCSIWMODE */ - (iw_handler) prism54_get_mode, /* SIOCGIWMODE */ - (iw_handler) prism54_set_sens, /* SIOCSIWSENS */ - (iw_handler) prism54_get_sens, /* SIOCGIWSENS */ - (iw_handler) NULL, /* SIOCSIWRANGE */ - (iw_handler) prism54_get_range, /* SIOCGIWRANGE */ - (iw_handler) NULL, /* SIOCSIWPRIV */ - (iw_handler) NULL, /* SIOCGIWPRIV */ - (iw_handler) NULL, /* SIOCSIWSTATS */ - (iw_handler) NULL, /* SIOCGIWSTATS */ - prism54_set_spy, /* SIOCSIWSPY */ - iw_handler_get_spy, /* SIOCGIWSPY */ - iw_handler_set_thrspy, /* SIOCSIWTHRSPY */ - iw_handler_get_thrspy, /* SIOCGIWTHRSPY */ - (iw_handler) prism54_set_wap, /* SIOCSIWAP */ - (iw_handler) prism54_get_wap, /* SIOCGIWAP */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* SIOCGIWAPLIST deprecated */ - (iw_handler) prism54_set_scan, /* SIOCSIWSCAN */ - (iw_handler) prism54_get_scan, /* SIOCGIWSCAN */ - (iw_handler) prism54_set_essid, /* SIOCSIWESSID */ - (iw_handler) prism54_get_essid, /* SIOCGIWESSID */ - (iw_handler) prism54_set_nick, /* SIOCSIWNICKN */ - (iw_handler) prism54_get_nick, /* SIOCGIWNICKN */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) NULL, /* -- hole -- */ - (iw_handler) prism54_set_rate, /* SIOCSIWRATE */ - (iw_handler) prism54_get_rate, /* SIOCGIWRATE */ - (iw_handler) prism54_set_rts, /* SIOCSIWRTS */ - (iw_handler) prism54_get_rts, /* SIOCGIWRTS */ - (iw_handler) prism54_set_frag, /* SIOCSIWFRAG */ - (iw_handler) prism54_get_frag, /* SIOCGIWFRAG */ - (iw_handler) prism54_set_txpower, /* SIOCSIWTXPOW */ - (iw_handler) prism54_get_txpower, /* SIOCGIWTXPOW */ - (iw_handler) prism54_set_retry, /* SIOCSIWRETRY */ - (iw_handler) prism54_get_retry, /* SIOCGIWRETRY */ - (iw_handler) prism54_set_encode, /* SIOCSIWENCODE */ - (iw_handler) prism54_get_encode, /* SIOCGIWENCODE */ - (iw_handler) NULL, /* SIOCSIWPOWER */ - (iw_handler) NULL, /* SIOCGIWPOWER */ - NULL, /* -- hole -- */ - NULL, /* -- hole -- */ - (iw_handler) prism54_set_genie, /* SIOCSIWGENIE */ - (iw_handler) prism54_get_genie, /* SIOCGIWGENIE */ - (iw_handler) prism54_set_auth, /* SIOCSIWAUTH */ - (iw_handler) prism54_get_auth, /* SIOCGIWAUTH */ - (iw_handler) prism54_set_encodeext, /* SIOCSIWENCODEEXT */ - (iw_handler) prism54_get_encodeext, /* SIOCGIWENCODEEXT */ - NULL, /* SIOCSIWPMKSA */ -}; - -/* The low order bit identify a SET (0) or a GET (1) ioctl. 
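In other words, the wireless-extensions convention numbers private ioctls so that even offsets from SIOCIWFIRSTPRIV are SETs and odd offsets are GETs, which is why the defines below alternate. A hedged illustration (this helper is invented for the example, not part of the driver):

    #include <linux/wireless.h>

    /* Returns 1 for a GET private ioctl, 0 for a SET. */
    static inline int priv_ioctl_is_get(unsigned int cmd)
    {
            return (cmd - SIOCIWFIRSTPRIV) & 1;
    }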
*/ - -#define PRISM54_RESET SIOCIWFIRSTPRIV -#define PRISM54_GET_POLICY SIOCIWFIRSTPRIV+1 -#define PRISM54_SET_POLICY SIOCIWFIRSTPRIV+2 -#define PRISM54_GET_MAC SIOCIWFIRSTPRIV+3 -#define PRISM54_ADD_MAC SIOCIWFIRSTPRIV+4 - -#define PRISM54_DEL_MAC SIOCIWFIRSTPRIV+6 - -#define PRISM54_KICK_MAC SIOCIWFIRSTPRIV+8 - -#define PRISM54_KICK_ALL SIOCIWFIRSTPRIV+10 - -#define PRISM54_GET_WPA SIOCIWFIRSTPRIV+11 -#define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12 - -#define PRISM54_DBG_OID SIOCIWFIRSTPRIV+14 -#define PRISM54_DBG_GET_OID SIOCIWFIRSTPRIV+15 -#define PRISM54_DBG_SET_OID SIOCIWFIRSTPRIV+16 - -#define PRISM54_GET_OID SIOCIWFIRSTPRIV+17 -#define PRISM54_SET_OID_U32 SIOCIWFIRSTPRIV+18 -#define PRISM54_SET_OID_STR SIOCIWFIRSTPRIV+20 -#define PRISM54_SET_OID_ADDR SIOCIWFIRSTPRIV+22 - -#define PRISM54_GET_PRISMHDR SIOCIWFIRSTPRIV+23 -#define PRISM54_SET_PRISMHDR SIOCIWFIRSTPRIV+24 - -#define IWPRIV_SET_U32(n,x) { n, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x } -#define IWPRIV_SET_SSID(n,x) { n, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x } -#define IWPRIV_SET_ADDR(n,x) { n, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x } -#define IWPRIV_GET(n,x) { n, 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | PRIV_STR_SIZE, "g_"x } - -#define IWPRIV_U32(n,x) IWPRIV_SET_U32(n,x), IWPRIV_GET(n,x) -#define IWPRIV_SSID(n,x) IWPRIV_SET_SSID(n,x), IWPRIV_GET(n,x) -#define IWPRIV_ADDR(n,x) IWPRIV_SET_ADDR(n,x), IWPRIV_GET(n,x) - -/* Note : limited to 128 private ioctls (wireless tools 26) */ - -static const struct iw_priv_args prism54_private_args[] = { -/*{ cmd, set_args, get_args, name } */ - {PRISM54_RESET, 0, 0, "reset"}, - {PRISM54_GET_PRISMHDR, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - "get_prismhdr"}, - {PRISM54_SET_PRISMHDR, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, - "set_prismhdr"}, - {PRISM54_GET_POLICY, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - "getPolicy"}, - {PRISM54_SET_POLICY, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, - "setPolicy"}, - {PRISM54_GET_MAC, 0, IW_PRIV_TYPE_ADDR | 64, "getMac"}, - {PRISM54_ADD_MAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, - "addMac"}, - {PRISM54_DEL_MAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, - "delMac"}, - {PRISM54_KICK_MAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, - "kickMac"}, - {PRISM54_KICK_ALL, 0, 0, "kickAll"}, - {PRISM54_GET_WPA, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - "get_wpa"}, - {PRISM54_SET_WPA, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, - "set_wpa"}, - {PRISM54_DBG_OID, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, - "dbg_oid"}, - {PRISM54_DBG_GET_OID, 0, IW_PRIV_TYPE_BYTE | 256, "dbg_get_oid"}, - {PRISM54_DBG_SET_OID, IW_PRIV_TYPE_BYTE | 256, 0, "dbg_set_oid"}, - /* --- sub-ioctls handlers --- */ - {PRISM54_GET_OID, - 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | PRIV_STR_SIZE, ""}, - {PRISM54_SET_OID_U32, - IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, ""}, - {PRISM54_SET_OID_STR, - IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 1, 0, ""}, - {PRISM54_SET_OID_ADDR, - IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, ""}, - /* --- sub-ioctls definitions --- */ - IWPRIV_ADDR(GEN_OID_MACADDRESS, "addr"), - IWPRIV_GET(GEN_OID_LINKSTATE, "linkstate"), - IWPRIV_U32(DOT11_OID_BSSTYPE, "bsstype"), - IWPRIV_ADDR(DOT11_OID_BSSID, "bssid"), - IWPRIV_U32(DOT11_OID_STATE, "state"), - IWPRIV_U32(DOT11_OID_AID, "aid"), - - IWPRIV_SSID(DOT11_OID_SSIDOVERRIDE, "ssidoverride"), - - IWPRIV_U32(DOT11_OID_MEDIUMLIMIT, "medlimit"), - IWPRIV_U32(DOT11_OID_BEACONPERIOD, "beacon"), - IWPRIV_U32(DOT11_OID_DTIMPERIOD, 
"dtimperiod"), - - IWPRIV_U32(DOT11_OID_AUTHENABLE, "authenable"), - IWPRIV_U32(DOT11_OID_PRIVACYINVOKED, "privinvok"), - IWPRIV_U32(DOT11_OID_EXUNENCRYPTED, "exunencrypt"), - - IWPRIV_U32(DOT11_OID_REKEYTHRESHOLD, "rekeythresh"), - - IWPRIV_U32(DOT11_OID_MAXTXLIFETIME, "maxtxlife"), - IWPRIV_U32(DOT11_OID_MAXRXLIFETIME, "maxrxlife"), - IWPRIV_U32(DOT11_OID_ALOFT_FIXEDRATE, "fixedrate"), - IWPRIV_U32(DOT11_OID_MAXFRAMEBURST, "frameburst"), - IWPRIV_U32(DOT11_OID_PSM, "psm"), - - IWPRIV_U32(DOT11_OID_BRIDGELOCAL, "bridge"), - IWPRIV_U32(DOT11_OID_CLIENTS, "clients"), - IWPRIV_U32(DOT11_OID_CLIENTSASSOCIATED, "clientassoc"), - IWPRIV_U32(DOT11_OID_DOT1XENABLE, "dot1xenable"), - IWPRIV_U32(DOT11_OID_ANTENNARX, "rxant"), - IWPRIV_U32(DOT11_OID_ANTENNATX, "txant"), - IWPRIV_U32(DOT11_OID_ANTENNADIVERSITY, "antdivers"), - IWPRIV_U32(DOT11_OID_EDTHRESHOLD, "edthresh"), - IWPRIV_U32(DOT11_OID_PREAMBLESETTINGS, "preamble"), - IWPRIV_GET(DOT11_OID_RATES, "rates"), - IWPRIV_U32(DOT11_OID_OUTPUTPOWER, ".11outpower"), - IWPRIV_GET(DOT11_OID_SUPPORTEDRATES, "supprates"), - IWPRIV_GET(DOT11_OID_SUPPORTEDFREQUENCIES, "suppfreq"), - - IWPRIV_U32(DOT11_OID_NOISEFLOOR, "noisefloor"), - IWPRIV_GET(DOT11_OID_FREQUENCYACTIVITY, "freqactivity"), - IWPRIV_U32(DOT11_OID_NONERPPROTECTION, "nonerpprotec"), - IWPRIV_U32(DOT11_OID_PROFILES, "profile"), - IWPRIV_GET(DOT11_OID_EXTENDEDRATES, "extrates"), - IWPRIV_U32(DOT11_OID_MLMEAUTOLEVEL, "mlmelevel"), - - IWPRIV_GET(DOT11_OID_BSSS, "bsss"), - IWPRIV_GET(DOT11_OID_BSSLIST, "bsslist"), - IWPRIV_U32(OID_INL_MODE, "mode"), - IWPRIV_U32(OID_INL_CONFIG, "config"), - IWPRIV_U32(OID_INL_DOT11D_CONFORMANCE, ".11dconform"), - IWPRIV_GET(OID_INL_PHYCAPABILITIES, "phycapa"), - IWPRIV_U32(OID_INL_OUTPUTPOWER, "outpower"), -}; - -static const iw_handler prism54_private_handler[] = { - (iw_handler) prism54_reset, - (iw_handler) prism54_get_policy, - (iw_handler) prism54_set_policy, - (iw_handler) prism54_get_mac, - (iw_handler) prism54_add_mac, - (iw_handler) NULL, - (iw_handler) prism54_del_mac, - (iw_handler) NULL, - (iw_handler) prism54_kick_mac, - (iw_handler) NULL, - (iw_handler) prism54_kick_all, - (iw_handler) prism54_get_wpa, - (iw_handler) prism54_set_wpa, - (iw_handler) NULL, - (iw_handler) prism54_debug_oid, - (iw_handler) prism54_debug_get_oid, - (iw_handler) prism54_debug_set_oid, - (iw_handler) prism54_get_oid, - (iw_handler) prism54_set_u32, - (iw_handler) NULL, - (iw_handler) prism54_set_raw, - (iw_handler) NULL, - (iw_handler) prism54_set_raw, - (iw_handler) prism54_get_prismhdr, - (iw_handler) prism54_set_prismhdr, -}; - -const struct iw_handler_def prism54_handler_def = { - .num_standard = ARRAY_SIZE(prism54_handler), - .num_private = ARRAY_SIZE(prism54_private_handler), - .num_private_args = ARRAY_SIZE(prism54_private_args), - .standard = (iw_handler *) prism54_handler, - .private = (iw_handler *) prism54_private_handler, - .private_args = (struct iw_priv_args *) prism54_private_args, - .get_wireless_stats = prism54_get_wireless_stats, -}; diff --git a/drivers/net/wireless/intersil/prism54/isl_ioctl.h b/drivers/net/wireless/intersil/prism54/isl_ioctl.h deleted file mode 100644 index 3f85fd75ac19..000000000000 --- a/drivers/net/wireless/intersil/prism54/isl_ioctl.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2002 Intersil Americas Inc. - * (C) 2003 Aurelien Alleaume <slts@free.fr> - * (C) 2003 Luis R. 
Rodriguez <mcgrof@ruslug.rutgers.edu> - */ - -#ifndef _ISL_IOCTL_H -#define _ISL_IOCTL_H - -#include "islpci_mgt.h" -#include "islpci_dev.h" - -#include <net/iw_handler.h> /* New driver API */ - -#define SUPPORTED_WIRELESS_EXT 19 - -void prism54_mib_init(islpci_private *); - -struct iw_statistics *prism54_get_wireless_stats(struct net_device *); -void prism54_update_stats(struct work_struct *); - -void prism54_acl_init(struct islpci_acl *); -void prism54_acl_clean(struct islpci_acl *); - -void prism54_process_trap(struct work_struct *); - -void prism54_wpa_bss_ie_init(islpci_private *priv); -void prism54_wpa_bss_ie_clean(islpci_private *priv); - -int prism54_set_mac_address(struct net_device *, void *); - -extern const struct iw_handler_def prism54_handler_def; - -#endif /* _ISL_IOCTL_H */ diff --git a/drivers/net/wireless/intersil/prism54/isl_oid.h b/drivers/net/wireless/intersil/prism54/isl_oid.h deleted file mode 100644 index b889bb73a485..000000000000 --- a/drivers/net/wireless/intersil/prism54/isl_oid.h +++ /dev/null @@ -1,492 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> - * Copyright (C) 2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> - * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> - */ - -#if !defined(_ISL_OID_H) -#define _ISL_OID_H - -/* - * MIB related constant and structure definitions for communicating - * with the device firmware - */ - -struct obj_ssid { - u8 length; - char octets[33]; -} __packed; - -struct obj_key { - u8 type; /* dot11_priv_t */ - u8 length; - char key[32]; -} __packed; - -struct obj_mlme { - u8 address[6]; - u16 id; - u16 state; - u16 code; -} __packed; - -struct obj_mlmeex { - u8 address[6]; - u16 id; - u16 state; - u16 code; - u16 size; - u8 data[]; -} __packed; - -struct obj_buffer { - u32 size; - u32 addr; /* 32bit bus address */ -} __packed; - -struct obj_bss { - u8 address[6]; - int:16; /* padding */ - - char state; - char reserved; - short age; - - char quality; - char rssi; - - struct obj_ssid ssid; - short channel; - char beacon_period; - char dtim_period; - short capinfo; - short rates; - short basic_rates; - int:16; /* padding */ -} __packed; - -struct obj_bsslist { - u32 nr; - struct obj_bss bsslist[]; -} __packed; - -struct obj_frequencies { - u16 nr; - u16 mhz[]; -} __packed; - -struct obj_attachment { - char type; - char reserved; - short id; - short size; - char data[]; -} __packed; - -/* - * in case everything's ok, the inlined function below will be - * optimized away by the compiler... 
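The trick is that BUILD_BUG_ON() is purely a compile-time assertion: when every check passes, the empty inline function generates no object code, and if a __packed layout ever drifts, the build fails instead of the firmware wire format silently breaking. A standalone sketch of the same idea (the struct is invented for the example):

    #include <linux/build_bug.h>
    #include <linux/types.h>

    struct wire_hdr {
            u8  type;
            u8  len;
            u16 id;
    } __packed;

    static inline void check_wire_hdr_layout(void)
    {
            /* A compile error here means the on-the-wire layout changed. */
            BUILD_BUG_ON(sizeof(struct wire_hdr) != 4);
    }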
- */ -static inline void -__bug_on_wrong_struct_sizes(void) -{ - BUILD_BUG_ON(sizeof (struct obj_ssid) != 34); - BUILD_BUG_ON(sizeof (struct obj_key) != 34); - BUILD_BUG_ON(sizeof (struct obj_mlme) != 12); - BUILD_BUG_ON(sizeof (struct obj_mlmeex) != 14); - BUILD_BUG_ON(sizeof (struct obj_buffer) != 8); - BUILD_BUG_ON(sizeof (struct obj_bss) != 60); - BUILD_BUG_ON(sizeof (struct obj_bsslist) != 4); - BUILD_BUG_ON(sizeof (struct obj_frequencies) != 2); -} - -enum dot11_state_t { - DOT11_STATE_NONE = 0, - DOT11_STATE_AUTHING = 1, - DOT11_STATE_AUTH = 2, - DOT11_STATE_ASSOCING = 3, - - DOT11_STATE_ASSOC = 5, - DOT11_STATE_IBSS = 6, - DOT11_STATE_WDS = 7 -}; - -enum dot11_bsstype_t { - DOT11_BSSTYPE_NONE = 0, - DOT11_BSSTYPE_INFRA = 1, - DOT11_BSSTYPE_IBSS = 2, - DOT11_BSSTYPE_ANY = 3 -}; - -enum dot11_auth_t { - DOT11_AUTH_NONE = 0, - DOT11_AUTH_OS = 1, - DOT11_AUTH_SK = 2, - DOT11_AUTH_BOTH = 3 -}; - -enum dot11_mlme_t { - DOT11_MLME_AUTO = 0, - DOT11_MLME_INTERMEDIATE = 1, - DOT11_MLME_EXTENDED = 2 -}; - -enum dot11_priv_t { - DOT11_PRIV_WEP = 0, - DOT11_PRIV_TKIP = 1 -}; - -/* Prism "Nitro" / Frameburst / "Packet Frame Grouping" - * Value is in microseconds. Represents the # microseconds - * the firmware will take to group frames before sending out then out - * together with a CSMA contention. Without this all frames are - * sent with a CSMA contention. - * Bibliography: - * https://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html - */ -enum dot11_maxframeburst_t { - /* Values for DOT11_OID_MAXFRAMEBURST */ - DOT11_MAXFRAMEBURST_OFF = 0, /* Card firmware default */ - DOT11_MAXFRAMEBURST_MIXED_SAFE = 650, /* 802.11 a,b,g safe */ - DOT11_MAXFRAMEBURST_IDEAL = 1300, /* Theoretical ideal level */ - DOT11_MAXFRAMEBURST_MAX = 5000, /* Use this as max, - * Note: firmware allows for greater values. This is a - * recommended max. I'll update this as I find - * out what the real MAX is. Also note that you don't necessarily - * get better results with a greater value here. - */ -}; - -/* Support for 802.11 long and short frame preambles. - * Long preamble uses 128-bit sync field, 8-bit CRC - * Short preamble uses 56-bit sync field, 16-bit CRC - * - * 802.11a -- not sure, both optionally ? - * 802.11b supports long and optionally short - * 802.11g supports both */ -enum dot11_preamblesettings_t { - DOT11_PREAMBLESETTING_LONG = 0, - /* Allows *only* long 802.11 preambles */ - DOT11_PREAMBLESETTING_SHORT = 1, - /* Allows *only* short 802.11 preambles */ - DOT11_PREAMBLESETTING_DYNAMIC = 2 - /* AutomatiGically set */ -}; - -/* Support for 802.11 slot timing (time between packets). - * - * Long uses 802.11a slot timing (9 usec ?) - * Short uses 802.11b slot timing (20 use ?) */ -enum dot11_slotsettings_t { - DOT11_SLOTSETTINGS_LONG = 0, - /* Allows *only* long 802.11b slot timing */ - DOT11_SLOTSETTINGS_SHORT = 1, - /* Allows *only* long 802.11a slot timing */ - DOT11_SLOTSETTINGS_DYNAMIC = 2 - /* AutomatiGically set */ -}; - -/* All you need to know, ERP is "Extended Rate PHY". - * An Extended Rate PHY (ERP) STA or AP shall support three different - * preamble and header formats: - * Long preamble (refer to above) - * Short preamble (refer to above) - * OFDM preamble ( ? ) - * - * I'm assuming here Protection tells the AP - * to be careful, a STA which cannot handle the long pre-amble - * has joined. 
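All of these PHY tunables (frame burst, preamble, slot timing, ERP protection) are plain u32 OIDs, set through the same mgt_set_request() path the ioctl handlers above use. As a hedged sketch (the helper is illustrative, not from the original file):

    /* Ask the firmware for the 802.11a/b/g-safe frame burst window (650 us). */
    static int prism54_safe_frameburst(islpci_private *priv)
    {
            u32 burst = DOT11_MAXFRAMEBURST_MIXED_SAFE;

            return mgt_set_request(priv, DOT11_OID_MAXFRAMEBURST, 0, &burst);
    }

From user space the same OID is reachable as the "frameburst" private ioctl registered earlier.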
- */ -enum do11_nonerpstatus_t { - DOT11_ERPSTAT_NONEPRESENT = 0, - DOT11_ERPSTAT_USEPROTECTION = 1 -}; - -/* (ERP is "Extended Rate PHY") Way to read NONERP is NON-ERP-* - * The key here is DOT11 NON ERP NEVER protects against - * NON ERP STA's. You *don't* want this unless - * you know what you are doing. It means you will only - * get Extended Rate capabilities */ -enum dot11_nonerpprotection_t { - DOT11_NONERP_NEVER = 0, - DOT11_NONERP_ALWAYS = 1, - DOT11_NONERP_DYNAMIC = 2 -}; - -/* Preset OID configuration for 802.11 modes - * Note: DOT11_OID_CW[MIN|MAX] hold the values of the - * DCS MIN|MAX backoff used */ -enum dot11_profile_t { /* And set/allowed values */ - /* Allowed values for DOT11_OID_PROFILES */ - DOT11_PROFILE_B_ONLY = 0, - /* DOT11_OID_RATES: 1, 2, 5.5, 11Mbps - * DOT11_OID_PREAMBLESETTINGS: DOT11_PREAMBLESETTING_DYNAMIC - * DOT11_OID_CWMIN: 31 - * DOT11_OID_NONEPROTECTION: DOT11_NOERP_DYNAMIC - * DOT11_OID_SLOTSETTINGS: DOT11_SLOTSETTINGS_LONG - */ - DOT11_PROFILE_MIXED_G_WIFI = 1, - /* DOT11_OID_RATES: 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54Mbs - * DOT11_OID_PREAMBLESETTINGS: DOT11_PREAMBLESETTING_DYNAMIC - * DOT11_OID_CWMIN: 15 - * DOT11_OID_NONEPROTECTION: DOT11_NOERP_DYNAMIC - * DOT11_OID_SLOTSETTINGS: DOT11_SLOTSETTINGS_DYNAMIC - */ - DOT11_PROFILE_MIXED_LONG = 2, /* "Long range" */ - /* Same as Profile MIXED_G_WIFI */ - DOT11_PROFILE_G_ONLY = 3, - /* Same as Profile MIXED_G_WIFI */ - DOT11_PROFILE_TEST = 4, - /* Same as Profile MIXED_G_WIFI except: - * DOT11_OID_PREAMBLESETTINGS: DOT11_PREAMBLESETTING_SHORT - * DOT11_OID_NONEPROTECTION: DOT11_NOERP_NEVER - * DOT11_OID_SLOTSETTINGS: DOT11_SLOTSETTINGS_SHORT - */ - DOT11_PROFILE_B_WIFI = 5, - /* Same as Profile B_ONLY */ - DOT11_PROFILE_A_ONLY = 6, - /* Same as Profile MIXED_G_WIFI except: - * DOT11_OID_RATES: 6, 9, 12, 18, 24, 36, 48, 54Mbs - */ - DOT11_PROFILE_MIXED_SHORT = 7 - /* Same as MIXED_G_WIFI */ -}; - - -/* The dot11d conformance level configures the 802.11d conformance levels. 
- * The following conformance levels exist:*/ -enum oid_inl_conformance_t { - OID_INL_CONFORMANCE_NONE = 0, /* Perform active scanning */ - OID_INL_CONFORMANCE_STRICT = 1, /* Strictly adhere to 802.11d */ - OID_INL_CONFORMANCE_FLEXIBLE = 2, /* Use passed 802.11d info to - * determine channel AND/OR just make assumption that active - * channels are valid channels */ -}; - -enum oid_inl_mode_t { - INL_MODE_NONE = -1, - INL_MODE_PROMISCUOUS = 0, - INL_MODE_CLIENT = 1, - INL_MODE_AP = 2, - INL_MODE_SNIFFER = 3 -}; - -enum oid_inl_config_t { - INL_CONFIG_NOTHING = 0x00, - INL_CONFIG_MANUALRUN = 0x01, - INL_CONFIG_FRAMETRAP = 0x02, - INL_CONFIG_RXANNEX = 0x04, - INL_CONFIG_TXANNEX = 0x08, - INL_CONFIG_WDS = 0x10 -}; - -enum oid_inl_phycap_t { - INL_PHYCAP_2400MHZ = 1, - INL_PHYCAP_5000MHZ = 2, - INL_PHYCAP_FAA = 0x80000000, /* Means card supports the FAA switch */ -}; - - -enum oid_num_t { - GEN_OID_MACADDRESS = 0, - GEN_OID_LINKSTATE, - GEN_OID_WATCHDOG, - GEN_OID_MIBOP, - GEN_OID_OPTIONS, - GEN_OID_LEDCONFIG, - - /* 802.11 */ - DOT11_OID_BSSTYPE, - DOT11_OID_BSSID, - DOT11_OID_SSID, - DOT11_OID_STATE, - DOT11_OID_AID, - DOT11_OID_COUNTRYSTRING, - DOT11_OID_SSIDOVERRIDE, - - DOT11_OID_MEDIUMLIMIT, - DOT11_OID_BEACONPERIOD, - DOT11_OID_DTIMPERIOD, - DOT11_OID_ATIMWINDOW, - DOT11_OID_LISTENINTERVAL, - DOT11_OID_CFPPERIOD, - DOT11_OID_CFPDURATION, - - DOT11_OID_AUTHENABLE, - DOT11_OID_PRIVACYINVOKED, - DOT11_OID_EXUNENCRYPTED, - DOT11_OID_DEFKEYID, - DOT11_OID_DEFKEYX, /* DOT11_OID_DEFKEY1,...DOT11_OID_DEFKEY4 */ - DOT11_OID_STAKEY, - DOT11_OID_REKEYTHRESHOLD, - DOT11_OID_STASC, - - DOT11_OID_PRIVTXREJECTED, - DOT11_OID_PRIVRXPLAIN, - DOT11_OID_PRIVRXFAILED, - DOT11_OID_PRIVRXNOKEY, - - DOT11_OID_RTSTHRESH, - DOT11_OID_FRAGTHRESH, - DOT11_OID_SHORTRETRIES, - DOT11_OID_LONGRETRIES, - DOT11_OID_MAXTXLIFETIME, - DOT11_OID_MAXRXLIFETIME, - DOT11_OID_AUTHRESPTIMEOUT, - DOT11_OID_ASSOCRESPTIMEOUT, - - DOT11_OID_ALOFT_TABLE, - DOT11_OID_ALOFT_CTRL_TABLE, - DOT11_OID_ALOFT_RETREAT, - DOT11_OID_ALOFT_PROGRESS, - DOT11_OID_ALOFT_FIXEDRATE, - DOT11_OID_ALOFT_RSSIGRAPH, - DOT11_OID_ALOFT_CONFIG, - - DOT11_OID_VDCFX, - DOT11_OID_MAXFRAMEBURST, - - DOT11_OID_PSM, - DOT11_OID_CAMTIMEOUT, - DOT11_OID_RECEIVEDTIMS, - DOT11_OID_ROAMPREFERENCE, - - DOT11_OID_BRIDGELOCAL, - DOT11_OID_CLIENTS, - DOT11_OID_CLIENTSASSOCIATED, - DOT11_OID_CLIENTX, /* DOT11_OID_CLIENTX,...DOT11_OID_CLIENT2007 */ - - DOT11_OID_CLIENTFIND, - DOT11_OID_WDSLINKADD, - DOT11_OID_WDSLINKREMOVE, - DOT11_OID_EAPAUTHSTA, - DOT11_OID_EAPUNAUTHSTA, - DOT11_OID_DOT1XENABLE, - DOT11_OID_MICFAILURE, - DOT11_OID_REKEYINDICATE, - - DOT11_OID_MPDUTXSUCCESSFUL, - DOT11_OID_MPDUTXONERETRY, - DOT11_OID_MPDUTXMULTIPLERETRIES, - DOT11_OID_MPDUTXFAILED, - DOT11_OID_MPDURXSUCCESSFUL, - DOT11_OID_MPDURXDUPS, - DOT11_OID_RTSSUCCESSFUL, - DOT11_OID_RTSFAILED, - DOT11_OID_ACKFAILED, - DOT11_OID_FRAMERECEIVES, - DOT11_OID_FRAMEERRORS, - DOT11_OID_FRAMEABORTS, - DOT11_OID_FRAMEABORTSPHY, - - DOT11_OID_SLOTTIME, - DOT11_OID_CWMIN, /* MIN DCS backoff */ - DOT11_OID_CWMAX, /* MAX DCS backoff */ - DOT11_OID_ACKWINDOW, - DOT11_OID_ANTENNARX, - DOT11_OID_ANTENNATX, - DOT11_OID_ANTENNADIVERSITY, - DOT11_OID_CHANNEL, - DOT11_OID_EDTHRESHOLD, - DOT11_OID_PREAMBLESETTINGS, - DOT11_OID_RATES, - DOT11_OID_CCAMODESUPPORTED, - DOT11_OID_CCAMODE, - DOT11_OID_RSSIVECTOR, - DOT11_OID_OUTPUTPOWERTABLE, - DOT11_OID_OUTPUTPOWER, - DOT11_OID_SUPPORTEDRATES, - DOT11_OID_FREQUENCY, - DOT11_OID_SUPPORTEDFREQUENCIES, - DOT11_OID_NOISEFLOOR, - DOT11_OID_FREQUENCYACTIVITY, - 
DOT11_OID_IQCALIBRATIONTABLE, - DOT11_OID_NONERPPROTECTION, - DOT11_OID_SLOTSETTINGS, - DOT11_OID_NONERPTIMEOUT, - DOT11_OID_PROFILES, - DOT11_OID_EXTENDEDRATES, - - DOT11_OID_DEAUTHENTICATE, - DOT11_OID_AUTHENTICATE, - DOT11_OID_DISASSOCIATE, - DOT11_OID_ASSOCIATE, - DOT11_OID_SCAN, - DOT11_OID_BEACON, - DOT11_OID_PROBE, - DOT11_OID_DEAUTHENTICATEEX, - DOT11_OID_AUTHENTICATEEX, - DOT11_OID_DISASSOCIATEEX, - DOT11_OID_ASSOCIATEEX, - DOT11_OID_REASSOCIATE, - DOT11_OID_REASSOCIATEEX, - - DOT11_OID_NONERPSTATUS, - - DOT11_OID_STATIMEOUT, - DOT11_OID_MLMEAUTOLEVEL, - DOT11_OID_BSSTIMEOUT, - DOT11_OID_ATTACHMENT, - DOT11_OID_PSMBUFFER, - - DOT11_OID_BSSS, - DOT11_OID_BSSX, /*DOT11_OID_BSS1,...,DOT11_OID_BSS64 */ - DOT11_OID_BSSFIND, - DOT11_OID_BSSLIST, - - OID_INL_TUNNEL, - OID_INL_MEMADDR, - OID_INL_MEMORY, - OID_INL_MODE, - OID_INL_COMPONENT_NR, - OID_INL_VERSION, - OID_INL_INTERFACE_ID, - OID_INL_COMPONENT_ID, - OID_INL_CONFIG, - OID_INL_DOT11D_CONFORMANCE, - OID_INL_PHYCAPABILITIES, - OID_INL_OUTPUTPOWER, - - OID_NUM_LAST -}; - -#define OID_FLAG_CACHED 0x80 -#define OID_FLAG_TYPE 0x7f - -#define OID_TYPE_U32 0x01 -#define OID_TYPE_SSID 0x02 -#define OID_TYPE_KEY 0x03 -#define OID_TYPE_BUFFER 0x04 -#define OID_TYPE_BSS 0x05 -#define OID_TYPE_BSSLIST 0x06 -#define OID_TYPE_FREQUENCIES 0x07 -#define OID_TYPE_MLME 0x08 -#define OID_TYPE_MLMEEX 0x09 -#define OID_TYPE_ADDR 0x0A -#define OID_TYPE_RAW 0x0B -#define OID_TYPE_ATTACH 0x0C - -/* OID_TYPE_MLMEEX is special because of a variable size field when sending. - * Not yet implemented (not used in driver anyway). - */ - -struct oid_t { - enum oid_num_t oid; - short range; /* to define a range of oid */ - short size; /* max size of the associated data */ - char flags; -}; - -union oid_res_t { - void *ptr; - u32 u; -}; - -#define IWMAX_BITRATES 20 -#define IWMAX_BSS 24 -#define IWMAX_FREQ 30 -#define PRIV_STR_SIZE 1024 - -#endif /* !defined(_ISL_OID_H) */ -/* EOF */ diff --git a/drivers/net/wireless/intersil/prism54/islpci_dev.c b/drivers/net/wireless/intersil/prism54/islpci_dev.c deleted file mode 100644 index 8eb6d5e4bd57..000000000000 --- a/drivers/net/wireless/intersil/prism54/islpci_dev.c +++ /dev/null @@ -1,951 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2002 Intersil Americas Inc. - * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> - * Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> - */ - -#include <linux/hardirq.h> -#include <linux/module.h> -#include <linux/slab.h> - -#include <linux/netdevice.h> -#include <linux/ethtool.h> -#include <linux/pci.h> -#include <linux/sched.h> -#include <linux/etherdevice.h> -#include <linux/delay.h> -#include <linux/if_arp.h> - -#include <asm/io.h> - -#include "prismcompat.h" -#include "isl_38xx.h" -#include "isl_ioctl.h" -#include "islpci_dev.h" -#include "islpci_mgt.h" -#include "islpci_eth.h" -#include "oid_mgt.h" - -#define ISL3877_IMAGE_FILE "isl3877" -#define ISL3886_IMAGE_FILE "isl3886" -#define ISL3890_IMAGE_FILE "isl3890" -MODULE_FIRMWARE(ISL3877_IMAGE_FILE); -MODULE_FIRMWARE(ISL3886_IMAGE_FILE); -MODULE_FIRMWARE(ISL3890_IMAGE_FILE); - -static int prism54_bring_down(islpci_private *); -static int islpci_alloc_memory(islpci_private *); - -/* Temporary dummy MAC address to use until firmware is loaded. - * The idea there is that some tools (such as nameif) may query - * the MAC address before the netdev is 'open'. By using a valid - * OUI prefix, they can process the netdev properly. - * Of course, this is not the final/real MAC address. 
It doesn't - * matter, as you are suppose to be able to change it anytime via - * ndev->set_mac_address. Jean II */ -static const unsigned char dummy_mac[6] = { 0x00, 0x30, 0xB4, 0x00, 0x00, 0x00 }; - -static int -isl_upload_firmware(islpci_private *priv) -{ - u32 reg, rc; - void __iomem *device_base = priv->device_base; - - /* clear the RAMBoot and the Reset bit */ - reg = readl(device_base + ISL38XX_CTRL_STAT_REG); - reg &= ~ISL38XX_CTRL_STAT_RESET; - reg &= ~ISL38XX_CTRL_STAT_RAMBOOT; - writel(reg, device_base + ISL38XX_CTRL_STAT_REG); - wmb(); - udelay(ISL38XX_WRITEIO_DELAY); - - /* set the Reset bit without reading the register ! */ - reg |= ISL38XX_CTRL_STAT_RESET; - writel(reg, device_base + ISL38XX_CTRL_STAT_REG); - wmb(); - udelay(ISL38XX_WRITEIO_DELAY); - - /* clear the Reset bit */ - reg &= ~ISL38XX_CTRL_STAT_RESET; - writel(reg, device_base + ISL38XX_CTRL_STAT_REG); - wmb(); - - /* wait a while for the device to reboot */ - mdelay(50); - - { - const struct firmware *fw_entry = NULL; - long fw_len; - const u32 *fw_ptr; - - rc = request_firmware(&fw_entry, priv->firmware, PRISM_FW_PDEV); - if (rc) { - printk(KERN_ERR - "%s: request_firmware() failed for '%s'\n", - "prism54", priv->firmware); - return rc; - } - /* prepare the Direct Memory Base register */ - reg = ISL38XX_DEV_FIRMWARE_ADDRES; - - fw_ptr = (u32 *) fw_entry->data; - fw_len = fw_entry->size; - - if (fw_len % 4) { - printk(KERN_ERR - "%s: firmware '%s' size is not multiple of 32bit, aborting!\n", - "prism54", priv->firmware); - release_firmware(fw_entry); - return -EILSEQ; /* Illegal byte sequence */; - } - - while (fw_len > 0) { - long _fw_len = - (fw_len > - ISL38XX_MEMORY_WINDOW_SIZE) ? - ISL38XX_MEMORY_WINDOW_SIZE : fw_len; - u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN; - - /* set the card's base address for writing the data */ - isl38xx_w32_flush(device_base, reg, - ISL38XX_DIR_MEM_BASE_REG); - wmb(); /* be paranoid */ - - /* increment the write address for next iteration */ - reg += _fw_len; - fw_len -= _fw_len; - - /* write the data to the Direct Memory Window 32bit-wise */ - /* memcpy_toio() doesn't guarantee 32bit writes :-| */ - while (_fw_len > 0) { - /* use non-swapping writel() */ - __raw_writel(*fw_ptr, dev_fw_ptr); - fw_ptr++, dev_fw_ptr++; - _fw_len -= 4; - } - - /* flush PCI posting */ - (void) readl(device_base + ISL38XX_PCI_POSTING_FLUSH); - wmb(); /* be paranoid again */ - - BUG_ON(_fw_len != 0); - } - - BUG_ON(fw_len != 0); - - /* Firmware version is at offset 40 (also for "newmac") */ - printk(KERN_DEBUG "%s: firmware version: %.8s\n", - priv->ndev->name, fw_entry->data + 40); - - release_firmware(fw_entry); - } - - /* now reset the device - * clear the Reset & ClkRun bit, set the RAMBoot bit */ - reg = readl(device_base + ISL38XX_CTRL_STAT_REG); - reg &= ~ISL38XX_CTRL_STAT_CLKRUN; - reg &= ~ISL38XX_CTRL_STAT_RESET; - reg |= ISL38XX_CTRL_STAT_RAMBOOT; - isl38xx_w32_flush(device_base, reg, ISL38XX_CTRL_STAT_REG); - wmb(); - udelay(ISL38XX_WRITEIO_DELAY); - - /* set the reset bit latches the host override and RAMBoot bits - * into the device for operation when the reset bit is reset */ - reg |= ISL38XX_CTRL_STAT_RESET; - writel(reg, device_base + ISL38XX_CTRL_STAT_REG); - /* don't do flush PCI posting here! */ - wmb(); - udelay(ISL38XX_WRITEIO_DELAY); - - /* clear the reset bit should start the whole circus */ - reg &= ~ISL38XX_CTRL_STAT_RESET; - writel(reg, device_base + ISL38XX_CTRL_STAT_REG); - /* don't do flush PCI posting here! 
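PCI writes are posted, so elsewhere this sequence forces them out by reading a register back (isl38xx_w32_flush() and the explicit readl() of ISL38XX_PCI_POSTING_FLUSH above); around the reset bit it deliberately skips the read-back, presumably because reading from a device mid-reset is not safe. The read-back idiom itself, as a minimal sketch with an invented register offset:

    #include <linux/io.h>

    static void w32_flush(void __iomem *base, u32 val, unsigned long reg)
    {
            writel(val, base + reg);        /* posted; may sit in a bridge buffer */
            (void) readl(base + reg);       /* read-back flushes posted writes */
    }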
*/ - wmb(); - udelay(ISL38XX_WRITEIO_DELAY); - - return 0; -} - -/****************************************************************************** - Device Interrupt Handler -******************************************************************************/ - -irqreturn_t -islpci_interrupt(int irq, void *config) -{ - u32 reg; - islpci_private *priv = config; - struct net_device *ndev = priv->ndev; - void __iomem *device = priv->device_base; - int powerstate = ISL38XX_PSM_POWERSAVE_STATE; - - /* lock the interrupt handler */ - spin_lock(&priv->slock); - - /* received an interrupt request on a shared IRQ line - * first check whether the device is in sleep mode */ - reg = readl(device + ISL38XX_CTRL_STAT_REG); - if (reg & ISL38XX_CTRL_STAT_SLEEPMODE) - /* device is in sleep mode, IRQ was generated by someone else */ - { -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n"); -#endif - spin_unlock(&priv->slock); - return IRQ_NONE; - } - - - /* check whether there is any source of interrupt on the device */ - reg = readl(device + ISL38XX_INT_IDENT_REG); - - /* also check the contents of the Interrupt Enable Register, because this - * will filter out interrupt sources from other devices on the same irq ! */ - reg &= readl(device + ISL38XX_INT_EN_REG); - reg &= ISL38XX_INT_SOURCES; - - if (reg != 0) { - if (islpci_get_state(priv) != PRV_STATE_SLEEP) - powerstate = ISL38XX_PSM_ACTIVE_STATE; - - /* reset the request bits in the Identification register */ - isl38xx_w32_flush(device, reg, ISL38XX_INT_ACK_REG); - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_FUNCTION_CALLS, - "IRQ: Identification register 0x%p 0x%x\n", device, reg); -#endif - - /* check for each bit in the register separately */ - if (reg & ISL38XX_INT_IDENT_UPDATE) { -#if VERBOSE > SHOW_ERROR_MESSAGES - /* Queue has been updated */ - DEBUG(SHOW_TRACING, "IRQ: Update flag\n"); - - DEBUG(SHOW_QUEUE_INDEXES, - "CB drv Qs: [%i][%i][%i][%i][%i][%i]\n", - le32_to_cpu(priv->control_block-> - driver_curr_frag[0]), - le32_to_cpu(priv->control_block-> - driver_curr_frag[1]), - le32_to_cpu(priv->control_block-> - driver_curr_frag[2]), - le32_to_cpu(priv->control_block-> - driver_curr_frag[3]), - le32_to_cpu(priv->control_block-> - driver_curr_frag[4]), - le32_to_cpu(priv->control_block-> - driver_curr_frag[5]) - ); - - DEBUG(SHOW_QUEUE_INDEXES, - "CB dev Qs: [%i][%i][%i][%i][%i][%i]\n", - le32_to_cpu(priv->control_block-> - device_curr_frag[0]), - le32_to_cpu(priv->control_block-> - device_curr_frag[1]), - le32_to_cpu(priv->control_block-> - device_curr_frag[2]), - le32_to_cpu(priv->control_block-> - device_curr_frag[3]), - le32_to_cpu(priv->control_block-> - device_curr_frag[4]), - le32_to_cpu(priv->control_block-> - device_curr_frag[5]) - ); -#endif - - /* cleanup the data low transmit queue */ - islpci_eth_cleanup_transmit(priv, priv->control_block); - - /* device is in active state, update the - * powerstate flag if necessary */ - powerstate = ISL38XX_PSM_ACTIVE_STATE; - - /* check all three queues in priority order - * call the PIMFOR receive function until the - * queue is empty */ - if (isl38xx_in_queue(priv->control_block, - ISL38XX_CB_RX_MGMTQ) != 0) { -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, - "Received frame in Management Queue\n"); -#endif - islpci_mgt_receive(ndev); - - islpci_mgt_cleanup_transmit(ndev); - - /* Refill slots in receive queue */ - islpci_mgmt_rx_fill(ndev); - - /* no need to trigger the device, next - islpci_mgt_transaction does it */ - } - - while 
(isl38xx_in_queue(priv->control_block, - ISL38XX_CB_RX_DATA_LQ) != 0) { -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, - "Received frame in Data Low Queue\n"); -#endif - islpci_eth_receive(priv); - } - - /* check whether the data transmit queues were full */ - if (priv->data_low_tx_full) { - /* check whether the transmit is not full anymore */ - if (ISL38XX_CB_TX_QSIZE - - isl38xx_in_queue(priv->control_block, - ISL38XX_CB_TX_DATA_LQ) >= - ISL38XX_MIN_QTHRESHOLD) { - /* nope, the driver is ready for more network frames */ - netif_wake_queue(priv->ndev); - - /* reset the full flag */ - priv->data_low_tx_full = 0; - } - } - } - - if (reg & ISL38XX_INT_IDENT_INIT) { - /* Device has been initialized */ -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, - "IRQ: Init flag, device initialized\n"); -#endif - wake_up(&priv->reset_done); - } - - if (reg & ISL38XX_INT_IDENT_SLEEP) { - /* Device intends to move to powersave state */ -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, "IRQ: Sleep flag\n"); -#endif - isl38xx_handle_sleep_request(priv->control_block, - &powerstate, - priv->device_base); - } - - if (reg & ISL38XX_INT_IDENT_WAKEUP) { - /* Device has been woken up to active state */ -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, "IRQ: Wakeup flag\n"); -#endif - - isl38xx_handle_wakeup(priv->control_block, - &powerstate, priv->device_base); - } - } else { -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n"); -#endif - spin_unlock(&priv->slock); - return IRQ_NONE; - } - - /* sleep -> ready */ - if (islpci_get_state(priv) == PRV_STATE_SLEEP - && powerstate == ISL38XX_PSM_ACTIVE_STATE) - islpci_set_state(priv, PRV_STATE_READY); - - /* !sleep -> sleep */ - if (islpci_get_state(priv) != PRV_STATE_SLEEP - && powerstate == ISL38XX_PSM_POWERSAVE_STATE) - islpci_set_state(priv, PRV_STATE_SLEEP); - - /* unlock the interrupt handler */ - spin_unlock(&priv->slock); - - return IRQ_HANDLED; -} - -/****************************************************************************** - Network Interface Control & Statistical functions -******************************************************************************/ -static int -islpci_open(struct net_device *ndev) -{ - u32 rc; - islpci_private *priv = netdev_priv(ndev); - - /* reset data structures, upload firmware and reset device */ - rc = islpci_reset(priv,1); - if (rc) { - prism54_bring_down(priv); - return rc; /* Returns informative message */ - } - - netif_start_queue(ndev); - - /* Turn off carrier if in STA or Ad-hoc mode. It will be turned on - * once the firmware receives a trap of being associated - * (GEN_OID_LINKSTATE). In other modes (AP or WDS or monitor) we - * should just leave the carrier on as its expected the firmware - * won't send us a trigger. 
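/*
 * Illustrative helper (not in the original file) restating the flow-control
 * arithmetic used by the interrupt handler above and by islpci_eth_transmit()
 * in islpci_eth.c: queue indexes are free-running u32 counters, so occupancy
 * is a plain difference and u32 wrap-around takes care of itself.
 */
static inline int tx_queue_has_room(const islpci_private *priv,
				    const isl38xx_control_block *cb)
{
	u32 in_queue = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ])
		     - priv->free_data_tx;

	/* wake the stack only once a comfortable margin is free again,
	 * so the queue does not bounce on every completed fragment */
	return ISL38XX_CB_TX_QSIZE - in_queue >= ISL38XX_MIN_QTHRESHOLD;
}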
*/ - if (priv->iw_mode == IW_MODE_INFRA || priv->iw_mode == IW_MODE_ADHOC) - netif_carrier_off(ndev); - else - netif_carrier_on(ndev); - - return 0; -} - -static int -islpci_close(struct net_device *ndev) -{ - islpci_private *priv = netdev_priv(ndev); - - printk(KERN_DEBUG "%s: islpci_close ()\n", ndev->name); - - netif_stop_queue(ndev); - - return prism54_bring_down(priv); -} - -static int -prism54_bring_down(islpci_private *priv) -{ - void __iomem *device_base = priv->device_base; - u32 reg; - /* we are going to shutdown the device */ - islpci_set_state(priv, PRV_STATE_PREBOOT); - - /* disable all device interrupts in case they weren't */ - isl38xx_disable_interrupts(priv->device_base); - - /* For safety reasons, we may want to ensure that no DMA transfer is - * currently in progress by emptying the TX and RX queues. */ - - /* wait until interrupts have finished executing on other CPUs */ - synchronize_irq(priv->pdev->irq); - - reg = readl(device_base + ISL38XX_CTRL_STAT_REG); - reg &= ~(ISL38XX_CTRL_STAT_RESET | ISL38XX_CTRL_STAT_RAMBOOT); - writel(reg, device_base + ISL38XX_CTRL_STAT_REG); - wmb(); - udelay(ISL38XX_WRITEIO_DELAY); - - reg |= ISL38XX_CTRL_STAT_RESET; - writel(reg, device_base + ISL38XX_CTRL_STAT_REG); - wmb(); - udelay(ISL38XX_WRITEIO_DELAY); - - /* clear the Reset bit */ - reg &= ~ISL38XX_CTRL_STAT_RESET; - writel(reg, device_base + ISL38XX_CTRL_STAT_REG); - wmb(); - - /* wait a while for the device to reset */ - schedule_timeout_uninterruptible(msecs_to_jiffies(50)); - - return 0; -} - -static int -islpci_upload_fw(islpci_private *priv) -{ - islpci_state_t old_state; - u32 rc; - - old_state = islpci_set_state(priv, PRV_STATE_BOOT); - - printk(KERN_DEBUG "%s: uploading firmware...\n", priv->ndev->name); - - rc = isl_upload_firmware(priv); - if (rc) { - /* error uploading the firmware */ - printk(KERN_ERR "%s: could not upload firmware ('%s')\n", - priv->ndev->name, priv->firmware); - - islpci_set_state(priv, old_state); - return rc; - } - - printk(KERN_DEBUG "%s: firmware upload complete\n", - priv->ndev->name); - - islpci_set_state(priv, PRV_STATE_POSTBOOT); - - return 0; -} - -static int -islpci_reset_if(islpci_private *priv) -{ - long remaining; - int result = -ETIME; - int count; - - DEFINE_WAIT(wait); - prepare_to_wait(&priv->reset_done, &wait, TASK_UNINTERRUPTIBLE); - - /* now the last step is to reset the interface */ - isl38xx_interface_reset(priv->device_base, priv->device_host_address); - islpci_set_state(priv, PRV_STATE_PREINIT); - - for(count = 0; count < 2 && result; count++) { - /* The software reset acknowledge needs about 220 msec here. - * Be conservative and wait for up to one second. */ - - remaining = schedule_timeout_uninterruptible(HZ); - - if(remaining > 0) { - result = 0; - break; - } - - /* If we're here it's because our IRQ hasn't yet gone through. - * Retry a bit more... - */ - printk(KERN_ERR "%s: no 'reset complete' IRQ seen - retrying\n", - priv->ndev->name); - } - - finish_wait(&priv->reset_done, &wait); - - if (result) { - printk(KERN_ERR "%s: interface reset failure\n", priv->ndev->name); - return result; - } - - islpci_set_state(priv, PRV_STATE_INIT); - - /* Now that the device is 100% up, let's allow - * for the other interrupts -- - * NOTE: this is not *yet* true since we've only allowed the - * INIT interrupt on the IRQ line. 
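/*
 * The sleep/wake pattern used by islpci_reset_if() above, reduced to a
 * skeleton (illustrative, not part of the original file): register on the
 * waitqueue *before* kicking the hardware so a fast IRQ cannot slip through
 * unnoticed, then sleep with a generous timeout. The driver loops this wait
 * twice before giving up.
 */
static int wait_for_reset_irq(wait_queue_head_t *wq,
			      void (*kick)(void *ctx), void *ctx)
{
	DEFINE_WAIT(wait);
	int ret = -ETIME;

	prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
	kick(ctx);			/* start the interface reset */

	/* ~220 ms is typical; the 'reset done' IRQ wakes us early */
	if (schedule_timeout_uninterruptible(HZ) > 0)
		ret = 0;

	finish_wait(wq, &wait);
	return ret;
}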
We can perhaps poll - * the IRQ line until we know for sure the reset went through */ - isl38xx_enable_common_interrupts(priv->device_base); - - down_write(&priv->mib_sem); - result = mgt_commit(priv); - if (result) { - printk(KERN_ERR "%s: interface reset failure\n", priv->ndev->name); - up_write(&priv->mib_sem); - return result; - } - up_write(&priv->mib_sem); - - islpci_set_state(priv, PRV_STATE_READY); - - printk(KERN_DEBUG "%s: interface reset complete\n", priv->ndev->name); - return 0; -} - -int -islpci_reset(islpci_private *priv, int reload_firmware) -{ - isl38xx_control_block *cb = /* volatile not needed */ - (isl38xx_control_block *) priv->control_block; - unsigned counter; - int rc; - - if (reload_firmware) - islpci_set_state(priv, PRV_STATE_PREBOOT); - else - islpci_set_state(priv, PRV_STATE_POSTBOOT); - - printk(KERN_DEBUG "%s: resetting device...\n", priv->ndev->name); - - /* disable all device interrupts in case they weren't */ - isl38xx_disable_interrupts(priv->device_base); - - /* flush all management queues */ - priv->index_mgmt_tx = 0; - priv->index_mgmt_rx = 0; - - /* clear the indexes in the frame pointer */ - for (counter = 0; counter < ISL38XX_CB_QCOUNT; counter++) { - cb->driver_curr_frag[counter] = cpu_to_le32(0); - cb->device_curr_frag[counter] = cpu_to_le32(0); - } - - /* reset the mgmt receive queue */ - for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) { - isl38xx_fragment *frag = &cb->rx_data_mgmt[counter]; - frag->size = cpu_to_le16(MGMT_FRAME_SIZE); - frag->flags = 0; - frag->address = cpu_to_le32(priv->mgmt_rx[counter].pci_addr); - } - - for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) { - cb->rx_data_low[counter].address = - cpu_to_le32((u32) priv->pci_map_rx_address[counter]); - } - - /* since the receive queues are filled with empty fragments, now we can - * set the corresponding indexes in the Control Block */ - priv->control_block->driver_curr_frag[ISL38XX_CB_RX_DATA_LQ] = - cpu_to_le32(ISL38XX_CB_RX_QSIZE); - priv->control_block->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = - cpu_to_le32(ISL38XX_CB_MGMT_QSIZE); - - /* reset the remaining real index registers and full flags */ - priv->free_data_rx = 0; - priv->free_data_tx = 0; - priv->data_low_tx_full = 0; - - if (reload_firmware) { /* Should we load the firmware ? */ - /* now that the data structures are cleaned up, upload - * firmware and reset interface */ - rc = islpci_upload_fw(priv); - if (rc) { - printk(KERN_ERR "%s: islpci_reset: failure\n", - priv->ndev->name); - return rc; - } - } - - /* finally reset interface */ - rc = islpci_reset_if(priv); - if (rc) - printk(KERN_ERR "prism54: Your card/socket may be faulty, or IRQ line too busy :(\n"); - return rc; -} - -/****************************************************************************** - Network device configuration functions -******************************************************************************/ -static int -islpci_alloc_memory(islpci_private *priv) -{ - int counter; - -#if VERBOSE > SHOW_ERROR_MESSAGES - printk(KERN_DEBUG "islpci_alloc_memory\n"); -#endif - - /* remap the PCI device base address to accessible */ - if (!(priv->device_base = - ioremap(pci_resource_start(priv->pdev, 0), - ISL38XX_PCI_MEM_SIZE))) { - /* error in remapping the PCI device memory address range */ - printk(KERN_ERR "PCI memory remapping failed\n"); - return -1; - } - - /* memory layout for consistent DMA region: - * - * Area 1: Control Block for the device interface - * Area 2: Power Save Mode Buffer for temporary frame storage. 
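/*
 * Illustrative restatement (not part of the original file) of the layout
 * described above: both areas live in one HOST_MEM_BLOCK-sized coherent
 * allocation, so the single dma_alloc_coherent() call performed just below
 * serves the control block and the power-save buffer alike.
 */
static int carve_shared_area(islpci_private *priv)
{
	priv->driver_mem_address = dma_alloc_coherent(&priv->pdev->dev,
						      HOST_MEM_BLOCK,
						      &priv->device_host_address,
						      GFP_KERNEL);
	if (!priv->driver_mem_address)
		return -ENOMEM;

	/* area 1: the control block sits at the start (CPU view) */
	priv->control_block =
	    (isl38xx_control_block *) priv->driver_mem_address;
	/* area 2: the PSM buffer directly behind it (bus-address view) */
	priv->device_psm_buffer =
	    priv->device_host_address + CONTROL_BLOCK_SIZE;
	return 0;
}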
Be aware that - * the number of supported stations in the AP determines the minimal - * size of the buffer ! - */ - - /* perform the allocation */ - priv->driver_mem_address = dma_alloc_coherent(&priv->pdev->dev, - HOST_MEM_BLOCK, - &priv->device_host_address, - GFP_KERNEL); - - if (!priv->driver_mem_address) { - /* error allocating the block of PCI memory */ - printk(KERN_ERR "%s: could not allocate DMA memory, aborting!", - "prism54"); - return -1; - } - - /* assign the Control Block to the first address of the allocated area */ - priv->control_block = - (isl38xx_control_block *) priv->driver_mem_address; - - /* set the Power Save Buffer pointer directly behind the CB */ - priv->device_psm_buffer = - priv->device_host_address + CONTROL_BLOCK_SIZE; - - /* make sure all buffer pointers are initialized */ - for (counter = 0; counter < ISL38XX_CB_QCOUNT; counter++) { - priv->control_block->driver_curr_frag[counter] = cpu_to_le32(0); - priv->control_block->device_curr_frag[counter] = cpu_to_le32(0); - } - - priv->index_mgmt_rx = 0; - memset(priv->mgmt_rx, 0, sizeof(priv->mgmt_rx)); - memset(priv->mgmt_tx, 0, sizeof(priv->mgmt_tx)); - - /* allocate rx queue for management frames */ - if (islpci_mgmt_rx_fill(priv->ndev) < 0) - goto out_free; - - /* now get the data rx skb's */ - memset(priv->data_low_rx, 0, sizeof (priv->data_low_rx)); - memset(priv->pci_map_rx_address, 0, sizeof (priv->pci_map_rx_address)); - - for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) { - struct sk_buff *skb; - - /* allocate an sk_buff for received data frames storage - * each frame on receive size consists of 1 fragment - * include any required allignment operations */ - if (!(skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2))) { - /* error allocating an sk_buff structure elements */ - printk(KERN_ERR "Error allocating skb.\n"); - skb = NULL; - goto out_free; - } - skb_reserve(skb, (4 - (long) skb->data) & 0x03); - /* add the new allocated sk_buff to the buffer array */ - priv->data_low_rx[counter] = skb; - - /* map the allocated skb data area to pci */ - priv->pci_map_rx_address[counter] = - dma_map_single(&priv->pdev->dev, (void *)skb->data, - MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE); - if (dma_mapping_error(&priv->pdev->dev, priv->pci_map_rx_address[counter])) { - priv->pci_map_rx_address[counter] = 0; - /* error mapping the buffer to device - accessible memory address */ - printk(KERN_ERR "failed to map skb DMA'able\n"); - goto out_free; - } - } - - prism54_acl_init(&priv->acl); - prism54_wpa_bss_ie_init(priv); - if (mgt_init(priv)) - goto out_free; - - return 0; - out_free: - islpci_free_memory(priv); - return -1; -} - -int -islpci_free_memory(islpci_private *priv) -{ - int counter; - - if (priv->device_base) - iounmap(priv->device_base); - priv->device_base = NULL; - - /* free consistent DMA area... 
*/ - if (priv->driver_mem_address) - dma_free_coherent(&priv->pdev->dev, HOST_MEM_BLOCK, - priv->driver_mem_address, - priv->device_host_address); - - /* clear some dangling pointers */ - priv->driver_mem_address = NULL; - priv->device_host_address = 0; - priv->device_psm_buffer = 0; - priv->control_block = NULL; - - /* clean up mgmt rx buffers */ - for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) { - struct islpci_membuf *buf = &priv->mgmt_rx[counter]; - if (buf->pci_addr) - dma_unmap_single(&priv->pdev->dev, buf->pci_addr, - buf->size, DMA_FROM_DEVICE); - buf->pci_addr = 0; - kfree(buf->mem); - buf->size = 0; - buf->mem = NULL; - } - - /* clean up data rx buffers */ - for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) { - if (priv->pci_map_rx_address[counter]) - dma_unmap_single(&priv->pdev->dev, - priv->pci_map_rx_address[counter], - MAX_FRAGMENT_SIZE_RX + 2, - DMA_FROM_DEVICE); - priv->pci_map_rx_address[counter] = 0; - - if (priv->data_low_rx[counter]) - dev_kfree_skb(priv->data_low_rx[counter]); - priv->data_low_rx[counter] = NULL; - } - - /* Free the access control list and the WPA list */ - prism54_acl_clean(&priv->acl); - prism54_wpa_bss_ie_clean(priv); - mgt_clean(priv); - - return 0; -} - -#if 0 -static void -islpci_set_multicast_list(struct net_device *dev) -{ - /* put device into promisc mode and let network layer handle it */ -} -#endif - -static void islpci_ethtool_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); -} - -static const struct ethtool_ops islpci_ethtool_ops = { - .get_drvinfo = islpci_ethtool_get_drvinfo, -}; - -static const struct net_device_ops islpci_netdev_ops = { - .ndo_open = islpci_open, - .ndo_stop = islpci_close, - .ndo_start_xmit = islpci_eth_transmit, - .ndo_tx_timeout = islpci_eth_tx_timeout, - .ndo_set_mac_address = prism54_set_mac_address, - .ndo_validate_addr = eth_validate_addr, -}; - -static struct device_type wlan_type = { - .name = "wlan", -}; - -struct net_device * -islpci_setup(struct pci_dev *pdev) -{ - islpci_private *priv; - struct net_device *ndev = alloc_etherdev(sizeof (islpci_private)); - - if (!ndev) - return ndev; - - pci_set_drvdata(pdev, ndev); - SET_NETDEV_DEV(ndev, &pdev->dev); - SET_NETDEV_DEVTYPE(ndev, &wlan_type); - - /* setup the structure members */ - ndev->base_addr = pci_resource_start(pdev, 0); - ndev->irq = pdev->irq; - - /* initialize the function pointers */ - ndev->netdev_ops = &islpci_netdev_ops; - ndev->wireless_handlers = &prism54_handler_def; - ndev->ethtool_ops = &islpci_ethtool_ops; - - /* ndev->set_multicast_list = &islpci_set_multicast_list; */ - ndev->addr_len = ETH_ALEN; - /* Get a non-zero dummy MAC address for nameif. Jean II */ - memcpy(ndev->dev_addr, dummy_mac, ETH_ALEN); - - ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT; - - /* allocate a private device structure to the network device */ - priv = netdev_priv(ndev); - priv->ndev = ndev; - priv->pdev = pdev; - priv->monitor_type = ARPHRD_IEEE80211; - priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ? - priv->monitor_type : ARPHRD_ETHER; - - /* Add pointers to enable iwspy support. 
*/ - priv->wireless_data.spy_data = &priv->spy_data; - ndev->wireless_data = &priv->wireless_data; - - /* save the start and end address of the PCI memory area */ - ndev->mem_start = (unsigned long) priv->device_base; - ndev->mem_end = ndev->mem_start + ISL38XX_PCI_MEM_SIZE; - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, "PCI Memory remapped to 0x%p\n", priv->device_base); -#endif - - init_waitqueue_head(&priv->reset_done); - - /* init the queue read locks, process wait counter */ - mutex_init(&priv->mgmt_lock); - priv->mgmt_received = NULL; - init_waitqueue_head(&priv->mgmt_wqueue); - mutex_init(&priv->stats_lock); - spin_lock_init(&priv->slock); - - /* init state machine with off#1 state */ - priv->state = PRV_STATE_OFF; - priv->state_off = 1; - - /* initialize workqueue's */ - INIT_WORK(&priv->stats_work, prism54_update_stats); - priv->stats_timestamp = 0; - - INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake); - priv->reset_task_pending = 0; - - /* allocate various memory areas */ - if (islpci_alloc_memory(priv)) - goto do_free_netdev; - - /* select the firmware file depending on the device id */ - switch (pdev->device) { - case 0x3877: - strcpy(priv->firmware, ISL3877_IMAGE_FILE); - break; - - case 0x3886: - strcpy(priv->firmware, ISL3886_IMAGE_FILE); - break; - - default: - strcpy(priv->firmware, ISL3890_IMAGE_FILE); - break; - } - - if (register_netdev(ndev)) { - DEBUG(SHOW_ERROR_MESSAGES, - "ERROR: register_netdev() failed\n"); - goto do_islpci_free_memory; - } - - return ndev; - - do_islpci_free_memory: - islpci_free_memory(priv); - do_free_netdev: - free_netdev(ndev); - priv = NULL; - return NULL; -} - -islpci_state_t -islpci_set_state(islpci_private *priv, islpci_state_t new_state) -{ - islpci_state_t old_state; - - /* lock */ - old_state = priv->state; - - /* this means either a race condition or some serious error in - * the driver code */ - switch (new_state) { - case PRV_STATE_OFF: - priv->state_off++; - fallthrough; - default: - priv->state = new_state; - break; - - case PRV_STATE_PREBOOT: - /* there are actually many off-states, enumerated by - * state_off */ - if (old_state == PRV_STATE_OFF) - priv->state_off--; - - /* only if hw_unavailable is zero now it means we either - * were in off#1 state, or came here from - * somewhere else */ - if (!priv->state_off) - priv->state = new_state; - break; - } -#if 0 - printk(KERN_DEBUG "%s: state transition %d -> %d (off#%d)\n", - priv->ndev->name, old_state, new_state, priv->state_off); -#endif - - /* invariants */ - BUG_ON(priv->state_off < 0); - BUG_ON(priv->state_off && (priv->state != PRV_STATE_OFF)); - BUG_ON(!priv->state_off && (priv->state == PRV_STATE_OFF)); - - /* unlock */ - return old_state; -} diff --git a/drivers/net/wireless/intersil/prism54/islpci_dev.h b/drivers/net/wireless/intersil/prism54/islpci_dev.h deleted file mode 100644 index 4753418dce62..000000000000 --- a/drivers/net/wireless/intersil/prism54/islpci_dev.h +++ /dev/null @@ -1,204 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2002 Intersil Americas Inc. - * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> - * Copyright (C) 2003 Luis R. 
Rodriguez <mcgrof@ruslug.rutgers.edu> - * Copyright (C) 2003 Aurelien Alleaume <slts@free.fr> - */ - -#ifndef _ISLPCI_DEV_H -#define _ISLPCI_DEV_H - -#include <linux/irqreturn.h> -#include <linux/netdevice.h> -#include <linux/wireless.h> -#include <net/iw_handler.h> -#include <linux/list.h> -#include <linux/mutex.h> - -#include "isl_38xx.h" -#include "isl_oid.h" -#include "islpci_mgt.h" - -/* some states might not be superflous and may be removed when - design is finalized (hvr) */ -typedef enum { - PRV_STATE_OFF = 0, /* this means hw_unavailable is != 0 */ - PRV_STATE_PREBOOT, /* we are in a pre-boot state (empty RAM) */ - PRV_STATE_BOOT, /* boot state (fw upload, run fw) */ - PRV_STATE_POSTBOOT, /* after boot state, need reset now */ - PRV_STATE_PREINIT, /* pre-init state */ - PRV_STATE_INIT, /* init state (restore MIB backup to device) */ - PRV_STATE_READY, /* driver&device are in operational state */ - PRV_STATE_SLEEP /* device in sleep mode */ -} islpci_state_t; - -/* ACL using MAC address */ -struct mac_entry { - struct list_head _list; - char addr[ETH_ALEN]; -}; - -struct islpci_acl { - enum { MAC_POLICY_OPEN=0, MAC_POLICY_ACCEPT=1, MAC_POLICY_REJECT=2 } policy; - struct list_head mac_list; /* a list of mac_entry */ - int size; /* size of queue */ - struct mutex lock; /* accessed in ioctls and trap_work */ -}; - -struct islpci_membuf { - int size; /* size of memory */ - void *mem; /* address of memory as seen by CPU */ - dma_addr_t pci_addr; /* address of memory as seen by device */ -}; - -#define MAX_BSS_WPA_IE_COUNT 64 -#define MAX_WPA_IE_LEN 64 -struct islpci_bss_wpa_ie { - struct list_head list; - unsigned long last_update; - u8 bssid[ETH_ALEN]; - u8 wpa_ie[MAX_WPA_IE_LEN]; - size_t wpa_ie_len; - -}; - -typedef struct { - spinlock_t slock; /* generic spinlock; */ - - u32 priv_oid; - - /* our mib cache */ - u32 iw_mode; - struct rw_semaphore mib_sem; - void **mib; - char nickname[IW_ESSID_MAX_SIZE+1]; - - /* Take care of the wireless stats */ - struct work_struct stats_work; - struct mutex stats_lock; - /* remember when we last updated the stats */ - unsigned long stats_timestamp; - /* The first is accessed under semaphore locking. - * The second is the clean one we return to iwconfig. - */ - struct iw_statistics local_iwstatistics; - struct iw_statistics iwstatistics; - - struct iw_spy_data spy_data; /* iwspy support */ - - struct iw_public_data wireless_data; - - int monitor_type; /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_PRISM */ - - struct islpci_acl acl; - - /* PCI bus allocation & configuration members */ - struct pci_dev *pdev; /* PCI structure information */ - char firmware[33]; - - void __iomem *device_base; /* ioremapped device base address */ - - /* consistent DMA region */ - void *driver_mem_address; /* base DMA address */ - dma_addr_t device_host_address; /* base DMA address (bus address) */ - dma_addr_t device_psm_buffer; /* host memory for PSM buffering (bus address) */ - - /* our network_device structure */ - struct net_device *ndev; - - /* device queue interface members */ - struct isl38xx_cb *control_block; /* device control block - (== driver_mem_address!) */ - - /* Each queue has three indexes: - * free/index_mgmt/data_rx/tx (called index, see below), - * driver_curr_frag, and device_curr_frag (in the control block) - * All indexes are ever-increasing, but interpreted modulo the - * device queue size when used. 
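/*
 * Illustrative helpers (not part of the original header) for the index
 * scheme this comment describes: because the counters only ever grow,
 * subtraction yields the occupancy even across u32 wrap-around, and the
 * slot inside the ring is simply the counter modulo the queue size.
 */
static inline u32 queue_occupancy(u32 newer, u32 older)
{
	return newer - older;	/* well-defined across u32 overflow */
}

static inline u32 queue_slot(u32 index, u32 qsize)
{
	return index % qsize;
}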
- * index <= device_curr_frag <= driver_curr_frag at all times - * For rx queues, [index, device_curr_frag) contains fragments - * that the interrupt processing needs to handle (owned by driver). - * [device_curr_frag, driver_curr_frag) is the free space in the - * rx queue, waiting for data (owned by device). The driver - * increments driver_curr_frag to indicate to the device that more - * buffers are available. - * If device_curr_frag == driver_curr_frag, no more rx buffers are - * available, and the rx DMA engine of the device is halted. - * For tx queues, [index, device_curr_frag) contains fragments - * where tx is done; they need to be freed (owned by driver). - * [device_curr_frag, driver_curr_frag) contains the frames - * that are being transferred (owned by device). The driver - * increments driver_curr_frag to indicate that more tx work - * needs to be done. - */ - u32 index_mgmt_rx; /* real index mgmt rx queue */ - u32 index_mgmt_tx; /* read index mgmt tx queue */ - u32 free_data_rx; /* free pointer data rx queue */ - u32 free_data_tx; /* free pointer data tx queue */ - u32 data_low_tx_full; /* full detected flag */ - - /* frame memory buffers for the device queues */ - struct islpci_membuf mgmt_tx[ISL38XX_CB_MGMT_QSIZE]; - struct islpci_membuf mgmt_rx[ISL38XX_CB_MGMT_QSIZE]; - struct sk_buff *data_low_tx[ISL38XX_CB_TX_QSIZE]; - struct sk_buff *data_low_rx[ISL38XX_CB_RX_QSIZE]; - dma_addr_t pci_map_tx_address[ISL38XX_CB_TX_QSIZE]; - dma_addr_t pci_map_rx_address[ISL38XX_CB_RX_QSIZE]; - - /* wait for a reset interrupt */ - wait_queue_head_t reset_done; - - /* used by islpci_mgt_transaction */ - struct mutex mgmt_lock; /* serialize access to mailbox and wqueue */ - struct islpci_mgmtframe *mgmt_received; /* mbox for incoming frame */ - wait_queue_head_t mgmt_wqueue; /* waitqueue for mbox */ - - /* state machine */ - islpci_state_t state; - int state_off; /* enumeration of off-state, if 0 then - * we're not in any off-state */ - - /* WPA stuff */ - int wpa; /* WPA mode enabled */ - struct list_head bss_wpa_list; - int num_bss_wpa; - struct mutex wpa_lock; - u8 wpa_ie[MAX_WPA_IE_LEN]; - size_t wpa_ie_len; - - struct work_struct reset_task; - int reset_task_pending; -} islpci_private; - -static inline islpci_state_t -islpci_get_state(islpci_private *priv) -{ - /* lock */ - return priv->state; - /* unlock */ -} - -islpci_state_t islpci_set_state(islpci_private *priv, islpci_state_t new_state); - -#define ISLPCI_TX_TIMEOUT (2*HZ) - -irqreturn_t islpci_interrupt(int, void *); - -int prism54_post_setup(islpci_private *, int); -int islpci_reset(islpci_private *, int); - -static inline void -islpci_trigger(islpci_private *priv) -{ - isl38xx_trigger_device(islpci_get_state(priv) == PRV_STATE_SLEEP, - priv->device_base); -} - -int islpci_free_memory(islpci_private *); -struct net_device *islpci_setup(struct pci_dev *); - -#define DRV_NAME "prism54" -#define DRV_VERSION "1.2" - -#endif /* _ISLPCI_DEV_H */ diff --git a/drivers/net/wireless/intersil/prism54/islpci_eth.c b/drivers/net/wireless/intersil/prism54/islpci_eth.c deleted file mode 100644 index 74dd65716afd..000000000000 --- a/drivers/net/wireless/intersil/prism54/islpci_eth.c +++ /dev/null @@ -1,489 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2002 Intersil Americas Inc. 
- * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> - */ - -#include <linux/module.h> -#include <linux/gfp.h> - -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/if_arp.h> -#include <asm/byteorder.h> - -#include "prismcompat.h" -#include "isl_38xx.h" -#include "islpci_eth.h" -#include "islpci_mgt.h" -#include "oid_mgt.h" - -/****************************************************************************** - Network Interface functions -******************************************************************************/ -void -islpci_eth_cleanup_transmit(islpci_private *priv, - isl38xx_control_block *control_block) -{ - struct sk_buff *skb; - u32 index; - - /* compare the control block read pointer with the free pointer */ - while (priv->free_data_tx != - le32_to_cpu(control_block-> - device_curr_frag[ISL38XX_CB_TX_DATA_LQ])) { - /* read the index of the first fragment to be freed */ - index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE; - - /* check for holes in the arrays caused by multi fragment frames - * searching for the last fragment of a frame */ - if (priv->pci_map_tx_address[index]) { - /* entry is the last fragment of a frame - * free the skb structure and unmap pci memory */ - skb = priv->data_low_tx[index]; - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, - "cleanup skb %p skb->data %p skb->len %u truesize %u\n", - skb, skb->data, skb->len, skb->truesize); -#endif - - dma_unmap_single(&priv->pdev->dev, - priv->pci_map_tx_address[index], - skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq(skb); - skb = NULL; - } - /* increment the free data low queue pointer */ - priv->free_data_tx++; - } -} - -netdev_tx_t -islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) -{ - islpci_private *priv = netdev_priv(ndev); - isl38xx_control_block *cb = priv->control_block; - u32 index; - dma_addr_t pci_map_address; - int frame_size; - isl38xx_fragment *fragment; - int offset; - struct sk_buff *newskb; - int newskb_offset; - unsigned long flags; - unsigned char wds_mac[6]; - u32 curr_frag; - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n"); -#endif - - /* lock the driver code */ - spin_lock_irqsave(&priv->slock, flags); - - /* check whether the destination queue has enough fragments for the frame */ - curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]); - if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) { - printk(KERN_ERR "%s: transmit device queue full when awake\n", - ndev->name); - netif_stop_queue(ndev); - - /* trigger the device */ - isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE, - ISL38XX_DEV_INT_REG); - udelay(ISL38XX_WRITEIO_DELAY); - goto drop_free; - } - /* Check alignment and WDS frame formatting. The start of the packet should - * be aligned on a 4-byte boundary. If WDS is enabled add another 6 bytes - * and add WDS address information */ - if (likely(((long) skb->data & 0x03) | init_wds)) { - /* get the number of bytes to add and re-align */ - offset = (4 - (long) skb->data) & 0x03; - offset += init_wds ? 
6 : 0; - - /* check whether the current skb can be used */ - if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) { - unsigned char *src = skb->data; - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset, - init_wds); -#endif - - /* align the buffer on 4-byte boundary */ - skb_reserve(skb, (4 - (long) skb->data) & 0x03); - if (init_wds) { - /* wds requires an additional address field of 6 bytes */ - skb_put(skb, 6); -#ifdef ISLPCI_ETH_DEBUG - printk("islpci_eth_transmit:wds_mac\n"); -#endif - memmove(skb->data + 6, src, skb->len); - skb_copy_to_linear_data(skb, wds_mac, 6); - } else { - memmove(skb->data, src, skb->len); - } - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data, - src, skb->len); -#endif - } else { - newskb = - dev_alloc_skb(init_wds ? skb->len + 6 : skb->len); - if (unlikely(newskb == NULL)) { - printk(KERN_ERR "%s: Cannot allocate skb\n", - ndev->name); - goto drop_free; - } - newskb_offset = (4 - (long) newskb->data) & 0x03; - - /* Check if newskb->data is aligned */ - if (newskb_offset) - skb_reserve(newskb, newskb_offset); - - skb_put(newskb, init_wds ? skb->len + 6 : skb->len); - if (init_wds) { - skb_copy_from_linear_data(skb, - newskb->data + 6, - skb->len); - skb_copy_to_linear_data(newskb, wds_mac, 6); -#ifdef ISLPCI_ETH_DEBUG - printk("islpci_eth_transmit:wds_mac\n"); -#endif - } else - skb_copy_from_linear_data(skb, newskb->data, - skb->len); - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n", - newskb->data, skb->data, skb->len, init_wds); -#endif - - newskb->dev = skb->dev; - dev_kfree_skb_irq(skb); - skb = newskb; - } - } - /* display the buffer contents for debugging */ -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data); - display_buffer((char *) skb->data, skb->len); -#endif - - /* map the skb buffer to pci memory for DMA operation */ - pci_map_address = dma_map_single(&priv->pdev->dev, (void *)skb->data, - skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(&priv->pdev->dev, pci_map_address)) { - printk(KERN_WARNING "%s: cannot map buffer to PCI\n", - ndev->name); - goto drop_free; - } - /* Place the fragment in the control block structure. */ - index = curr_frag % ISL38XX_CB_TX_QSIZE; - fragment = &cb->tx_data_low[index]; - - priv->pci_map_tx_address[index] = pci_map_address; - /* store the skb address for future freeing */ - priv->data_low_tx[index] = skb; - /* set the proper fragment start address and size information */ - frame_size = skb->len; - fragment->size = cpu_to_le16(frame_size); - fragment->flags = cpu_to_le16(0); /* set to 1 if more fragments */ - fragment->address = cpu_to_le32(pci_map_address); - curr_frag++; - - /* The fragment address in the control block must have been - * written before announcing the frame buffer to device. 
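/*
 * The descriptor-publish pattern used here and by every other queue in the
 * driver, isolated for clarity (illustrative, not part of the original
 * file): complete the fragment descriptor first, then a write barrier,
 * then advance the driver index that hands the slot to the device.
 */
static void publish_fragment(isl38xx_fragment *frag, __le32 *driver_idx,
			     dma_addr_t addr, u16 size, u32 next_idx)
{
	frag->size = cpu_to_le16(size);
	frag->flags = cpu_to_le16(0);
	frag->address = cpu_to_le32(addr);

	wmb();	/* descriptor contents before the index update */

	*driver_idx = cpu_to_le32(next_idx);	/* device may DMA from now on */
}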
*/ - wmb(); - cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag); - - if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD - > ISL38XX_CB_TX_QSIZE) { - /* stop sends from upper layers */ - netif_stop_queue(ndev); - - /* set the full flag for the transmission queue */ - priv->data_low_tx_full = 1; - } - - ndev->stats.tx_packets++; - ndev->stats.tx_bytes += skb->len; - - /* trigger the device */ - islpci_trigger(priv); - - /* unlock the driver code */ - spin_unlock_irqrestore(&priv->slock, flags); - - return NETDEV_TX_OK; - - drop_free: - ndev->stats.tx_dropped++; - spin_unlock_irqrestore(&priv->slock, flags); - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -static inline int -islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb) -{ - /* The card reports full 802.11 packets but with a 20 bytes - * header and without the FCS. But there a is a bit that - * indicates if the packet is corrupted :-) */ - struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data; - - if (hdr->flags & 0x01) - /* This one is bad. Drop it ! */ - return -1; - if (priv->ndev->type == ARPHRD_IEEE80211_PRISM) { - struct avs_80211_1_header *avs; - /* extract the relevant data from the header */ - u32 clock = le32_to_cpu(hdr->clock); - u8 rate = hdr->rate; - u16 freq = le16_to_cpu(hdr->freq); - u8 rssi = hdr->rssi; - - skb_pull(*skb, sizeof (struct rfmon_header)); - - if (skb_headroom(*skb) < sizeof (struct avs_80211_1_header)) { - struct sk_buff *newskb = skb_copy_expand(*skb, - sizeof (struct - avs_80211_1_header), - 0, GFP_ATOMIC); - if (newskb) { - dev_kfree_skb_irq(*skb); - *skb = newskb; - } else - return -1; - /* This behavior is not very subtile... */ - } - - /* make room for the new header and fill it. */ - avs = skb_push(*skb, sizeof(struct avs_80211_1_header)); - - avs->version = cpu_to_be32(P80211CAPTURE_VERSION); - avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header)); - avs->mactime = cpu_to_be64(clock); - avs->hosttime = cpu_to_be64(jiffies); - avs->phytype = cpu_to_be32(6); /*OFDM: 6 for (g), 8 for (a) */ - avs->channel = cpu_to_be32(channel_of_freq(freq)); - avs->datarate = cpu_to_be32(rate * 5); - avs->antenna = cpu_to_be32(0); /*unknown */ - avs->priority = cpu_to_be32(0); /*unknown */ - avs->ssi_type = cpu_to_be32(3); /*2: dBm, 3: raw RSSI */ - avs->ssi_signal = cpu_to_be32(rssi & 0x7f); - avs->ssi_noise = cpu_to_be32(priv->local_iwstatistics.qual.noise); /*better than 'undefined', I assume */ - avs->preamble = cpu_to_be32(0); /*unknown */ - avs->encoding = cpu_to_be32(0); /*unknown */ - } else - skb_pull(*skb, sizeof (struct rfmon_header)); - - (*skb)->protocol = htons(ETH_P_802_2); - skb_reset_mac_header(*skb); - (*skb)->pkt_type = PACKET_OTHERHOST; - - return 0; -} - -int -islpci_eth_receive(islpci_private *priv) -{ - struct net_device *ndev = priv->ndev; - isl38xx_control_block *control_block = priv->control_block; - struct sk_buff *skb; - u16 size; - u32 index, offset; - unsigned char *src; - int discard = 0; - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n"); -#endif - - /* the device has written an Ethernet frame in the data area - * of the sk_buff without updating the structure, do it now */ - index = priv->free_data_rx % ISL38XX_CB_RX_QSIZE; - size = le16_to_cpu(control_block->rx_data_low[index].size); - skb = priv->data_low_rx[index]; - offset = ((unsigned long) - le32_to_cpu(control_block->rx_data_low[index].address) - - (unsigned long) skb->data) & 3; - -#if VERBOSE > SHOW_ERROR_MESSAGES - 
DEBUG(SHOW_TRACING, - "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n", - control_block->rx_data_low[priv->free_data_rx].address, skb->data, - skb->len, offset, skb->truesize); -#endif - - /* delete the streaming DMA mapping before processing the skb */ - dma_unmap_single(&priv->pdev->dev, priv->pci_map_rx_address[index], - MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE); - - /* update the skb structure and align the buffer */ - skb_put(skb, size); - if (offset) { - /* shift the buffer allocation offset bytes to get the right frame */ - skb_pull(skb, 2); - skb_put(skb, 2); - } -#if VERBOSE > SHOW_ERROR_MESSAGES - /* display the buffer contents for debugging */ - DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data); - display_buffer((char *) skb->data, skb->len); -#endif - - /* check whether WDS is enabled and whether the data frame is a WDS frame */ - - if (init_wds) { - /* WDS enabled, check for the wds address on the first 6 bytes of the buffer */ - src = skb->data + 6; - memmove(skb->data, src, skb->len - 6); - skb_trim(skb, skb->len - 6); - } -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb); - DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len); - - /* display the buffer contents for debugging */ - DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data); - display_buffer((char *) skb->data, skb->len); -#endif - /* take care of monitor mode and spy monitoring. */ - if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) { - skb->dev = ndev; - discard = islpci_monitor_rx(priv, &skb); - } else { - if (unlikely(skb->data[2 * ETH_ALEN] == 0)) { - /* The packet has a rx_annex. Read it for spy monitoring, Then - * remove it, while keeping the 2 leading MAC addr. - */ - struct iw_quality wstats; - struct rx_annex_header *annex = - (struct rx_annex_header *) skb->data; - wstats.level = annex->rfmon.rssi; - /* The noise value can be a bit outdated if nobody's - * reading wireless stats... 
*/ - wstats.noise = priv->local_iwstatistics.qual.noise; - wstats.qual = wstats.level - wstats.noise; - wstats.updated = 0x07; - /* Update spy records */ - wireless_spy_update(ndev, annex->addr2, &wstats); - - skb_copy_from_linear_data(skb, - (skb->data + - sizeof(struct rfmon_header)), - 2 * ETH_ALEN); - skb_pull(skb, sizeof (struct rfmon_header)); - } - skb->protocol = eth_type_trans(skb, ndev); - } - skb->ip_summed = CHECKSUM_NONE; - ndev->stats.rx_packets++; - ndev->stats.rx_bytes += size; - - /* deliver the skb to the network layer */ -#ifdef ISLPCI_ETH_DEBUG - printk - ("islpci_eth_receive:netif_rx %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", - skb->data[0], skb->data[1], skb->data[2], skb->data[3], - skb->data[4], skb->data[5]); -#endif - if (unlikely(discard)) { - dev_kfree_skb_irq(skb); - skb = NULL; - } else - netif_rx(skb); - - /* increment the read index for the rx data low queue */ - priv->free_data_rx++; - - /* add one or more sk_buff structures */ - while (index = - le32_to_cpu(control_block-> - driver_curr_frag[ISL38XX_CB_RX_DATA_LQ]), - index - priv->free_data_rx < ISL38XX_CB_RX_QSIZE) { - /* allocate an sk_buff for received data frames storage - * include any required allignment operations */ - skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2); - if (unlikely(skb == NULL)) { - /* error allocating an sk_buff structure elements */ - DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n"); - break; - } - skb_reserve(skb, (4 - (long) skb->data) & 0x03); - /* store the new skb structure pointer */ - index = index % ISL38XX_CB_RX_QSIZE; - priv->data_low_rx[index] = skb; - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, - "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n", - skb, skb->data, skb->len, index, skb->truesize); -#endif - - /* set the streaming DMA mapping for proper PCI bus operation */ - priv->pci_map_rx_address[index] = - dma_map_single(&priv->pdev->dev, (void *)skb->data, - MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE); - if (dma_mapping_error(&priv->pdev->dev, priv->pci_map_rx_address[index])) { - /* error mapping the buffer to device accessible memory address */ - DEBUG(SHOW_ERROR_MESSAGES, - "Error mapping DMA address\n"); - - /* free the skbuf structure before aborting */ - dev_kfree_skb_irq(skb); - skb = NULL; - break; - } - /* update the fragment address */ - control_block->rx_data_low[index].address = - cpu_to_le32((u32)priv->pci_map_rx_address[index]); - wmb(); - - /* increment the driver read pointer */ - le32_add_cpu(&control_block-> - driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1); - } - - /* trigger the device */ - islpci_trigger(priv); - - return 0; -} - -void -islpci_do_reset_and_wake(struct work_struct *work) -{ - islpci_private *priv = container_of(work, islpci_private, reset_task); - - islpci_reset(priv, 1); - priv->reset_task_pending = 0; - smp_wmb(); - netif_wake_queue(priv->ndev); -} - -void -islpci_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue) -{ - islpci_private *priv = netdev_priv(ndev); - - /* increment the transmit error counter */ - ndev->stats.tx_errors++; - - if (!priv->reset_task_pending) { - printk(KERN_WARNING - "%s: tx_timeout, scheduling reset", ndev->name); - netif_stop_queue(ndev); - priv->reset_task_pending = 1; - schedule_work(&priv->reset_task); - } else { - printk(KERN_WARNING - "%s: tx_timeout, waiting for reset", ndev->name); - } -} diff --git a/drivers/net/wireless/intersil/prism54/islpci_eth.h b/drivers/net/wireless/intersil/prism54/islpci_eth.h deleted file mode 100644 index 
e433ccdc526b..000000000000 --- a/drivers/net/wireless/intersil/prism54/islpci_eth.h +++ /dev/null @@ -1,59 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2002 Intersil Americas Inc. - */ - -#ifndef _ISLPCI_ETH_H -#define _ISLPCI_ETH_H - -#include "isl_38xx.h" -#include "islpci_dev.h" - -struct rfmon_header { - __le16 unk0; /* = 0x0000 */ - __le16 length; /* = 0x1400 */ - __le32 clock; /* 1MHz clock */ - u8 flags; - u8 unk1; - u8 rate; - u8 unk2; - __le16 freq; - __le16 unk3; - u8 rssi; - u8 padding[3]; -} __packed; - -struct rx_annex_header { - u8 addr1[ETH_ALEN]; - u8 addr2[ETH_ALEN]; - struct rfmon_header rfmon; -} __packed; - -/* wlan-ng (and hopefully others) AVS header, version one. Fields in - * network byte order. */ -#define P80211CAPTURE_VERSION 0x80211001 - -struct avs_80211_1_header { - __be32 version; - __be32 length; - __be64 mactime; - __be64 hosttime; - __be32 phytype; - __be32 channel; - __be32 datarate; - __be32 antenna; - __be32 priority; - __be32 ssi_type; - __be32 ssi_signal; - __be32 ssi_noise; - __be32 preamble; - __be32 encoding; -}; - -void islpci_eth_cleanup_transmit(islpci_private *, isl38xx_control_block *); -netdev_tx_t islpci_eth_transmit(struct sk_buff *, struct net_device *); -int islpci_eth_receive(islpci_private *); -void islpci_eth_tx_timeout(struct net_device *, unsigned int txqueue); -void islpci_do_reset_and_wake(struct work_struct *); - -#endif /* _ISL_GEN_H */ diff --git a/drivers/net/wireless/intersil/prism54/islpci_hotplug.c b/drivers/net/wireless/intersil/prism54/islpci_hotplug.c deleted file mode 100644 index 31a1e61326ff..000000000000 --- a/drivers/net/wireless/intersil/prism54/islpci_hotplug.c +++ /dev/null @@ -1,316 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2002 Intersil Americas Inc. 
- * Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org> - */ - -#include <linux/interrupt.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/init.h> /* For __init, __exit */ -#include <linux/dma-mapping.h> - -#include "prismcompat.h" -#include "islpci_dev.h" -#include "islpci_mgt.h" /* for pc_debug */ -#include "isl_oid.h" - -MODULE_AUTHOR("[Intersil] R.Bastings and W.Termorshuizen, The prism54.org Development Team <prism54-devel@prism54.org>"); -MODULE_DESCRIPTION("The Prism54 802.11 Wireless LAN adapter"); -MODULE_LICENSE("GPL"); - -static int init_pcitm = 0; -module_param(init_pcitm, int, 0); - -/* In this order: vendor, device, subvendor, subdevice, class, class_mask, - * driver_data - * If you have an update for this please contact prism54-devel@prism54.org - * The latest list can be found at http://wireless.wiki.kernel.org/en/users/Drivers/p54 - */ -static const struct pci_device_id prism54_id_tbl[] = { - /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ - { - 0x1260, 0x3890, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, 0 - }, - - /* 3COM 3CRWE154G72 Wireless LAN adapter */ - { - PCI_VDEVICE(3COM, 0x6001), 0 - }, - - /* Intersil PRISM Indigo Wireless LAN adapter */ - { - 0x1260, 0x3877, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, 0 - }, - - /* Intersil PRISM Javelin/Xbow Wireless LAN adapter */ - { - 0x1260, 0x3886, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, 0 - }, - - /* End of list */ - {0,0,0,0,0,0,0} -}; - -/* register the device with the Hotplug facilities of the kernel */ -MODULE_DEVICE_TABLE(pci, prism54_id_tbl); - -static int prism54_probe(struct pci_dev *, const struct pci_device_id *); -static void prism54_remove(struct pci_dev *); -static int __maybe_unused prism54_suspend(struct device *); -static int __maybe_unused prism54_resume(struct device *); - -static SIMPLE_DEV_PM_OPS(prism54_pm_ops, prism54_suspend, prism54_resume); - -static struct pci_driver prism54_driver = { - .name = DRV_NAME, - .id_table = prism54_id_tbl, - .probe = prism54_probe, - .remove = prism54_remove, - .driver.pm = &prism54_pm_ops, -}; - -/****************************************************************************** - Module initialization functions -******************************************************************************/ - -static int -prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct net_device *ndev; - u8 latency_tmr; - u32 mem_addr; - islpci_private *priv; - int rvalue; - - /* Enable the pci device */ - if (pci_enable_device(pdev)) { - printk(KERN_ERR "%s: pci_enable_device() failed.\n", DRV_NAME); - return -ENODEV; - } - - /* check whether the latency timer is set correctly */ - pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_tmr); -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, "latency timer: %x\n", latency_tmr); -#endif - if (latency_tmr < PCIDEVICE_LATENCY_TIMER_MIN) { - /* set the latency timer */ - pci_write_config_byte(pdev, PCI_LATENCY_TIMER, - PCIDEVICE_LATENCY_TIMER_VAL); - } - - /* enable PCI DMA */ - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { - printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME); - goto do_pci_disable_device; - } - - /* 0x40 is the programmable timer to configure the response timeout (TRDY_TIMEOUT) - * 0x41 is the programmable timer to configure the retry timeout (RETRY_TIMEOUT) - * The RETRY_TIMEOUT is used to set the number of retries that the core, as a - * Master, will perform before abandoning a cycle. 
The default value for - * RETRY_TIMEOUT is 0x80, which far exceeds the PCI 2.1 requirement for new - * devices. A write of zero to the RETRY_TIMEOUT register disables this - * function to allow use with any non-compliant legacy devices that may - * execute more retries. - * - * Writing zero to both these two registers will disable both timeouts and - * *can* solve problems caused by devices that are slow to respond. - * Make this configurable - MSW - */ - if ( init_pcitm >= 0 ) { - pci_write_config_byte(pdev, 0x40, (u8)init_pcitm); - pci_write_config_byte(pdev, 0x41, (u8)init_pcitm); - } else { - printk(KERN_INFO "PCI TRDY/RETRY unchanged\n"); - } - - /* request the pci device I/O regions */ - rvalue = pci_request_regions(pdev, DRV_NAME); - if (rvalue) { - printk(KERN_ERR "%s: pci_request_regions failure (rc=%d)\n", - DRV_NAME, rvalue); - goto do_pci_disable_device; - } - - /* check if the memory window is indeed set */ - rvalue = pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &mem_addr); - if (rvalue || !mem_addr) { - printk(KERN_ERR "%s: PCI device memory region not configured; fix your BIOS or CardBus bridge/drivers\n", - DRV_NAME); - goto do_pci_release_regions; - } - - /* enable PCI bus-mastering */ - DEBUG(SHOW_TRACING, "%s: pci_set_master(pdev)\n", DRV_NAME); - pci_set_master(pdev); - - /* enable MWI */ - pci_try_set_mwi(pdev); - - /* setup the network device interface and its structure */ - if (!(ndev = islpci_setup(pdev))) { - /* error configuring the driver as a network device */ - printk(KERN_ERR "%s: could not configure network device\n", - DRV_NAME); - goto do_pci_clear_mwi; - } - - priv = netdev_priv(ndev); - islpci_set_state(priv, PRV_STATE_PREBOOT); /* we are attempting to boot */ - - /* card is in unknown state yet, might have some interrupts pending */ - isl38xx_disable_interrupts(priv->device_base); - - /* request for the interrupt before uploading the firmware */ - rvalue = request_irq(pdev->irq, islpci_interrupt, - IRQF_SHARED, ndev->name, priv); - - if (rvalue) { - /* error, could not hook the handler to the irq */ - printk(KERN_ERR "%s: could not install IRQ handler\n", - ndev->name); - goto do_unregister_netdev; - } - - /* firmware upload is triggered in islpci_open */ - - return 0; - - do_unregister_netdev: - unregister_netdev(ndev); - islpci_free_memory(priv); - free_netdev(ndev); - priv = NULL; - do_pci_clear_mwi: - pci_clear_mwi(pdev); - do_pci_release_regions: - pci_release_regions(pdev); - do_pci_disable_device: - pci_disable_device(pdev); - return -EIO; -} - -/* set by cleanup_module */ -static volatile int __in_cleanup_module = 0; - -/* this one removes one(!!) instance only */ -static void -prism54_remove(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - islpci_private *priv = ndev ? netdev_priv(ndev) : NULL; - BUG_ON(!priv); - - if (!__in_cleanup_module) { - printk(KERN_DEBUG "%s: hot unplug detected\n", ndev->name); - islpci_set_state(priv, PRV_STATE_OFF); - } - - printk(KERN_DEBUG "%s: removing device\n", ndev->name); - - unregister_netdev(ndev); - - /* free the interrupt request */ - - if (islpci_get_state(priv) != PRV_STATE_OFF) { - isl38xx_disable_interrupts(priv->device_base); - islpci_set_state(priv, PRV_STATE_OFF); - /* This bellow causes a lockup at rmmod time. 
It might be - * because some interrupts still linger after rmmod time, - * see bug #17 */ - /* pci_set_power_state(pdev, 3);*/ /* try to power-off */ - } - - free_irq(pdev->irq, priv); - - /* free the PCI memory and unmap the remapped page */ - islpci_free_memory(priv); - - free_netdev(ndev); - priv = NULL; - - pci_clear_mwi(pdev); - - pci_release_regions(pdev); - - pci_disable_device(pdev); -} - -static int __maybe_unused -prism54_suspend(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - islpci_private *priv = ndev ? netdev_priv(ndev) : NULL; - BUG_ON(!priv); - - /* tell the device not to trigger interrupts for now... */ - isl38xx_disable_interrupts(priv->device_base); - - /* from now on assume the hardware was already powered down - and don't touch it anymore */ - islpci_set_state(priv, PRV_STATE_OFF); - - netif_stop_queue(ndev); - netif_device_detach(ndev); - - return 0; -} - -static int __maybe_unused -prism54_resume(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - islpci_private *priv = ndev ? netdev_priv(ndev) : NULL; - - BUG_ON(!priv); - - printk(KERN_NOTICE "%s: got resume request\n", ndev->name); - - /* alright let's go into the PREBOOT state */ - islpci_reset(priv, 1); - - netif_device_attach(ndev); - netif_start_queue(ndev); - - return 0; -} - -static int __init -prism54_module_init(void) -{ - printk(KERN_INFO "Loaded %s driver, version %s\n", - DRV_NAME, DRV_VERSION); - - __bug_on_wrong_struct_sizes (); - - return pci_register_driver(&prism54_driver); -} - -/* by the time prism54_module_exit() terminates, as a postcondition - * all instances will have been destroyed by calls to - * prism54_remove() */ -static void __exit -prism54_module_exit(void) -{ - __in_cleanup_module = 1; - - pci_unregister_driver(&prism54_driver); - - printk(KERN_INFO "Unloaded %s driver\n", DRV_NAME); - - __in_cleanup_module = 0; -} - -/* register entry points */ -module_init(prism54_module_init); -module_exit(prism54_module_exit); -/* EOF */ diff --git a/drivers/net/wireless/intersil/prism54/islpci_mgt.c b/drivers/net/wireless/intersil/prism54/islpci_mgt.c deleted file mode 100644 index 0c7fb76c7d1c..000000000000 --- a/drivers/net/wireless/intersil/prism54/islpci_mgt.c +++ /dev/null @@ -1,491 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2002 Intersil Americas Inc. 
- * Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net> - */ - -#include <linux/netdevice.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/sched.h> -#include <linux/slab.h> - -#include <asm/io.h> -#include <linux/if_arp.h> - -#include "prismcompat.h" -#include "isl_38xx.h" -#include "islpci_mgt.h" -#include "isl_oid.h" /* additional types and defs for isl38xx fw */ -#include "isl_ioctl.h" - -#include <net/iw_handler.h> - -/****************************************************************************** - Global variable definition section -******************************************************************************/ -int pc_debug = VERBOSE; -module_param(pc_debug, int, 0); - -/****************************************************************************** - Driver general functions -******************************************************************************/ -#if VERBOSE > SHOW_ERROR_MESSAGES -void -display_buffer(char *buffer, int length) -{ - if ((pc_debug & SHOW_BUFFER_CONTENTS) == 0) - return; - - while (length > 0) { - printk("[%02x]", *buffer & 255); - length--; - buffer++; - } - - printk("\n"); -} -#endif - -/***************************************************************************** - Queue handling for management frames -******************************************************************************/ - -/* - * Helper function to create a PIMFOR management frame header. - */ -static void -pimfor_encode_header(int operation, u32 oid, u32 length, pimfor_header_t *h) -{ - h->version = PIMFOR_VERSION; - h->operation = operation; - h->device_id = PIMFOR_DEV_ID_MHLI_MIB; - h->flags = 0; - h->oid = cpu_to_be32(oid); - h->length = cpu_to_be32(length); -} - -/* - * Helper function to analyze a PIMFOR management frame header. - */ -static pimfor_header_t * -pimfor_decode_header(void *data, int len) -{ - pimfor_header_t *h = data; - - while ((void *) h < data + len) { - if (h->flags & PIMFOR_FLAG_LITTLE_ENDIAN) { - le32_to_cpus(&h->oid); - le32_to_cpus(&h->length); - } else { - be32_to_cpus(&h->oid); - be32_to_cpus(&h->length); - } - if (h->oid != OID_INL_TUNNEL) - return h; - h++; - } - return NULL; -} - -/* - * Fill the receive queue for management frames with fresh buffers. 
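/*
 * Illustrative use of the PIMFOR helpers above (not part of the original
 * file): a GET request is just an encoded header with a zero payload
 * length; the data arrives in the matching response frame. PIMFOR_OP_GET
 * is assumed to be the driver's GET opcode constant from islpci_mgt.h.
 */
static void build_get_request(void *frame, u32 oid)
{
	/* oid and length end up big-endian in the header; the receive path
	 * converts them back in place via pimfor_decode_header() */
	pimfor_encode_header(PIMFOR_OP_GET, oid, /* length */ 0,
			     (pimfor_header_t *) frame);
}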
- */ -int -islpci_mgmt_rx_fill(struct net_device *ndev) -{ - islpci_private *priv = netdev_priv(ndev); - isl38xx_control_block *cb = /* volatile not needed */ - (isl38xx_control_block *) priv->control_block; - u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]); - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n"); -#endif - - while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) { - u32 index = curr % ISL38XX_CB_MGMT_QSIZE; - struct islpci_membuf *buf = &priv->mgmt_rx[index]; - isl38xx_fragment *frag = &cb->rx_data_mgmt[index]; - - if (buf->mem == NULL) { - buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC); - if (!buf->mem) - return -ENOMEM; - buf->size = MGMT_FRAME_SIZE; - } - if (buf->pci_addr == 0) { - buf->pci_addr = dma_map_single(&priv->pdev->dev, - buf->mem, - MGMT_FRAME_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(&priv->pdev->dev, buf->pci_addr)) { - printk(KERN_WARNING - "Failed to make memory DMA'able.\n"); - return -ENOMEM; - } - } - - /* be safe: always reset control block information */ - frag->size = cpu_to_le16(MGMT_FRAME_SIZE); - frag->flags = 0; - frag->address = cpu_to_le32(buf->pci_addr); - curr++; - - /* The fragment address in the control block must have - * been written before announcing the frame buffer to - * device */ - wmb(); - cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr); - } - return 0; -} - -/* - * Create and transmit a management frame using "operation" and "oid", - * with arguments data/length. - * We either return an error and free the frame, or we return 0 and - * islpci_mgt_cleanup_transmit() frees the frame in the tx-done - * interrupt. - */ -static int -islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid, - void *data, int length) -{ - islpci_private *priv = netdev_priv(ndev); - isl38xx_control_block *cb = - (isl38xx_control_block *) priv->control_block; - void *p; - int err = -EINVAL; - unsigned long flags; - isl38xx_fragment *frag; - struct islpci_membuf buf; - u32 curr_frag; - int index; - int frag_len = length + PIMFOR_HEADER_SIZE; - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_transmit\n"); -#endif - - if (frag_len > MGMT_FRAME_SIZE) { - printk(KERN_DEBUG "%s: mgmt frame too large %d\n", - ndev->name, frag_len); - goto error; - } - - err = -ENOMEM; - p = buf.mem = kmalloc(frag_len, GFP_KERNEL); - if (!buf.mem) - goto error; - - buf.size = frag_len; - - /* create the header directly in the fragment data area */ - pimfor_encode_header(operation, oid, length, (pimfor_header_t *) p); - p += PIMFOR_HEADER_SIZE; - - if (data) - memcpy(p, data, length); - else - memset(p, 0, length); - -#if VERBOSE > SHOW_ERROR_MESSAGES - { - pimfor_header_t *h = buf.mem; - DEBUG(SHOW_PIMFOR_FRAMES, - "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n", - h->operation, oid, h->device_id, h->flags, length); - - /* display the buffer contents for debugging */ - display_buffer((char *) h, sizeof (pimfor_header_t)); - display_buffer(p, length); - } -#endif - - err = -ENOMEM; - buf.pci_addr = dma_map_single(&priv->pdev->dev, buf.mem, frag_len, - DMA_TO_DEVICE); - if (dma_mapping_error(&priv->pdev->dev, buf.pci_addr)) { - printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n", - ndev->name); - goto error_free; - } - - /* Protect the control block modifications against interrupts. 
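The management queues above run on free-running 32-bit counters: the driver never masks the counters themselves, it derives occupancy from their difference and the slot from the counter modulo ISL38XX_CB_MGMT_QSIZE, so u32 wraparound is benign. A self-contained sketch of that index arithmetic (names are illustrative):

    #include <stdint.h>

    /* Occupancy stays correct across u32 wraparound because the
     * subtraction is modular. */
    static inline uint32_t ring_occupied(uint32_t prod, uint32_t cons)
    {
        return prod - cons;
    }

    /* Slot addressed by a counter; the queue size need not be a
     * power of two. */
    static inline uint32_t ring_slot(uint32_t counter, uint32_t qsize)
    {
        return counter % qsize;
    }

    static inline int ring_has_room(uint32_t prod, uint32_t cons,
                                    uint32_t qsize)
    {
        return ring_occupied(prod, cons) < qsize;
    }

The deleted fill loop pairs this with a wmb() so the fragment descriptor is globally visible before the counter that publishes it to the device.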
*/ - spin_lock_irqsave(&priv->slock, flags); - curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ]); - if (curr_frag - priv->index_mgmt_tx >= ISL38XX_CB_MGMT_QSIZE) { - printk(KERN_WARNING "%s: mgmt tx queue is still full\n", - ndev->name); - goto error_unlock; - } - - /* commit the frame to the tx device queue */ - index = curr_frag % ISL38XX_CB_MGMT_QSIZE; - priv->mgmt_tx[index] = buf; - frag = &cb->tx_data_mgmt[index]; - frag->size = cpu_to_le16(frag_len); - frag->flags = 0; /* for any other than the last fragment, set to 1 */ - frag->address = cpu_to_le32(buf.pci_addr); - - /* The fragment address in the control block must have - * been written before announcing the frame buffer to - * device */ - wmb(); - cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ] = cpu_to_le32(curr_frag + 1); - spin_unlock_irqrestore(&priv->slock, flags); - - /* trigger the device */ - islpci_trigger(priv); - return 0; - - error_unlock: - spin_unlock_irqrestore(&priv->slock, flags); - error_free: - kfree(buf.mem); - error: - return err; -} - -/* - * Receive a management frame from the device. - * This can be an arbitrary number of traps, and at most one response - * frame for a previous request sent via islpci_mgt_transmit(). - */ -int -islpci_mgt_receive(struct net_device *ndev) -{ - islpci_private *priv = netdev_priv(ndev); - isl38xx_control_block *cb = - (isl38xx_control_block *) priv->control_block; - u32 curr_frag; - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n"); -#endif - - /* Only once per interrupt, determine fragment range to - * process. This avoids an endless loop (i.e. lockup) if - * frames come in faster than we can process them. */ - curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]); - barrier(); - - for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) { - pimfor_header_t *header; - u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE; - struct islpci_membuf *buf = &priv->mgmt_rx[index]; - u16 frag_len; - int size; - struct islpci_mgmtframe *frame; - - /* I have no idea (and no documentation) if flags != 0 - * is possible. Drop the frame, reuse the buffer. */ - if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) { - printk(KERN_WARNING "%s: unknown flags 0x%04x\n", - ndev->name, - le16_to_cpu(cb->rx_data_mgmt[index].flags)); - continue; - } - - /* The device only returns the size of the header(s) here. */ - frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size); - - /* - * We appear to have no way to tell the device the - * size of a receive buffer. Thus, if this check - * triggers, we likely have kernel heap corruption. */ - if (frag_len > MGMT_FRAME_SIZE) { - printk(KERN_WARNING - "%s: Bogus packet size of %d (%#x).\n", - ndev->name, frag_len, frag_len); - frag_len = MGMT_FRAME_SIZE; - } - - /* Ensure the results of device DMA are visible to the CPU. */ - dma_sync_single_for_cpu(&priv->pdev->dev, buf->pci_addr, - buf->size, DMA_FROM_DEVICE); - - /* Perform endianess conversion for PIMFOR header in-place. */ - header = pimfor_decode_header(buf->mem, frag_len); - if (!header) { - printk(KERN_WARNING "%s: no PIMFOR header found\n", - ndev->name); - continue; - } - - /* The device ID from the PIMFOR packet received from - * the MVC is always 0. We forward a sensible device_id. - * Not that anyone upstream would care... 
*/ - header->device_id = priv->ndev->ifindex; - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_PIMFOR_FRAMES, - "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n", - header->operation, header->oid, header->device_id, - header->flags, header->length); - - /* display the buffer contents for debugging */ - display_buffer((char *) header, PIMFOR_HEADER_SIZE); - display_buffer((char *) header + PIMFOR_HEADER_SIZE, - header->length); -#endif - - /* nobody sends these */ - if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) { - printk(KERN_DEBUG - "%s: errant PIMFOR application frame\n", - ndev->name); - continue; - } - - /* Determine frame size, skipping OID_INL_TUNNEL headers. */ - size = PIMFOR_HEADER_SIZE + header->length; - frame = kmalloc(sizeof(struct islpci_mgmtframe) + size, - GFP_ATOMIC); - if (!frame) - continue; - - frame->ndev = ndev; - memcpy(&frame->buf, header, size); - frame->header = (pimfor_header_t *) frame->buf; - frame->data = frame->buf + PIMFOR_HEADER_SIZE; - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_PIMFOR_FRAMES, - "frame: header: %p, data: %p, size: %d\n", - frame->header, frame->data, size); -#endif - - if (header->operation == PIMFOR_OP_TRAP) { -#if VERBOSE > SHOW_ERROR_MESSAGES - printk(KERN_DEBUG - "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n", - header->oid, header->device_id, header->flags, - header->length); -#endif - - /* Create work to handle trap out of interrupt - * context. */ - INIT_WORK(&frame->ws, prism54_process_trap); - schedule_work(&frame->ws); - - } else { - /* Signal the one waiting process that a response - * has been received. */ - if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) { - printk(KERN_WARNING - "%s: mgmt response not collected\n", - ndev->name); - kfree(frame); - } -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n"); -#endif - wake_up(&priv->mgmt_wqueue); - } - - } - - return 0; -} - -/* - * Cleanup the transmit queue by freeing all frames handled by the device. - */ -void -islpci_mgt_cleanup_transmit(struct net_device *ndev) -{ - islpci_private *priv = netdev_priv(ndev); - isl38xx_control_block *cb = /* volatile not needed */ - (isl38xx_control_block *) priv->control_block; - u32 curr_frag; - -#if VERBOSE > SHOW_ERROR_MESSAGES - DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_cleanup_transmit\n"); -#endif - - /* Only once per cleanup, determine fragment range to - * process. This avoids an endless loop (i.e. lockup) if - * the device became confused, incrementing device_curr_frag - * rapidly. */ - curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_TX_MGMTQ]); - barrier(); - - for (; priv->index_mgmt_tx < curr_frag; priv->index_mgmt_tx++) { - int index = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE; - struct islpci_membuf *buf = &priv->mgmt_tx[index]; - dma_unmap_single(&priv->pdev->dev, buf->pci_addr, buf->size, - DMA_TO_DEVICE); - buf->pci_addr = 0; - kfree(buf->mem); - buf->mem = NULL; - buf->size = 0; - } -} - -/* - * Perform one request-response transaction to the device. 
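Response delivery above is a single-slot mailbox: the interrupt path publishes each response by swapping it into priv->mgmt_received with xchg(), freeing any uncollected predecessor, and the waiter takes ownership by swapping NULL back in. Condensed (kernel idiom; the mbox naming is illustrative):

    /* Producer, interrupt context: publish one response frame. */
    old = xchg(&mbox->slot, frame);
    if (old) {
        printk(KERN_WARNING "previous response not collected\n");
        kfree(old);                 /* don't leak the stale frame */
    }
    wake_up(&mbox->waitq);

    /* Consumer, process context: take exclusive ownership. */
    frame = xchg(&mbox->slot, NULL);
    if (frame) {
        /* ... inspect frame, then kfree(frame) when done ... */
    }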
- */ -int -islpci_mgt_transaction(struct net_device *ndev, - int operation, unsigned long oid, - void *senddata, int sendlen, - struct islpci_mgmtframe **recvframe) -{ - islpci_private *priv = netdev_priv(ndev); - const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10); - long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies; - int err; - DEFINE_WAIT(wait); - - *recvframe = NULL; - - if (mutex_lock_interruptible(&priv->mgmt_lock)) - return -ERESTARTSYS; - - prepare_to_wait(&priv->mgmt_wqueue, &wait, TASK_UNINTERRUPTIBLE); - err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen); - if (err) - goto out; - - err = -ETIMEDOUT; - while (timeout_left > 0) { - int timeleft; - struct islpci_mgmtframe *frame; - - timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies); - frame = xchg(&priv->mgmt_received, NULL); - if (frame) { - if (frame->header->oid == oid) { - *recvframe = frame; - err = 0; - goto out; - } else { - printk(KERN_DEBUG - "%s: expecting oid 0x%x, received 0x%x.\n", - ndev->name, (unsigned int) oid, - frame->header->oid); - kfree(frame); - frame = NULL; - } - } - if (timeleft == 0) { - printk(KERN_DEBUG - "%s: timeout waiting for mgmt response %lu, " - "triggering device\n", - ndev->name, timeout_left); - islpci_trigger(priv); - } - timeout_left += timeleft - wait_cycle_jiffies; - } - printk(KERN_WARNING "%s: timeout waiting for mgmt response\n", - ndev->name); - - /* TODO: we should reset the device here */ - out: - finish_wait(&priv->mgmt_wqueue, &wait); - mutex_unlock(&priv->mgmt_lock); - return err; -} - diff --git a/drivers/net/wireless/intersil/prism54/islpci_mgt.h b/drivers/net/wireless/intersil/prism54/islpci_mgt.h deleted file mode 100644 index 1f87d0aea60c..000000000000 --- a/drivers/net/wireless/intersil/prism54/islpci_mgt.h +++ /dev/null @@ -1,126 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2002 Intersil Americas Inc. - * Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu> - */ - -#ifndef _ISLPCI_MGT_H -#define _ISLPCI_MGT_H - -#include <linux/wireless.h> -#include <linux/skbuff.h> -#include <linux/slab.h> - -/* - * Function definitions - */ - -#define K_DEBUG(f, m, args...) do { if(f & m) printk(KERN_DEBUG args); } while(0) -#define DEBUG(f, args...) 
K_DEBUG(f, pc_debug, args) - -extern int pc_debug; -#define init_wds 0 /* help compiler optimize away dead code */ - - -/* General driver definitions */ -#define PCIDEVICE_LATENCY_TIMER_MIN 0x40 -#define PCIDEVICE_LATENCY_TIMER_VAL 0x50 - -/* Debugging verbose definitions */ -#define SHOW_NOTHING 0x00 /* overrules everything */ -#define SHOW_ANYTHING 0xFF -#define SHOW_ERROR_MESSAGES 0x01 -#define SHOW_TRAPS 0x02 -#define SHOW_FUNCTION_CALLS 0x04 -#define SHOW_TRACING 0x08 -#define SHOW_QUEUE_INDEXES 0x10 -#define SHOW_PIMFOR_FRAMES 0x20 -#define SHOW_BUFFER_CONTENTS 0x40 -#define VERBOSE 0x01 - -/* Default card definitions */ -#define CARD_DEFAULT_CHANNEL 6 -#define CARD_DEFAULT_MODE INL_MODE_CLIENT -#define CARD_DEFAULT_IW_MODE IW_MODE_INFRA -#define CARD_DEFAULT_BSSTYPE DOT11_BSSTYPE_INFRA -#define CARD_DEFAULT_CLIENT_SSID "" -#define CARD_DEFAULT_AP_SSID "default" -#define CARD_DEFAULT_KEY1 "default_key_1" -#define CARD_DEFAULT_KEY2 "default_key_2" -#define CARD_DEFAULT_KEY3 "default_key_3" -#define CARD_DEFAULT_KEY4 "default_key_4" -#define CARD_DEFAULT_WEP 0 -#define CARD_DEFAULT_FILTER 0 -#define CARD_DEFAULT_WDS 0 -#define CARD_DEFAULT_AUTHEN DOT11_AUTH_OS -#define CARD_DEFAULT_DOT1X 0 -#define CARD_DEFAULT_MLME_MODE DOT11_MLME_AUTO -#define CARD_DEFAULT_CONFORMANCE OID_INL_CONFORMANCE_NONE -#define CARD_DEFAULT_PROFILE DOT11_PROFILE_MIXED_G_WIFI -#define CARD_DEFAULT_MAXFRAMEBURST DOT11_MAXFRAMEBURST_MIXED_SAFE - -/* PIMFOR package definitions */ -#define PIMFOR_ETHERTYPE 0x8828 -#define PIMFOR_HEADER_SIZE 12 -#define PIMFOR_VERSION 1 -#define PIMFOR_OP_GET 0 -#define PIMFOR_OP_SET 1 -#define PIMFOR_OP_RESPONSE 2 -#define PIMFOR_OP_ERROR 3 -#define PIMFOR_OP_TRAP 4 -#define PIMFOR_OP_RESERVED 5 /* till 255 */ -#define PIMFOR_DEV_ID_MHLI_MIB 0 -#define PIMFOR_FLAG_APPLIC_ORIGIN 0x01 -#define PIMFOR_FLAG_LITTLE_ENDIAN 0x02 - -void display_buffer(char *, int); - -/* - * Type definition section - * - * the structure defines only the header allowing copyless - * frame handling - */ -typedef struct { - u8 version; - u8 operation; - u32 oid; - u8 device_id; - u8 flags; - u32 length; -} __packed -pimfor_header_t; - -/* A received and interrupt-processed management frame, either for - * schedule_work(prism54_process_trap) or for priv->mgmt_received, - * processed by islpci_mgt_transaction(). 
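The islpci_mgmtframe container declared just below ends in a flexible array member buf[]; header and data are interior pointers into it, so islpci_mgt_receive() above carries header plus payload in one allocation and islpci_mgt_release() tears the whole frame down with a single kfree(). The allocation shape, condensed:

    size  = PIMFOR_HEADER_SIZE + header->length;
    frame = kmalloc(sizeof(struct islpci_mgmtframe) + size, GFP_ATOMIC);
    if (frame) {
        memcpy(frame->buf, header, size);   /* header + payload, one copy */
        frame->header = (pimfor_header_t *) frame->buf;
        frame->data   = frame->buf + PIMFOR_HEADER_SIZE;
    }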
*/ -struct islpci_mgmtframe { - struct net_device *ndev; /* pointer to network device */ - pimfor_header_t *header; /* payload header, points into buf */ - void *data; /* payload ex header, points into buf */ - struct work_struct ws; /* argument for schedule_work() */ - char buf[]; /* fragment buffer */ -}; - -int -islpci_mgt_receive(struct net_device *ndev); - -int -islpci_mgmt_rx_fill(struct net_device *ndev); - -void -islpci_mgt_cleanup_transmit(struct net_device *ndev); - -int -islpci_mgt_transaction(struct net_device *ndev, - int operation, unsigned long oid, - void *senddata, int sendlen, - struct islpci_mgmtframe **recvframe); - -static inline void -islpci_mgt_release(struct islpci_mgmtframe *frame) -{ - kfree(frame); -} - -#endif /* _ISLPCI_MGT_H */ diff --git a/drivers/net/wireless/intersil/prism54/oid_mgt.c b/drivers/net/wireless/intersil/prism54/oid_mgt.c deleted file mode 100644 index 9fd307ca4b6d..000000000000 --- a/drivers/net/wireless/intersil/prism54/oid_mgt.c +++ /dev/null @@ -1,889 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2003,2004 Aurelien Alleaume <slts@free.fr> - */ - -#include <linux/kernel.h> -#include <linux/slab.h> - -#include "prismcompat.h" -#include "islpci_dev.h" -#include "islpci_mgt.h" -#include "isl_oid.h" -#include "oid_mgt.h" -#include "isl_ioctl.h" - -/* to convert between channel and freq */ -static const int frequency_list_bg[] = { 2412, 2417, 2422, 2427, 2432, - 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484 -}; - -int -channel_of_freq(int f) -{ - int c = 0; - - if ((f >= 2412) && (f <= 2484)) { - while ((c < 14) && (f != frequency_list_bg[c])) - c++; - return (c >= 14) ? 0 : ++c; - } else if ((f >= (int) 5000) && (f <= (int) 6000)) { - return ( (f - 5000) / 5 ); - } else - return 0; -} - -#define OID_STRUCT(name,oid,s,t) [name] = {oid, 0, sizeof(s), t} -#define OID_STRUCT_C(name,oid,s,t) OID_STRUCT(name,oid,s,t | OID_FLAG_CACHED) -#define OID_U32(name,oid) OID_STRUCT(name,oid,u32,OID_TYPE_U32) -#define OID_U32_C(name,oid) OID_STRUCT_C(name,oid,u32,OID_TYPE_U32) -#define OID_STRUCT_MLME(name,oid) OID_STRUCT(name,oid,struct obj_mlme,OID_TYPE_MLME) -#define OID_STRUCT_MLMEEX(name,oid) OID_STRUCT(name,oid,struct obj_mlmeex,OID_TYPE_MLMEEX) - -#define OID_UNKNOWN(name,oid) OID_STRUCT(name,oid,0,0) - -struct oid_t isl_oid[] = { - OID_STRUCT(GEN_OID_MACADDRESS, 0x00000000, u8[6], OID_TYPE_ADDR), - OID_U32(GEN_OID_LINKSTATE, 0x00000001), - OID_UNKNOWN(GEN_OID_WATCHDOG, 0x00000002), - OID_UNKNOWN(GEN_OID_MIBOP, 0x00000003), - OID_UNKNOWN(GEN_OID_OPTIONS, 0x00000004), - OID_UNKNOWN(GEN_OID_LEDCONFIG, 0x00000005), - - /* 802.11 */ - OID_U32_C(DOT11_OID_BSSTYPE, 0x10000000), - OID_STRUCT_C(DOT11_OID_BSSID, 0x10000001, u8[6], OID_TYPE_RAW), - OID_STRUCT_C(DOT11_OID_SSID, 0x10000002, struct obj_ssid, - OID_TYPE_SSID), - OID_U32(DOT11_OID_STATE, 0x10000003), - OID_U32(DOT11_OID_AID, 0x10000004), - OID_STRUCT(DOT11_OID_COUNTRYSTRING, 0x10000005, u8[4], OID_TYPE_RAW), - OID_STRUCT_C(DOT11_OID_SSIDOVERRIDE, 0x10000006, struct obj_ssid, - OID_TYPE_SSID), - - OID_U32(DOT11_OID_MEDIUMLIMIT, 0x11000000), - OID_U32_C(DOT11_OID_BEACONPERIOD, 0x11000001), - OID_U32(DOT11_OID_DTIMPERIOD, 0x11000002), - OID_U32(DOT11_OID_ATIMWINDOW, 0x11000003), - OID_U32(DOT11_OID_LISTENINTERVAL, 0x11000004), - OID_U32(DOT11_OID_CFPPERIOD, 0x11000005), - OID_U32(DOT11_OID_CFPDURATION, 0x11000006), - - OID_U32_C(DOT11_OID_AUTHENABLE, 0x12000000), - OID_U32_C(DOT11_OID_PRIVACYINVOKED, 0x12000001), - OID_U32_C(DOT11_OID_EXUNENCRYPTED, 0x12000002), - 
OID_U32_C(DOT11_OID_DEFKEYID, 0x12000003), - [DOT11_OID_DEFKEYX] = {0x12000004, 3, sizeof (struct obj_key), - OID_FLAG_CACHED | OID_TYPE_KEY}, /* DOT11_OID_DEFKEY1,...DOT11_OID_DEFKEY4 */ - OID_UNKNOWN(DOT11_OID_STAKEY, 0x12000008), - OID_U32(DOT11_OID_REKEYTHRESHOLD, 0x12000009), - OID_UNKNOWN(DOT11_OID_STASC, 0x1200000a), - - OID_U32(DOT11_OID_PRIVTXREJECTED, 0x1a000000), - OID_U32(DOT11_OID_PRIVRXPLAIN, 0x1a000001), - OID_U32(DOT11_OID_PRIVRXFAILED, 0x1a000002), - OID_U32(DOT11_OID_PRIVRXNOKEY, 0x1a000003), - - OID_U32_C(DOT11_OID_RTSTHRESH, 0x13000000), - OID_U32_C(DOT11_OID_FRAGTHRESH, 0x13000001), - OID_U32_C(DOT11_OID_SHORTRETRIES, 0x13000002), - OID_U32_C(DOT11_OID_LONGRETRIES, 0x13000003), - OID_U32_C(DOT11_OID_MAXTXLIFETIME, 0x13000004), - OID_U32(DOT11_OID_MAXRXLIFETIME, 0x13000005), - OID_U32(DOT11_OID_AUTHRESPTIMEOUT, 0x13000006), - OID_U32(DOT11_OID_ASSOCRESPTIMEOUT, 0x13000007), - - OID_UNKNOWN(DOT11_OID_ALOFT_TABLE, 0x1d000000), - OID_UNKNOWN(DOT11_OID_ALOFT_CTRL_TABLE, 0x1d000001), - OID_UNKNOWN(DOT11_OID_ALOFT_RETREAT, 0x1d000002), - OID_UNKNOWN(DOT11_OID_ALOFT_PROGRESS, 0x1d000003), - OID_U32(DOT11_OID_ALOFT_FIXEDRATE, 0x1d000004), - OID_UNKNOWN(DOT11_OID_ALOFT_RSSIGRAPH, 0x1d000005), - OID_UNKNOWN(DOT11_OID_ALOFT_CONFIG, 0x1d000006), - - [DOT11_OID_VDCFX] = {0x1b000000, 7, 0, 0}, - OID_U32(DOT11_OID_MAXFRAMEBURST, 0x1b000008), - - OID_U32(DOT11_OID_PSM, 0x14000000), - OID_U32(DOT11_OID_CAMTIMEOUT, 0x14000001), - OID_U32(DOT11_OID_RECEIVEDTIMS, 0x14000002), - OID_U32(DOT11_OID_ROAMPREFERENCE, 0x14000003), - - OID_U32(DOT11_OID_BRIDGELOCAL, 0x15000000), - OID_U32(DOT11_OID_CLIENTS, 0x15000001), - OID_U32(DOT11_OID_CLIENTSASSOCIATED, 0x15000002), - [DOT11_OID_CLIENTX] = {0x15000003, 2006, 0, 0}, /* DOT11_OID_CLIENTX,...DOT11_OID_CLIENT2007 */ - - OID_STRUCT(DOT11_OID_CLIENTFIND, 0x150007DB, u8[6], OID_TYPE_ADDR), - OID_STRUCT(DOT11_OID_WDSLINKADD, 0x150007DC, u8[6], OID_TYPE_ADDR), - OID_STRUCT(DOT11_OID_WDSLINKREMOVE, 0x150007DD, u8[6], OID_TYPE_ADDR), - OID_STRUCT(DOT11_OID_EAPAUTHSTA, 0x150007DE, u8[6], OID_TYPE_ADDR), - OID_STRUCT(DOT11_OID_EAPUNAUTHSTA, 0x150007DF, u8[6], OID_TYPE_ADDR), - OID_U32_C(DOT11_OID_DOT1XENABLE, 0x150007E0), - OID_UNKNOWN(DOT11_OID_MICFAILURE, 0x150007E1), - OID_UNKNOWN(DOT11_OID_REKEYINDICATE, 0x150007E2), - - OID_U32(DOT11_OID_MPDUTXSUCCESSFUL, 0x16000000), - OID_U32(DOT11_OID_MPDUTXONERETRY, 0x16000001), - OID_U32(DOT11_OID_MPDUTXMULTIPLERETRIES, 0x16000002), - OID_U32(DOT11_OID_MPDUTXFAILED, 0x16000003), - OID_U32(DOT11_OID_MPDURXSUCCESSFUL, 0x16000004), - OID_U32(DOT11_OID_MPDURXDUPS, 0x16000005), - OID_U32(DOT11_OID_RTSSUCCESSFUL, 0x16000006), - OID_U32(DOT11_OID_RTSFAILED, 0x16000007), - OID_U32(DOT11_OID_ACKFAILED, 0x16000008), - OID_U32(DOT11_OID_FRAMERECEIVES, 0x16000009), - OID_U32(DOT11_OID_FRAMEERRORS, 0x1600000A), - OID_U32(DOT11_OID_FRAMEABORTS, 0x1600000B), - OID_U32(DOT11_OID_FRAMEABORTSPHY, 0x1600000C), - - OID_U32(DOT11_OID_SLOTTIME, 0x17000000), - OID_U32(DOT11_OID_CWMIN, 0x17000001), - OID_U32(DOT11_OID_CWMAX, 0x17000002), - OID_U32(DOT11_OID_ACKWINDOW, 0x17000003), - OID_U32(DOT11_OID_ANTENNARX, 0x17000004), - OID_U32(DOT11_OID_ANTENNATX, 0x17000005), - OID_U32(DOT11_OID_ANTENNADIVERSITY, 0x17000006), - OID_U32_C(DOT11_OID_CHANNEL, 0x17000007), - OID_U32_C(DOT11_OID_EDTHRESHOLD, 0x17000008), - OID_U32(DOT11_OID_PREAMBLESETTINGS, 0x17000009), - OID_STRUCT(DOT11_OID_RATES, 0x1700000A, u8[IWMAX_BITRATES + 1], - OID_TYPE_RAW), - OID_U32(DOT11_OID_CCAMODESUPPORTED, 0x1700000B), - OID_U32(DOT11_OID_CCAMODE, 0x1700000C), - 
OID_UNKNOWN(DOT11_OID_RSSIVECTOR, 0x1700000D), - OID_UNKNOWN(DOT11_OID_OUTPUTPOWERTABLE, 0x1700000E), - OID_U32(DOT11_OID_OUTPUTPOWER, 0x1700000F), - OID_STRUCT(DOT11_OID_SUPPORTEDRATES, 0x17000010, - u8[IWMAX_BITRATES + 1], OID_TYPE_RAW), - OID_U32_C(DOT11_OID_FREQUENCY, 0x17000011), - [DOT11_OID_SUPPORTEDFREQUENCIES] = - {0x17000012, 0, sizeof (struct obj_frequencies) - + sizeof (u16) * IWMAX_FREQ, OID_TYPE_FREQUENCIES}, - - OID_U32(DOT11_OID_NOISEFLOOR, 0x17000013), - OID_STRUCT(DOT11_OID_FREQUENCYACTIVITY, 0x17000014, u8[IWMAX_FREQ + 1], - OID_TYPE_RAW), - OID_UNKNOWN(DOT11_OID_IQCALIBRATIONTABLE, 0x17000015), - OID_U32(DOT11_OID_NONERPPROTECTION, 0x17000016), - OID_U32(DOT11_OID_SLOTSETTINGS, 0x17000017), - OID_U32(DOT11_OID_NONERPTIMEOUT, 0x17000018), - OID_U32(DOT11_OID_PROFILES, 0x17000019), - OID_STRUCT(DOT11_OID_EXTENDEDRATES, 0x17000020, - u8[IWMAX_BITRATES + 1], OID_TYPE_RAW), - - OID_STRUCT_MLME(DOT11_OID_DEAUTHENTICATE, 0x18000000), - OID_STRUCT_MLME(DOT11_OID_AUTHENTICATE, 0x18000001), - OID_STRUCT_MLME(DOT11_OID_DISASSOCIATE, 0x18000002), - OID_STRUCT_MLME(DOT11_OID_ASSOCIATE, 0x18000003), - OID_UNKNOWN(DOT11_OID_SCAN, 0x18000004), - OID_STRUCT_MLMEEX(DOT11_OID_BEACON, 0x18000005), - OID_STRUCT_MLMEEX(DOT11_OID_PROBE, 0x18000006), - OID_STRUCT_MLMEEX(DOT11_OID_DEAUTHENTICATEEX, 0x18000007), - OID_STRUCT_MLMEEX(DOT11_OID_AUTHENTICATEEX, 0x18000008), - OID_STRUCT_MLMEEX(DOT11_OID_DISASSOCIATEEX, 0x18000009), - OID_STRUCT_MLMEEX(DOT11_OID_ASSOCIATEEX, 0x1800000A), - OID_STRUCT_MLMEEX(DOT11_OID_REASSOCIATE, 0x1800000B), - OID_STRUCT_MLMEEX(DOT11_OID_REASSOCIATEEX, 0x1800000C), - - OID_U32(DOT11_OID_NONERPSTATUS, 0x1E000000), - - OID_U32(DOT11_OID_STATIMEOUT, 0x19000000), - OID_U32_C(DOT11_OID_MLMEAUTOLEVEL, 0x19000001), - OID_U32(DOT11_OID_BSSTIMEOUT, 0x19000002), - [DOT11_OID_ATTACHMENT] = {0x19000003, 0, - sizeof(struct obj_attachment), OID_TYPE_ATTACH}, - OID_STRUCT_C(DOT11_OID_PSMBUFFER, 0x19000004, struct obj_buffer, - OID_TYPE_BUFFER), - - OID_U32(DOT11_OID_BSSS, 0x1C000000), - [DOT11_OID_BSSX] = {0x1C000001, 63, sizeof (struct obj_bss), - OID_TYPE_BSS}, /*DOT11_OID_BSS1,...,DOT11_OID_BSS64 */ - OID_STRUCT(DOT11_OID_BSSFIND, 0x1C000042, struct obj_bss, OID_TYPE_BSS), - [DOT11_OID_BSSLIST] = {0x1C000043, 0, sizeof (struct - obj_bsslist) + - sizeof (struct obj_bss[IWMAX_BSS]), - OID_TYPE_BSSLIST}, - - OID_UNKNOWN(OID_INL_TUNNEL, 0xFF020000), - OID_UNKNOWN(OID_INL_MEMADDR, 0xFF020001), - OID_UNKNOWN(OID_INL_MEMORY, 0xFF020002), - OID_U32_C(OID_INL_MODE, 0xFF020003), - OID_UNKNOWN(OID_INL_COMPONENT_NR, 0xFF020004), - OID_STRUCT(OID_INL_VERSION, 0xFF020005, u8[8], OID_TYPE_RAW), - OID_UNKNOWN(OID_INL_INTERFACE_ID, 0xFF020006), - OID_UNKNOWN(OID_INL_COMPONENT_ID, 0xFF020007), - OID_U32_C(OID_INL_CONFIG, 0xFF020008), - OID_U32_C(OID_INL_DOT11D_CONFORMANCE, 0xFF02000C), - OID_U32(OID_INL_PHYCAPABILITIES, 0xFF02000D), - OID_U32_C(OID_INL_OUTPUTPOWER, 0xFF02000F), - -}; - -int -mgt_init(islpci_private *priv) -{ - int i; - - priv->mib = kcalloc(OID_NUM_LAST, sizeof (void *), GFP_KERNEL); - if (!priv->mib) - return -ENOMEM; - - /* Alloc the cache */ - for (i = 0; i < OID_NUM_LAST; i++) { - if (isl_oid[i].flags & OID_FLAG_CACHED) { - priv->mib[i] = kcalloc(isl_oid[i].size, - (isl_oid[i].range + 1), - GFP_KERNEL); - if (!priv->mib[i]) - return -ENOMEM; - } else - priv->mib[i] = NULL; - } - - init_rwsem(&priv->mib_sem); - prism54_mib_init(priv); - - return 0; -} - -void -mgt_clean(islpci_private *priv) -{ - int i; - - if (!priv->mib) - return; - for (i = 0; i < OID_NUM_LAST; i++) { - 
kfree(priv->mib[i]); - priv->mib[i] = NULL; - } - kfree(priv->mib); - priv->mib = NULL; -} - -void -mgt_le_to_cpu(int type, void *data) -{ - switch (type) { - case OID_TYPE_U32: - *(u32 *) data = le32_to_cpu(*(u32 *) data); - break; - case OID_TYPE_BUFFER:{ - struct obj_buffer *buff = data; - buff->size = le32_to_cpu(buff->size); - buff->addr = le32_to_cpu(buff->addr); - break; - } - case OID_TYPE_BSS:{ - struct obj_bss *bss = data; - bss->age = le16_to_cpu(bss->age); - bss->channel = le16_to_cpu(bss->channel); - bss->capinfo = le16_to_cpu(bss->capinfo); - bss->rates = le16_to_cpu(bss->rates); - bss->basic_rates = le16_to_cpu(bss->basic_rates); - break; - } - case OID_TYPE_BSSLIST:{ - struct obj_bsslist *list = data; - int i; - list->nr = le32_to_cpu(list->nr); - for (i = 0; i < list->nr; i++) - mgt_le_to_cpu(OID_TYPE_BSS, &list->bsslist[i]); - break; - } - case OID_TYPE_FREQUENCIES:{ - struct obj_frequencies *freq = data; - int i; - freq->nr = le16_to_cpu(freq->nr); - for (i = 0; i < freq->nr; i++) - freq->mhz[i] = le16_to_cpu(freq->mhz[i]); - break; - } - case OID_TYPE_MLME:{ - struct obj_mlme *mlme = data; - mlme->id = le16_to_cpu(mlme->id); - mlme->state = le16_to_cpu(mlme->state); - mlme->code = le16_to_cpu(mlme->code); - break; - } - case OID_TYPE_MLMEEX:{ - struct obj_mlmeex *mlme = data; - mlme->id = le16_to_cpu(mlme->id); - mlme->state = le16_to_cpu(mlme->state); - mlme->code = le16_to_cpu(mlme->code); - mlme->size = le16_to_cpu(mlme->size); - break; - } - case OID_TYPE_ATTACH:{ - struct obj_attachment *attach = data; - attach->id = le16_to_cpu(attach->id); - attach->size = le16_to_cpu(attach->size); - break; - } - case OID_TYPE_SSID: - case OID_TYPE_KEY: - case OID_TYPE_ADDR: - case OID_TYPE_RAW: - break; - default: - BUG(); - } -} - -static void -mgt_cpu_to_le(int type, void *data) -{ - switch (type) { - case OID_TYPE_U32: - *(u32 *) data = cpu_to_le32(*(u32 *) data); - break; - case OID_TYPE_BUFFER:{ - struct obj_buffer *buff = data; - buff->size = cpu_to_le32(buff->size); - buff->addr = cpu_to_le32(buff->addr); - break; - } - case OID_TYPE_BSS:{ - struct obj_bss *bss = data; - bss->age = cpu_to_le16(bss->age); - bss->channel = cpu_to_le16(bss->channel); - bss->capinfo = cpu_to_le16(bss->capinfo); - bss->rates = cpu_to_le16(bss->rates); - bss->basic_rates = cpu_to_le16(bss->basic_rates); - break; - } - case OID_TYPE_BSSLIST:{ - struct obj_bsslist *list = data; - int i; - list->nr = cpu_to_le32(list->nr); - for (i = 0; i < list->nr; i++) - mgt_cpu_to_le(OID_TYPE_BSS, &list->bsslist[i]); - break; - } - case OID_TYPE_FREQUENCIES:{ - struct obj_frequencies *freq = data; - int i; - freq->nr = cpu_to_le16(freq->nr); - for (i = 0; i < freq->nr; i++) - freq->mhz[i] = cpu_to_le16(freq->mhz[i]); - break; - } - case OID_TYPE_MLME:{ - struct obj_mlme *mlme = data; - mlme->id = cpu_to_le16(mlme->id); - mlme->state = cpu_to_le16(mlme->state); - mlme->code = cpu_to_le16(mlme->code); - break; - } - case OID_TYPE_MLMEEX:{ - struct obj_mlmeex *mlme = data; - mlme->id = cpu_to_le16(mlme->id); - mlme->state = cpu_to_le16(mlme->state); - mlme->code = cpu_to_le16(mlme->code); - mlme->size = cpu_to_le16(mlme->size); - break; - } - case OID_TYPE_ATTACH:{ - struct obj_attachment *attach = data; - attach->id = cpu_to_le16(attach->id); - attach->size = cpu_to_le16(attach->size); - break; - } - case OID_TYPE_SSID: - case OID_TYPE_KEY: - case OID_TYPE_ADDR: - case OID_TYPE_RAW: - break; - default: - BUG(); - } -} - -/* Note : data is modified during this function */ - -int -mgt_set_request(islpci_private 
*priv, enum oid_num_t n, int extra, void *data) -{ - int ret = 0; - struct islpci_mgmtframe *response = NULL; - int response_op = PIMFOR_OP_ERROR; - int dlen; - void *cache, *_data = data; - u32 oid; - - BUG_ON(n >= OID_NUM_LAST); - BUG_ON(extra > isl_oid[n].range); - - if (!priv->mib) - /* memory has been freed */ - return -1; - - dlen = isl_oid[n].size; - cache = priv->mib[n]; - cache += (cache ? extra * dlen : 0); - oid = isl_oid[n].oid + extra; - - if (_data == NULL) - /* we are requested to re-set a cached value */ - _data = cache; - else - mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, _data); - /* If we are going to write to the cache, we don't want anyone to read - * it -> acquire write lock. - * Else we could acquire a read lock to be sure we don't bother the - * commit process (which takes a write lock). But I'm not sure if it's - * needed. - */ - if (cache) - down_write(&priv->mib_sem); - - if (islpci_get_state(priv) >= PRV_STATE_READY) { - ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET, oid, - _data, dlen, &response); - if (!ret) { - response_op = response->header->operation; - islpci_mgt_release(response); - } - if (ret || response_op == PIMFOR_OP_ERROR) - ret = -EIO; - } else if (!cache) - ret = -EIO; - - if (cache) { - if (!ret && data) - memcpy(cache, _data, dlen); - up_write(&priv->mib_sem); - } - - /* re-set given data to what it was */ - if (data) - mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, data); - - return ret; -} - -/* None of these are cached */ -int -mgt_set_varlen(islpci_private *priv, enum oid_num_t n, void *data, int extra_len) -{ - int ret = 0; - struct islpci_mgmtframe *response; - int response_op = PIMFOR_OP_ERROR; - int dlen; - u32 oid; - - BUG_ON(n >= OID_NUM_LAST); - - dlen = isl_oid[n].size; - oid = isl_oid[n].oid; - - mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, data); - - if (islpci_get_state(priv) >= PRV_STATE_READY) { - ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET, oid, - data, dlen + extra_len, &response); - if (!ret) { - response_op = response->header->operation; - islpci_mgt_release(response); - } - if (ret || response_op == PIMFOR_OP_ERROR) - ret = -EIO; - } else - ret = -EIO; - - /* re-set given data to what it was */ - if (data) - mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, data); - - return ret; -} - -int -mgt_get_request(islpci_private *priv, enum oid_num_t n, int extra, void *data, - union oid_res_t *res) -{ - - int ret = -EIO; - int reslen = 0; - struct islpci_mgmtframe *response = NULL; - - int dlen; - void *cache, *_res = NULL; - u32 oid; - - BUG_ON(n >= OID_NUM_LAST); - BUG_ON(extra > isl_oid[n].range); - - res->ptr = NULL; - - if (!priv->mib) - /* memory has been freed */ - return -1; - - dlen = isl_oid[n].size; - cache = priv->mib[n]; - cache += cache ? extra * dlen : 0; - oid = isl_oid[n].oid + extra; - reslen = dlen; - - if (cache) - down_read(&priv->mib_sem); - - if (islpci_get_state(priv) >= PRV_STATE_READY) { - ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET, - oid, data, dlen, &response); - if (ret || !response || - response->header->operation == PIMFOR_OP_ERROR) { - if (response) - islpci_mgt_release(response); - ret = -EIO; - } - if (!ret) { - _res = response->data; - reslen = response->header->length; - } - } else if (cache) { - _res = cache; - ret = 0; - } - if ((isl_oid[n].flags & OID_FLAG_TYPE) == OID_TYPE_U32) - res->u = ret ? 
0 : le32_to_cpu(*(u32 *) _res); - else { - res->ptr = kmalloc(reslen, GFP_KERNEL); - BUG_ON(res->ptr == NULL); - if (ret) - memset(res->ptr, 0, reslen); - else { - memcpy(res->ptr, _res, reslen); - mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, - res->ptr); - } - } - if (cache) - up_read(&priv->mib_sem); - - if (response && !ret) - islpci_mgt_release(response); - - if (reslen > isl_oid[n].size) - printk(KERN_DEBUG - "mgt_get_request(0x%x): received data length was bigger " - "than expected (%d > %d). Memory is probably corrupted...", - oid, reslen, isl_oid[n].size); - - return ret; -} - -/* lock outside */ -int -mgt_commit_list(islpci_private *priv, enum oid_num_t *l, int n) -{ - int i, ret = 0; - struct islpci_mgmtframe *response; - - for (i = 0; i < n; i++) { - struct oid_t *t = &(isl_oid[l[i]]); - void *data = priv->mib[l[i]]; - int j = 0; - u32 oid = t->oid; - BUG_ON(data == NULL); - while (j <= t->range) { - int r = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET, - oid, data, t->size, - &response); - if (response) { - r |= (response->header->operation == PIMFOR_OP_ERROR); - islpci_mgt_release(response); - } - if (r) - printk(KERN_ERR "%s: mgt_commit_list: failure. " - "oid=%08x err=%d\n", - priv->ndev->name, oid, r); - ret |= r; - j++; - oid++; - data += t->size; - } - } - return ret; -} - -/* Lock outside */ - -void -mgt_set(islpci_private *priv, enum oid_num_t n, void *data) -{ - BUG_ON(n >= OID_NUM_LAST); - BUG_ON(priv->mib[n] == NULL); - - memcpy(priv->mib[n], data, isl_oid[n].size); - mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, priv->mib[n]); -} - -void -mgt_get(islpci_private *priv, enum oid_num_t n, void *res) -{ - BUG_ON(n >= OID_NUM_LAST); - BUG_ON(priv->mib[n] == NULL); - BUG_ON(res == NULL); - - memcpy(res, priv->mib[n], isl_oid[n].size); - mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, res); -} - -/* Commits the cache. Lock outside. */ - -static enum oid_num_t commit_part1[] = { - OID_INL_CONFIG, - OID_INL_MODE, - DOT11_OID_BSSTYPE, - DOT11_OID_CHANNEL, - DOT11_OID_MLMEAUTOLEVEL -}; - -static enum oid_num_t commit_part2[] = { - DOT11_OID_SSID, - DOT11_OID_PSMBUFFER, - DOT11_OID_AUTHENABLE, - DOT11_OID_PRIVACYINVOKED, - DOT11_OID_EXUNENCRYPTED, - DOT11_OID_DEFKEYX, /* MULTIPLE */ - DOT11_OID_DEFKEYID, - DOT11_OID_DOT1XENABLE, - OID_INL_DOT11D_CONFORMANCE, - /* Do not initialize this - fw < 1.0.4.3 rejects it - OID_INL_OUTPUTPOWER, - */ -}; - -/* update the MAC addr. */ -static int -mgt_update_addr(islpci_private *priv) -{ - struct islpci_mgmtframe *res; - int ret; - - ret = islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET, - isl_oid[GEN_OID_MACADDRESS].oid, NULL, - isl_oid[GEN_OID_MACADDRESS].size, &res); - - if ((ret == 0) && res && (res->header->operation != PIMFOR_OP_ERROR)) - memcpy(priv->ndev->dev_addr, res->data, ETH_ALEN); - else - ret = -EIO; - if (res) - islpci_mgt_release(res); - - if (ret) - printk(KERN_ERR "%s: mgt_update_addr: failure\n", priv->ndev->name); - return ret; -} - -int -mgt_commit(islpci_private *priv) -{ - int rvalue; - enum oid_num_t u; - - if (islpci_get_state(priv) < PRV_STATE_INIT) - return 0; - - rvalue = mgt_commit_list(priv, commit_part1, ARRAY_SIZE(commit_part1)); - - if (priv->iw_mode != IW_MODE_MONITOR) - rvalue |= mgt_commit_list(priv, commit_part2, ARRAY_SIZE(commit_part2)); - - u = OID_INL_MODE; - rvalue |= mgt_commit_list(priv, &u, 1); - rvalue |= mgt_update_addr(priv); - - if (rvalue) { - /* some request have failed. The device might be in an - incoherent state. We should reset it ! 
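mgt_set_request() and mgt_get_request() above layer a write-through cache over the firmware OIDs: setters take mib_sem for writing around the device transaction and commit the new value to the cache only on success, while getters can serve the cached copy under the read lock when the firmware is not yet up. A condensed sketch of the read side (query_device() is a hypothetical stand-in for the real islpci_mgt_transaction() call, and the locking is simplified):

    down_read(&priv->mib_sem);
    if (islpci_get_state(priv) >= PRV_STATE_READY)
        ret = query_device(priv, oid, out);     /* hypothetical helper */
    else if (cache) {
        memcpy(out, cache, dlen);               /* last committed value */
        ret = 0;
    } else
        ret = -EIO;
    up_read(&priv->mib_sem);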
*/ - printk(KERN_DEBUG "%s: mgt_commit: failure\n", priv->ndev->name); - } - return rvalue; -} - -/* The following OIDs need to be "unlatched": - * - * MEDIUMLIMIT,BEACONPERIOD,DTIMPERIOD,ATIMWINDOW,LISTENINTERVAL - * FREQUENCY,EXTENDEDRATES. - * - * The way to do this is to set ESSID. Note though that they may get - * unlatch before though by setting another OID. */ -#if 0 -void -mgt_unlatch_all(islpci_private *priv) -{ - u32 u; - int rvalue = 0; - - if (islpci_get_state(priv) < PRV_STATE_INIT) - return; - - u = DOT11_OID_SSID; - rvalue = mgt_commit_list(priv, &u, 1); - /* Necessary if in MANUAL RUN mode? */ -#if 0 - u = OID_INL_MODE; - rvalue |= mgt_commit_list(priv, &u, 1); - - u = DOT11_OID_MLMEAUTOLEVEL; - rvalue |= mgt_commit_list(priv, &u, 1); - - u = OID_INL_MODE; - rvalue |= mgt_commit_list(priv, &u, 1); -#endif - - if (rvalue) - printk(KERN_DEBUG "%s: Unlatching OIDs failed\n", priv->ndev->name); -} -#endif - -/* This will tell you if you are allowed to answer a mlme(ex) request .*/ - -int -mgt_mlme_answer(islpci_private *priv) -{ - u32 mlmeautolevel; - /* Acquire a read lock because if we are in a mode change, it's - * possible to answer true, while the card is leaving master to managed - * mode. Answering to a mlme in this situation could hang the card. - */ - down_read(&priv->mib_sem); - mlmeautolevel = - le32_to_cpu(*(u32 *) priv->mib[DOT11_OID_MLMEAUTOLEVEL]); - up_read(&priv->mib_sem); - - return ((priv->iw_mode == IW_MODE_MASTER) && - (mlmeautolevel >= DOT11_MLME_INTERMEDIATE)); -} - -enum oid_num_t -mgt_oidtonum(u32 oid) -{ - int i; - - for (i = 0; i < OID_NUM_LAST; i++) - if (isl_oid[i].oid == oid) - return i; - - printk(KERN_DEBUG "looking for an unknown oid 0x%x", oid); - - return OID_NUM_LAST; -} - -int -mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str) -{ - switch (isl_oid[n].flags & OID_FLAG_TYPE) { - case OID_TYPE_U32: - return scnprintf(str, PRIV_STR_SIZE, "%u\n", r->u); - case OID_TYPE_BUFFER:{ - struct obj_buffer *buff = r->ptr; - return scnprintf(str, PRIV_STR_SIZE, - "size=%u\naddr=0x%X\n", buff->size, - buff->addr); - } - break; - case OID_TYPE_BSS:{ - struct obj_bss *bss = r->ptr; - return scnprintf(str, PRIV_STR_SIZE, - "age=%u\nchannel=%u\n" - "capinfo=0x%X\nrates=0x%X\n" - "basic_rates=0x%X\n", bss->age, - bss->channel, bss->capinfo, - bss->rates, bss->basic_rates); - } - break; - case OID_TYPE_BSSLIST:{ - struct obj_bsslist *list = r->ptr; - int i, k; - k = scnprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr); - for (i = 0; i < list->nr; i++) - k += scnprintf(str + k, PRIV_STR_SIZE - k, - "bss[%u] :\nage=%u\nchannel=%u\n" - "capinfo=0x%X\nrates=0x%X\n" - "basic_rates=0x%X\n", - i, list->bsslist[i].age, - list->bsslist[i].channel, - list->bsslist[i].capinfo, - list->bsslist[i].rates, - list->bsslist[i].basic_rates); - return k; - } - break; - case OID_TYPE_FREQUENCIES:{ - struct obj_frequencies *freq = r->ptr; - int i, t; - printk("nr : %u\n", freq->nr); - t = scnprintf(str, PRIV_STR_SIZE, "nr=%u\n", freq->nr); - for (i = 0; i < freq->nr; i++) - t += scnprintf(str + t, PRIV_STR_SIZE - t, - "mhz[%u]=%u\n", i, freq->mhz[i]); - return t; - } - break; - case OID_TYPE_MLME:{ - struct obj_mlme *mlme = r->ptr; - return scnprintf(str, PRIV_STR_SIZE, - "id=0x%X\nstate=0x%X\ncode=0x%X\n", - mlme->id, mlme->state, mlme->code); - } - break; - case OID_TYPE_MLMEEX:{ - struct obj_mlmeex *mlme = r->ptr; - return scnprintf(str, PRIV_STR_SIZE, - "id=0x%X\nstate=0x%X\n" - "code=0x%X\nsize=0x%X\n", mlme->id, - mlme->state, mlme->code, mlme->size); - } - 
break; - case OID_TYPE_ATTACH:{ - struct obj_attachment *attach = r->ptr; - return scnprintf(str, PRIV_STR_SIZE, - "id=%d\nsize=%d\n", - attach->id, - attach->size); - } - break; - case OID_TYPE_SSID:{ - struct obj_ssid *ssid = r->ptr; - return scnprintf(str, PRIV_STR_SIZE, - "length=%u\noctets=%.*s\n", - ssid->length, ssid->length, - ssid->octets); - } - break; - case OID_TYPE_KEY:{ - struct obj_key *key = r->ptr; - int t, i; - t = scnprintf(str, PRIV_STR_SIZE, - "type=0x%X\nlength=0x%X\nkey=0x", - key->type, key->length); - for (i = 0; i < key->length; i++) - t += scnprintf(str + t, PRIV_STR_SIZE - t, - "%02X:", key->key[i]); - t += scnprintf(str + t, PRIV_STR_SIZE - t, "\n"); - return t; - } - break; - case OID_TYPE_RAW: - case OID_TYPE_ADDR:{ - unsigned char *buff = r->ptr; - int t, i; - t = scnprintf(str, PRIV_STR_SIZE, "hex data="); - for (i = 0; i < isl_oid[n].size; i++) - t += scnprintf(str + t, PRIV_STR_SIZE - t, - "%02X:", buff[i]); - t += scnprintf(str + t, PRIV_STR_SIZE - t, "\n"); - return t; - } - break; - default: - BUG(); - } - return 0; -} diff --git a/drivers/net/wireless/intersil/prism54/oid_mgt.h b/drivers/net/wireless/intersil/prism54/oid_mgt.h deleted file mode 100644 index a7dc9e24c0bf..000000000000 --- a/drivers/net/wireless/intersil/prism54/oid_mgt.h +++ /dev/null @@ -1,46 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2003 Aurelien Alleaume <slts@free.fr> - */ - -#if !defined(_OID_MGT_H) -#define _OID_MGT_H - -#include "isl_oid.h" -#include "islpci_dev.h" - -extern struct oid_t isl_oid[]; - -int mgt_init(islpci_private *); - -void mgt_clean(islpci_private *); - -/* I don't know where to put these 2 */ -extern const int frequency_list_a[]; -int channel_of_freq(int); - -void mgt_le_to_cpu(int, void *); - -int mgt_set_request(islpci_private *, enum oid_num_t, int, void *); -int mgt_set_varlen(islpci_private *, enum oid_num_t, void *, int); - - -int mgt_get_request(islpci_private *, enum oid_num_t, int, void *, - union oid_res_t *); - -int mgt_commit_list(islpci_private *, enum oid_num_t *, int); - -void mgt_set(islpci_private *, enum oid_num_t, void *); - -void mgt_get(islpci_private *, enum oid_num_t, void *); - -int mgt_commit(islpci_private *); - -int mgt_mlme_answer(islpci_private *); - -enum oid_num_t mgt_oidtonum(u32 oid); - -int mgt_response_to_str(enum oid_num_t, union oid_res_t *, char *); - -#endif /* !defined(_OID_MGT_H) */ -/* EOF */ diff --git a/drivers/net/wireless/intersil/prism54/prismcompat.h b/drivers/net/wireless/intersil/prism54/prismcompat.h deleted file mode 100644 index c4489b66d07e..000000000000 --- a/drivers/net/wireless/intersil/prism54/prismcompat.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * (C) 2004 Margit Schubert-While <margitsw@t-online.de> - */ - -/* - * Compatibility header file to aid support of different kernel versions - */ - -#ifdef PRISM54_COMPAT24 -#include "prismcompat24.h" -#else /* PRISM54_COMPAT24 */ - -#ifndef _PRISM_COMPAT_H -#define _PRISM_COMPAT_H - -#include <linux/device.h> -#include <linux/firmware.h> -#include <linux/moduleparam.h> -#include <linux/workqueue.h> -#include <linux/compiler.h> - -#ifndef __iomem -#define __iomem -#endif - -#define PRISM_FW_PDEV &priv->pdev->dev - -#endif /* _PRISM_COMPAT_H */ -#endif /* PRISM54_COMPAT24 */ diff --git a/drivers/net/wireless/marvell/libertas/ethtool.c b/drivers/net/wireless/marvell/libertas/ethtool.c index 1bb8746a0b23..d8e4f29b690d 100644 --- a/drivers/net/wireless/marvell/libertas/ethtool.c +++ 
b/drivers/net/wireless/marvell/libertas/ethtool.c @@ -43,10 +43,8 @@ static int lbs_ethtool_get_eeprom(struct net_device *dev, int ret; if (eeprom->offset + eeprom->len > LBS_EEPROM_LEN || - eeprom->len > LBS_EEPROM_READ_LEN) { - ret = -EINVAL; - goto out; - } + eeprom->len > LBS_EEPROM_READ_LEN) + return -EINVAL; cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_ds_802_11_eeprom_access) - LBS_EEPROM_READ_LEN + eeprom->len); @@ -57,8 +55,7 @@ static int lbs_ethtool_get_eeprom(struct net_device *dev, if (!ret) memcpy(bytes, cmd.value, eeprom->len); -out: - return ret; + return ret; } static void lbs_ethtool_get_wol(struct net_device *dev, diff --git a/drivers/net/wireless/marvell/mwifiex/Makefile b/drivers/net/wireless/marvell/mwifiex/Makefile index 162d557b78af..2bd00f40958e 100644 --- a/drivers/net/wireless/marvell/mwifiex/Makefile +++ b/drivers/net/wireless/marvell/mwifiex/Makefile @@ -49,6 +49,7 @@ mwifiex_sdio-y += sdio.o obj-$(CONFIG_MWIFIEX_SDIO) += mwifiex_sdio.o mwifiex_pcie-y += pcie.o +mwifiex_pcie-y += pcie_quirks.o obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o mwifiex_usb-y += usb.o diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 3a11342a6bde..171a25742600 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -187,7 +187,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); /* Sanity test */ - if (host_cmd == NULL || host_cmd->size == 0) { + if (host_cmd->size == 0) { mwifiex_dbg(adapter, ERROR, "DNLD_CMD: host_cmd is null\t" "or cmd size is 0, not sending\n"); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 46517515ba72..c6ccce426b49 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -27,6 +27,7 @@ #include "wmm.h" #include "11n.h" #include "pcie.h" +#include "pcie_quirks.h" #define PCIE_VERSION "1.0" #define DRV_NAME "Marvell mwifiex PCIe" @@ -410,6 +411,9 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, return ret; } + /* check quirks */ + mwifiex_initialize_quirks(card); + if (mwifiex_add_card(card, &card->fw_done, &pcie_ops, MWIFIEX_PCIE, &pdev->dev)) { pr_err("%s failed\n", __func__); @@ -524,6 +528,13 @@ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev) mwifiex_shutdown_sw(adapter); clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); + + /* On MS Surface gen4+ devices FLR isn't effective to recover from + * hangups, so we power-cycle the card instead. 
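The power cycle referred to here walks the card through D3cold and back to D0; as the new pcie_quirks.c below notes, on some machines (e.g. Surface Book 1) the upstream bridge has to be cycled as well. The per-device sequence, stripped of error handling:

    /* Condensed from mwifiex_pcie_set_power_d3cold()/_d0() below. */
    pci_save_state(pdev);
    if (pci_is_enabled(pdev))
        pci_disable_device(pdev);
    pci_set_power_state(pdev, PCI_D3cold);  /* power off */

    pci_set_power_state(pdev, PCI_D0);      /* power back on */
    ret = pci_enable_device(pdev);
    if (ret)
        return ret;
    pci_restore_state(pdev);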
+ */ + if (card->quirks & QUIRK_FW_RST_D3COLD) + mwifiex_pcie_reset_d3cold_quirk(pdev); + mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); card->pci_reset_ongoing = true; diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 5ed613d65709..981e330c77d7 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -244,6 +244,7 @@ struct pcie_service_card { unsigned long work_flags; bool pci_reset_ongoing; + unsigned long quirks; }; static inline int diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c new file mode 100644 index 000000000000..0234cf3c2974 --- /dev/null +++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c @@ -0,0 +1,161 @@ +/* + * NXP Wireless LAN device driver: PCIE and platform specific quirks + * + * This software file (the "File") is distributed by NXP + * under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. + */ + +#include <linux/dmi.h> + +#include "pcie_quirks.h" + +/* quirk table based on DMI matching */ +static const struct dmi_system_id mwifiex_quirk_table[] = { + { + .ident = "Surface Pro 4", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Pro 5", + .matches = { + /* match for SKU here due to generic product name "Surface Pro" */ + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Pro 5 (LTE)", + .matches = { + /* match for SKU here due to generic product name "Surface Pro" */ + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Pro 6", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Book 1", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Book 2", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Laptop 1", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Laptop 2", + .matches = { + 
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + {} +}; + +void mwifiex_initialize_quirks(struct pcie_service_card *card) +{ + struct pci_dev *pdev = card->dev; + const struct dmi_system_id *dmi_id; + + dmi_id = dmi_first_match(mwifiex_quirk_table); + if (dmi_id) + card->quirks = (uintptr_t)dmi_id->driver_data; + + if (!card->quirks) + dev_info(&pdev->dev, "no quirks enabled\n"); + if (card->quirks & QUIRK_FW_RST_D3COLD) + dev_info(&pdev->dev, "quirk reset_d3cold enabled\n"); +} + +static void mwifiex_pcie_set_power_d3cold(struct pci_dev *pdev) +{ + dev_info(&pdev->dev, "putting into D3cold...\n"); + + pci_save_state(pdev); + if (pci_is_enabled(pdev)) + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3cold); +} + +static int mwifiex_pcie_set_power_d0(struct pci_dev *pdev) +{ + int ret; + + dev_info(&pdev->dev, "putting into D0...\n"); + + pci_set_power_state(pdev, PCI_D0); + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "pci_enable_device failed\n"); + return ret; + } + pci_restore_state(pdev); + + return 0; +} + +int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev) +{ + struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); + int ret; + + /* Power-cycle (put into D3cold then D0) */ + dev_info(&pdev->dev, "Using reset_d3cold quirk to perform FW reset\n"); + + /* We need to perform power-cycle also for bridge of wifi because + * on some devices (e.g. Surface Book 1), the OS for some reasons + * can't know the real power state of the bridge. + * When tried to power-cycle only wifi, the reset failed with the + * following dmesg log: + * "Cannot transition to power state D0 for parent in D3hot". + */ + mwifiex_pcie_set_power_d3cold(pdev); + mwifiex_pcie_set_power_d3cold(parent_pdev); + + ret = mwifiex_pcie_set_power_d0(parent_pdev); + if (ret) + return ret; + ret = mwifiex_pcie_set_power_d0(pdev); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h new file mode 100644 index 000000000000..8ec4176d698f --- /dev/null +++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h @@ -0,0 +1,23 @@ +/* + * NXP Wireless LAN device driver: PCIE and platform specific quirks + * + * This software file (the "File") is distributed by NXP + * under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. 
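Each quirk_table entry above keys the flag word in .driver_data off exact DMI vendor/product (or SKU) strings, so extending coverage to another machine is a single new entry. An illustrative addition (the identifiers below are made up):

    {
        .ident = "Example Machine",         /* hypothetical */
        .matches = {
            DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
            DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Example Product"),
        },
        .driver_data = (void *)QUIRK_FW_RST_D3COLD,
    },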
+ */ + +#include "pcie.h" + +#define QUIRK_FW_RST_D3COLD BIT(0) + +void mwifiex_initialize_quirks(struct pcie_service_card *card); +int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 653f9e094256..4062e515697a 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -1325,8 +1325,8 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr, u16 ie_len) { struct ieee_types_vendor_header *pvendor_ie; - const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 }; - const u8 wps_oui[] = { 0x00, 0x50, 0xf2, 0x04 }; + static const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 }; + static const u8 wps_oui[] = { 0x00, 0x50, 0xf2, 0x04 }; u16 unparsed_len = ie_len, cur_ie_len; /* If the passed length is zero, reset the buffer */ diff --git a/drivers/net/wireless/marvell/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h index d822ec15b7e6..61a96b7fbf21 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.h +++ b/drivers/net/wireless/marvell/mwifiex/usb.h @@ -134,7 +134,7 @@ struct fw_sync_header { struct fw_data { struct fw_header fw_hdr; __le32 seq_num; - u8 data[1]; + u8 data[]; } __packed; #endif /*_MWIFIEX_USB_H */ diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index e14b9fc2c67a..42e03a701ae1 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -129,10 +129,8 @@ static int wilc_sdio_probe(struct sdio_func *func, ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO, &wilc_hif_sdio); - if (ret) { - kfree(sdio_priv); - return ret; - } + if (ret) + goto free; if (IS_ENABLED(CONFIG_WILC1000_HW_OOB_INTR)) { struct device_node *np = func->card->dev.of_node; @@ -148,24 +146,29 @@ static int wilc_sdio_probe(struct sdio_func *func, wilc->bus_data = sdio_priv; wilc->dev = &func->dev; - wilc->rtc_clk = devm_clk_get(&func->card->dev, "rtc"); - if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) { - kfree(sdio_priv); - return -EPROBE_DEFER; - } else if (!IS_ERR(wilc->rtc_clk)) - clk_prepare_enable(wilc->rtc_clk); + wilc->rtc_clk = devm_clk_get_optional(&func->card->dev, "rtc"); + if (IS_ERR(wilc->rtc_clk)) { + ret = PTR_ERR(wilc->rtc_clk); + goto dispose_irq; + } + clk_prepare_enable(wilc->rtc_clk); dev_info(&func->dev, "Driver Initializing success\n"); return 0; + +dispose_irq: + irq_dispose_mapping(wilc->dev_irq_num); + wilc_netdev_cleanup(wilc); +free: + kfree(sdio_priv); + return ret; } static void wilc_sdio_remove(struct sdio_func *func) { struct wilc *wilc = sdio_get_drvdata(func); - if (!IS_ERR(wilc->rtc_clk)) - clk_disable_unprepare(wilc->rtc_clk); - + clk_disable_unprepare(wilc->rtc_clk); wilc_netdev_cleanup(wilc); } diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index 8e9aaf03a6fa..dd481dc0b5ce 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -39,6 +39,7 @@ MODULE_PARM_DESC(enable_crc16, #define WILC_SPI_RSP_HDR_EXTRA_DATA 8 struct wilc_spi { + bool isinit; /* true if SPI protocol has been configured */ bool probing_crc; /* true if we're probing chip's CRC config */ bool crc7_enabled; /* true if crc7 is currently enabled */ bool crc16_enabled; /* true if crc16 is currently enabled */ @@ -154,34 +155,37 @@ static int wilc_bus_probe(struct spi_device 
*spi) return -ENOMEM; ret = wilc_cfg80211_init(&wilc, &spi->dev, WILC_HIF_SPI, &wilc_hif_spi); - if (ret) { - kfree(spi_priv); - return ret; - } + if (ret) + goto free; spi_set_drvdata(spi, wilc); wilc->dev = &spi->dev; wilc->bus_data = spi_priv; wilc->dev_irq_num = spi->irq; - wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc"); - if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) { - kfree(spi_priv); - return -EPROBE_DEFER; - } else if (!IS_ERR(wilc->rtc_clk)) - clk_prepare_enable(wilc->rtc_clk); + wilc->rtc_clk = devm_clk_get_optional(&spi->dev, "rtc"); + if (IS_ERR(wilc->rtc_clk)) { + ret = PTR_ERR(wilc->rtc_clk); + goto netdev_cleanup; + } + clk_prepare_enable(wilc->rtc_clk); return 0; + +netdev_cleanup: + wilc_netdev_cleanup(wilc); +free: + kfree(spi_priv); + return ret; } static int wilc_bus_remove(struct spi_device *spi) { struct wilc *wilc = spi_get_drvdata(spi); - if (!IS_ERR(wilc->rtc_clk)) - clk_disable_unprepare(wilc->rtc_clk); - + clk_disable_unprepare(wilc->rtc_clk); wilc_netdev_cleanup(wilc); + return 0; } @@ -905,15 +909,15 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) struct wilc_spi *spi_priv = wilc->bus_data; u32 reg; u32 chipid; - static int isinit; int ret, i; - if (isinit) { + if (spi_priv->isinit) { + /* Confirm we can read chipid register without error: */ ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid); - if (ret) - dev_err(&spi->dev, "Fail cmd read chip id...\n"); + if (ret == 0) + return 0; - return ret; + dev_err(&spi->dev, "Fail cmd read chip id...\n"); } /* @@ -971,7 +975,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) spi_priv->probing_crc = false; /* - * make sure can read back chip id correctly + * make sure can read chip id without protocol error */ ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid); if (ret) { @@ -979,7 +983,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) return ret; } - isinit = 1; + spi_priv->isinit = true; return 0; } diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c index 2030fc7f53ca..200a103a0a85 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.c +++ b/drivers/net/wireless/microchip/wilc1000/wlan.c @@ -1127,27 +1127,22 @@ int wilc_wlan_start(struct wilc *wilc) } acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY); ret = wilc->hif_func->hif_write_reg(wilc, WILC_VMM_CORE_CFG, reg); - if (ret) { - release_bus(wilc, WILC_BUS_RELEASE_ONLY); - return ret; - } + if (ret) + goto release; + reg = 0; if (wilc->io_type == WILC_HIF_SDIO && wilc->dev_irq_num) reg |= WILC_HAVE_SDIO_IRQ_GPIO; ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_1, reg); - if (ret) { - release_bus(wilc, WILC_BUS_RELEASE_ONLY); - return ret; - } + if (ret) + goto release; wilc->hif_func->hif_sync_ext(wilc, NUM_INT_EXT); ret = wilc->hif_func->hif_read_reg(wilc, WILC_CHIPID, &chipid); - if (ret) { - release_bus(wilc, WILC_BUS_RELEASE_ONLY); - return ret; - } + if (ret) + goto release; wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, ®); if ((reg & BIT(10)) == BIT(10)) { @@ -1159,8 +1154,9 @@ int wilc_wlan_start(struct wilc *wilc) reg |= BIT(10); ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg); wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, ®); - release_bus(wilc, WILC_BUS_RELEASE_ONLY); +release: + release_bus(wilc, WILC_BUS_RELEASE_ONLY); return ret; } @@ -1174,36 +1170,34 @@ int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif) ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, ®); if (ret) { netdev_err(vif->ndev, "Error while 
reading reg\n"); - release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); - return ret; + goto release; } ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0, (reg | WILC_ABORT_REQ_BIT)); if (ret) { netdev_err(vif->ndev, "Error while writing reg\n"); - release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); - return ret; + goto release; } ret = wilc->hif_func->hif_read_reg(wilc, WILC_FW_HOST_COMM, &reg); if (ret) { netdev_err(vif->ndev, "Error while reading reg\n"); - release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); - return ret; + goto release; } reg = BIT(0); ret = wilc->hif_func->hif_write_reg(wilc, WILC_FW_HOST_COMM, reg); if (ret) { netdev_err(vif->ndev, "Error while writing reg\n"); - release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); - return ret; + goto release; } + ret = 0; +release: release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); - return 0; + return ret; } void wilc_wlan_cleanup(struct net_device *dev) diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c index 0003df577cb3..840728ed57b2 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c @@ -295,9 +295,9 @@ static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index) priv->rx_skb[index] = skb; rxbd = &ps->rx_bd_vbase[index]; - paddr = pci_map_single(priv->pdev, skb->data, - SKB_BUF_SIZE, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(priv->pdev, paddr)) { + paddr = dma_map_single(&priv->pdev->dev, skb->data, SKB_BUF_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&priv->pdev->dev, paddr)) { pr_err("skb DMA mapping error: %pad\n", &paddr); return -ENOMEM; } @@ -357,8 +357,8 @@ static void qtnf_pearl_free_xfer_buffers(struct qtnf_pcie_pearl_state *ps) skb = priv->rx_skb[i]; paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h), le32_to_cpu(rxbd->addr)); - pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pdev->dev, paddr, + SKB_BUF_SIZE, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); priv->rx_skb[i] = NULL; } @@ -371,8 +371,8 @@ static void qtnf_pearl_free_xfer_buffers(struct qtnf_pcie_pearl_state *ps) skb = priv->tx_skb[i]; paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h), le32_to_cpu(txbd->addr)); - pci_unmap_single(priv->pdev, paddr, skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&priv->pdev->dev, paddr, skb->len, + DMA_TO_DEVICE); dev_kfree_skb_any(skb); priv->tx_skb[i] = NULL; } @@ -485,8 +485,8 @@ static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps) txbd = &ps->tx_bd_vbase[i]; paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h), le32_to_cpu(txbd->addr)); - pci_unmap_single(priv->pdev, paddr, skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&priv->pdev->dev, paddr, skb->len, + DMA_TO_DEVICE); if (skb->dev) { dev_sw_netstats_tx_add(skb->dev, 1, skb->len); @@ -559,9 +559,9 @@ static int qtnf_pcie_skb_send(struct qtnf_bus *bus, struct sk_buff *skb) priv->tx_skb[i] = skb; len = skb->len; - skb_paddr = pci_map_single(priv->pdev, skb->data, - skb->len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(priv->pdev, skb_paddr)) { + skb_paddr = dma_map_single(&priv->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&priv->pdev->dev, skb_paddr)) { pr_err("skb DMA mapping error: %pad\n", &skb_paddr); ret = -ENOMEM; goto tx_done; @@ -748,8 +748,8 @@ static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget) if (skb) { skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h), 
le32_to_cpu(rxbd->addr)); - pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pdev->dev, skb_paddr, + SKB_BUF_SIZE, DMA_FROM_DEVICE); } if (consume) { diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c index 24f1be8ddcef..9534e1b33780 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c @@ -255,9 +255,9 @@ topaz_skb2rbd_attach(struct qtnf_pcie_topaz_state *ts, u16 index, u32 wrap) ts->base.rx_skb[index] = skb; - paddr = pci_map_single(ts->base.pdev, skb->data, - SKB_BUF_SIZE, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(ts->base.pdev, paddr)) { + paddr = dma_map_single(&ts->base.pdev->dev, skb->data, SKB_BUF_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&ts->base.pdev->dev, paddr)) { pr_err("skb mapping error: %pad\n", &paddr); return -ENOMEM; } @@ -306,8 +306,8 @@ static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts) rxbd = &ts->rx_bd_vbase[i]; skb = priv->rx_skb[i]; paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr)); - pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pdev->dev, paddr, + SKB_BUF_SIZE, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); priv->rx_skb[i] = NULL; rxbd->addr = 0; @@ -321,8 +321,8 @@ static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts) txbd = &ts->tx_bd_vbase[i]; skb = priv->tx_skb[i]; paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr)); - pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE, - PCI_DMA_TODEVICE); + dma_unmap_single(&priv->pdev->dev, paddr, + SKB_BUF_SIZE, DMA_TO_DEVICE); dev_kfree_skb_any(skb); priv->tx_skb[i] = NULL; txbd->addr = 0; @@ -414,8 +414,8 @@ static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts) if (likely(skb)) { txbd = &ts->tx_bd_vbase[i]; paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr)); - pci_unmap_single(priv->pdev, paddr, skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&priv->pdev->dev, paddr, skb->len, + DMA_TO_DEVICE); if (skb->dev) { dev_sw_netstats_tx_add(skb->dev, 1, skb->len); @@ -522,9 +522,9 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb, priv->tx_skb[i] = skb; len = skb->len; - skb_paddr = pci_map_single(priv->pdev, skb->data, - skb->len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(priv->pdev, skb_paddr)) { + skb_paddr = dma_map_single(&priv->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&priv->pdev->dev, skb_paddr)) { ret = -ENOMEM; goto tx_done; } @@ -653,8 +653,8 @@ static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget) if (skb) { skb_paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr)); - pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pdev->dev, skb_paddr, + SKB_BUF_SIZE, DMA_FROM_DEVICE); } if (consume) { diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 590bd974d94f..0f5009c47cd0 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -982,7 +982,9 @@ AP to AP 1 1 dest AP src AP dest source if (local->net_type == ADHOC) { writeb(0, &ptx->mac.frame_ctl_2); memcpy_toio(ptx->mac.addr_1, ((struct ethhdr *)data)->h_dest, - 2 * ADDRLEN); + ADDRLEN); + memcpy_toio(ptx->mac.addr_2, ((struct ethhdr *)data)->h_source, + ADDRLEN); memcpy_toio(ptx->mac.addr_3, local->bss_id, ADDRLEN); } else { /* infrastructure */ @@ -2424,9 
+2426,7 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs, copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff); /* if we are trying to get authenticated */ if (local->sparm.b4.a_network_type == ADHOC) { - pr_debug("ray_cs rx_auth var= %02x %02x %02x %02x %02x %02x\n", - msg->var[0], msg->var[1], msg->var[2], msg->var[3], - msg->var[4], msg->var[5]); + pr_debug("ray_cs rx_auth var= %6ph\n", msg->var); if (msg->var[2] == 1) { pr_debug("ray_cs Sending authentication response.\n"); if (!build_auth_frame diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 01735776345a..7ddce3c3f0c4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1378,6 +1378,8 @@ struct rtl8xxxu_priv { u8 no_pape:1; u8 int_buf[USB_INTR_CONTENT_LENGTH]; u8 rssi_level; + DECLARE_BITMAP(tx_aggr_started, IEEE80211_NUM_TIDS); + DECLARE_BITMAP(tid_tx_operational, IEEE80211_NUM_TIDS); /* * Only one virtual interface permitted because only STA mode * is supported and no iface_combinations are provided. diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index ac1061caacd6..774341b0005a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -1670,7 +1670,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) priv->rf_paths = 2; priv->rx_paths = 2; priv->tx_paths = 2; - priv->usb_interrupts = 1; + priv->usb_interrupts = 0; priv->rtl_chip = RTL8192C; } priv->has_wifi = 1; @@ -1680,7 +1680,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) priv->rx_paths = 1; priv->tx_paths = 1; priv->rtl_chip = RTL8188C; - priv->usb_interrupts = 1; + priv->usb_interrupts = 0; priv->has_wifi = 1; } @@ -4805,6 +4805,8 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info); struct rtl8xxxu_priv *priv = hw->priv; struct device *dev = &priv->udev->dev; + u8 *qc = ieee80211_get_qos_ctl(hdr); + u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; u32 rate; u16 rate_flags = tx_info->control.rates[0].flags; u16 seq_number; @@ -4828,7 +4830,7 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT); - if (ampdu_enable) + if (ampdu_enable && test_bit(tid, priv->tid_tx_operational)) tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE); else tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK); @@ -4876,6 +4878,8 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, struct rtl8xxxu_priv *priv = hw->priv; struct device *dev = &priv->udev->dev; struct rtl8xxxu_txdesc40 *tx_desc40; + u8 *qc = ieee80211_get_qos_ctl(hdr); + u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; u32 rate; u16 rate_flags = tx_info->control.rates[0].flags; u16 seq_number; @@ -4902,7 +4906,7 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, tx_desc40->txdw9 = cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT); - if (ampdu_enable) + if (ampdu_enable && test_bit(tid, priv->tid_tx_operational)) tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE); else tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK); @@ -5015,12 +5019,19 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, if (ieee80211_is_data_qos(hdr->frame_control) && sta) { if 
(sta->ht_cap.ht_supported) { u32 ampdu, val32; + u8 *qc = ieee80211_get_qos_ctl(hdr); + u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; ampdu = (u32)sta->ht_cap.ampdu_density; val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT; tx_desc->txdw2 |= cpu_to_le32(val32); ampdu_enable = true; + + if (!test_bit(tid, priv->tx_aggr_started) && + !(skb->protocol == cpu_to_be16(ETH_P_PAE))) + if (!ieee80211_start_tx_ba_session(sta, tid, 0)) + set_bit(tid, priv->tx_aggr_started); } } @@ -6096,6 +6107,7 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct device *dev = &priv->udev->dev; u8 ampdu_factor, ampdu_density; struct ieee80211_sta *sta = params->sta; + u16 tid = params->tid; enum ieee80211_ampdu_mlme_action action = params->action; switch (action) { @@ -6108,17 +6120,20 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, dev_dbg(dev, "Changed HT: ampdu_factor %02x, ampdu_density %02x\n", ampdu_factor, ampdu_density); - break; + return IEEE80211_AMPDU_TX_START_IMMEDIATE; + case IEEE80211_AMPDU_TX_STOP_CONT: case IEEE80211_AMPDU_TX_STOP_FLUSH: - dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__); - rtl8xxxu_set_ampdu_factor(priv, 0); - rtl8xxxu_set_ampdu_min_space(priv, 0); - break; case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: - dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n", - __func__); + dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP\n", __func__); rtl8xxxu_set_ampdu_factor(priv, 0); rtl8xxxu_set_ampdu_min_space(priv, 0); + clear_bit(tid, priv->tx_aggr_started); + clear_bit(tid, priv->tid_tx_operational); + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_OPERATIONAL\n", __func__); + set_bit(tid, priv->tid_tx_operational); break; case IEEE80211_AMPDU_RX_START: dev_dbg(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index 76dd881ef9bb..9b83c710c9b8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -160,6 +160,15 @@ static u32 targetchnl_2g[TARGET_CHNL_NUM_2G] = { 25711, 25658, 25606, 25554, 25502, 25451, 25328 }; +static const u8 channel_all[59] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, + 60, 62, 64, 100, 102, 104, 106, 108, 110, 112, + 114, 116, 118, 120, 122, 124, 126, 128, 130, + 132, 134, 136, 138, 140, 149, 151, 153, 155, + 157, 159, 161, 163, 165 +}; + static u32 _rtl92d_phy_calculate_bit_shift(u32 bitmask) { u32 i = ffs(bitmask); @@ -681,7 +690,7 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - bool rtstatus = true; + bool rtstatus; rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n"); rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw, @@ -1354,15 +1363,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel) u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl) { - u8 channel_all[59] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, - 60, 62, 64, 100, 102, 104, 106, 108, 110, 112, - 114, 116, 118, 120, 122, 124, 126, 128, 130, - 132, 134, 136, 138, 140, 149, 151, 153, 155, - 157, 159, 161, 163, 165 - }; - u8 place = chnl; + u8 place; if (chnl > 14) { for (place = 14; 
place < sizeof(channel_all); place++) { @@ -3220,37 +3221,28 @@ void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw) u8 rtl92d_get_chnlgroup_fromarray(u8 chnl) { u8 group; - u8 channel_info[59] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, - 58, 60, 62, 64, 100, 102, 104, 106, 108, - 110, 112, 114, 116, 118, 120, 122, 124, - 126, 128, 130, 132, 134, 136, 138, 140, - 149, 151, 153, 155, 157, 159, 161, 163, - 165 - }; - if (channel_info[chnl] <= 3) + if (channel_all[chnl] <= 3) group = 0; - else if (channel_info[chnl] <= 9) + else if (channel_all[chnl] <= 9) group = 1; - else if (channel_info[chnl] <= 14) + else if (channel_all[chnl] <= 14) group = 2; - else if (channel_info[chnl] <= 44) + else if (channel_all[chnl] <= 44) group = 3; - else if (channel_info[chnl] <= 54) + else if (channel_all[chnl] <= 54) group = 4; - else if (channel_info[chnl] <= 64) + else if (channel_all[chnl] <= 64) group = 5; - else if (channel_info[chnl] <= 112) + else if (channel_all[chnl] <= 112) group = 6; - else if (channel_info[chnl] <= 126) + else if (channel_all[chnl] <= 126) group = 7; - else if (channel_info[chnl] <= 140) + else if (channel_all[chnl] <= 140) group = 8; - else if (channel_info[chnl] <= 153) + else if (channel_all[chnl] <= 153) group = 9; - else if (channel_info[chnl] <= 159) + else if (channel_all[chnl] <= 159) group = 10; else group = 11; diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile index c0e4b111c8b4..73d6807a8cdf 100644 --- a/drivers/net/wireless/realtek/rtw88/Makefile +++ b/drivers/net/wireless/realtek/rtw88/Makefile @@ -15,9 +15,9 @@ rtw88_core-y += main.o \ ps.o \ sec.o \ bf.o \ - wow.o \ regd.o +rtw88_core-$(CONFIG_PM) += wow.o obj-$(CONFIG_RTW88_8822B) += rtw88_8822b.o rtw88_8822b-objs := rtw8822b.o rtw8822b_table.o diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 3bfa5ecc0053..e6399519584b 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -819,7 +819,7 @@ static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev, continue; if ((!ssid && !rsvd_pkt->ssid) || rtw_ssid_equal(rsvd_pkt->ssid, ssid)) - size = rsvd_pkt->skb->len; + size = rsvd_pkt->probe_req_size; } return size; @@ -1047,6 +1047,8 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw, ssid->ssid_len, 0); else skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0); + if (skb_new) + rsvd_pkt->probe_req_size = (u16)skb_new->len; break; case RSVD_NLO_INFO: skb_new = rtw_nlo_info_get(hw); @@ -1643,6 +1645,7 @@ int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size, static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size, u8 location) { + struct rtw_chip_info *chip = rtwdev->chip; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN; @@ -1653,6 +1656,7 @@ static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size, UPDATE_PKT_SET_LOCATION(h2c_pkt, location); /* include txdesc size */ + size += chip->tx_pkt_desc_sz; UPDATE_PKT_SET_SIZE(h2c_pkt, size); rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); @@ -1662,7 +1666,7 @@ void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev, struct cfg80211_ssid *ssid) { u8 loc; - u32 size; + u16 size; loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid); if (!loc) { diff --git a/drivers/net/wireless/realtek/rtw88/fw.h 
b/drivers/net/wireless/realtek/rtw88/fw.h index a8a7162fbe64..64dcde35a021 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -99,7 +99,7 @@ enum rtw_beacon_filter_offload_mode { BCN_FILTER_OFFLOAD_MODE_2, BCN_FILTER_OFFLOAD_MODE_3, - BCN_FILTER_OFFLOAD_MODE_DEFAULT = BCN_FILTER_OFFLOAD_MODE_1, + BCN_FILTER_OFFLOAD_MODE_DEFAULT = BCN_FILTER_OFFLOAD_MODE_0, }; struct rtw_coex_info_req { @@ -147,6 +147,7 @@ struct rtw_rsvd_page { u8 page; bool add_txdesc; struct cfg80211_ssid *ssid; + u16 probe_req_size; }; enum rtw_keep_alive_pkt_type { diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index c6364837e83b..6bb55e663fc3 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -1338,6 +1338,8 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev, if (rtw_chip_has_rx_ldpc(rtwdev)) ht_cap->cap |= IEEE80211_HT_CAP_LDPC_CODING; + if (rtw_chip_has_tx_stbc(rtwdev)) + ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC; if (efuse->hw_cap.bw & BIT(RTW_CHANNEL_WIDTH_40)) ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 | diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index e5af375b3dd0..56812127a053 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -1146,6 +1146,7 @@ struct rtw_chip_info { u8 txgi_factor; bool is_pwr_by_rate_dec; bool rx_ldpc; + bool tx_stbc; u8 max_power_index; u16 fw_fifo_addr[RTW_FW_FIFO_MAX]; @@ -1959,6 +1960,11 @@ static inline bool rtw_chip_has_rx_ldpc(struct rtw_dev *rtwdev) return rtwdev->chip->rx_ldpc; } +static inline bool rtw_chip_has_tx_stbc(struct rtw_dev *rtwdev) +{ + return rtwdev->chip->tx_stbc; +} + static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id) { clear_bit(mac_id, rtwdev->mac_id_map); diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index e7d17ab8f113..a7a6ebfaa203 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -268,11 +268,6 @@ static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev, int i, allocated; int ret = 0; - if (len > TRX_BD_IDX_MASK) { - rtw_err(rtwdev, "len %d exceeds maximum RX entries\n", len); - return -EINVAL; - } - head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); if (!head) { rtw_err(rtwdev, "failed to allocate rx ring\n"); @@ -1359,6 +1354,25 @@ static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable) rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value); } +static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable) +{ + u8 value; + int ret; + + ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value); + if (ret) { + rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret); + return; + } + + if (enable) + value &= ~BIT_CLKREQ_N_PAD; + else + value |= BIT_CLKREQ_N_PAD; + + rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value); +} + static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable) { u8 value; @@ -1500,11 +1514,25 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev) static int __maybe_unused rtw_pci_suspend(struct device *dev) { + struct ieee80211_hw *hw = dev_get_drvdata(dev); + struct rtw_dev *rtwdev = hw->priv; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + + if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) + rtw_pci_clkreq_pad_low(rtwdev, true); return 0; } static int 
__maybe_unused rtw_pci_resume(struct device *dev) { + struct ieee80211_hw *hw = dev_get_drvdata(dev); + struct rtw_dev *rtwdev = hw->priv; + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + + if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) + rtw_pci_clkreq_pad_low(rtwdev, false); return 0; } @@ -1701,6 +1729,15 @@ static const struct dmi_system_id rtw88_pci_quirks[] = { }, .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM), }, + { + .callback = disable_pci_caps, + .ident = "HP HP Pavilion Laptop 14-ce0xxx", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Laptop 14-ce0xxx"), + }, + .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM), + }, {} }; diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h index 0ffae887527a..66f78eb7757c 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.h +++ b/drivers/net/wireless/realtek/rtw88/pci.h @@ -37,6 +37,7 @@ #define RTK_PCIE_LINK_CFG 0x0719 #define BIT_CLKREQ_SW_EN BIT(4) #define BIT_L1_SW_EN BIT(3) +#define BIT_CLKREQ_N_PAD BIT(0) #define RTK_PCIE_CLKDLY_CTRL 0x0725 #define BIT_PCI_BCNQ_FLAG BIT(4) diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index 8bf3cd3a3678..f3ad079967a6 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -5288,6 +5288,7 @@ struct rtw_chip_info rtw8822c_hw_spec = { .bfer_su_max_num = 2, .bfer_mu_max_num = 1, .rx_ldpc = true, + .tx_stbc = true, #ifdef CONFIG_PM .wow_fw_name = "rtw88/rtw8822c_wow_fw.bin", diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c index 0193708fc013..3a101aa139ed 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.c +++ b/drivers/net/wireless/realtek/rtw88/tx.c @@ -162,7 +162,7 @@ void rtw_tx_report_purge_timer(struct timer_list *t) if (skb_queue_len(&tx_report->queue) == 0) return; - rtw_dbg(rtwdev, RTW_DBG_TX, "purge skb(s) not reported by firmware\n"); + rtw_warn(rtwdev, "failed to get tx report from firmware\n"); spin_lock_irqsave(&tx_report->q_lock, flags); skb_queue_purge(&tx_report->queue); diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c index fc9544f4e5e4..89dc595094d5 100644 --- a/drivers/net/wireless/realtek/rtw88/wow.c +++ b/drivers/net/wireless/realtek/rtw88/wow.c @@ -12,26 +12,54 @@ static void rtw_wow_show_wakeup_reason(struct rtw_dev *rtwdev) { + struct cfg80211_wowlan_nd_info nd_info; + struct cfg80211_wowlan_wakeup wakeup = { + .pattern_idx = -1, + }; u8 reason; reason = rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON); - if (reason == RTW_WOW_RSN_RX_DEAUTH) + switch (reason) { + case RTW_WOW_RSN_RX_DEAUTH: + wakeup.disconnect = true; rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx deauth\n"); - else if (reason == RTW_WOW_RSN_DISCONNECT) + break; + case RTW_WOW_RSN_DISCONNECT: + wakeup.disconnect = true; rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: AP is off\n"); - else if (reason == RTW_WOW_RSN_RX_MAGIC_PKT) + break; + case RTW_WOW_RSN_RX_MAGIC_PKT: + wakeup.magic_pkt = true; rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx magic packet\n"); - else if (reason == RTW_WOW_RSN_RX_GTK_REKEY) + break; + case RTW_WOW_RSN_RX_GTK_REKEY: + wakeup.gtk_rekey_failure = true; rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx gtk rekey\n"); - else if (reason == RTW_WOW_RSN_RX_PTK_REKEY) - rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx ptk rekey\n"); - else if (reason == RTW_WOW_RSN_RX_PATTERN_MATCH) + break; + 
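/* The wakeup reasons handled below ought to carry extra detail (the + * matched pattern index, net-detect match info); current firmware does + * not report these, so the handlers fill in placeholder values. */ +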
case RTW_WOW_RSN_RX_PATTERN_MATCH: + /* Current firmware and driver don't report the pattern index, + * so default pattern_idx to 0. + */ + wakeup.pattern_idx = 0; rtw_dbg(rtwdev, RTW_DBG_WOW, "WOW: Rx pattern match packet\n"); - else if (reason == RTW_WOW_RSN_RX_NLO) + break; + case RTW_WOW_RSN_RX_NLO: + /* Current firmware and driver don't report the ssid index. + * Use 0 for n_matches, i.e. report no match details. + */ + nd_info.n_matches = 0; + wakeup.net_detect = &nd_info; rtw_dbg(rtwdev, RTW_DBG_WOW, "Rx NLO\n"); - else + break; + default: rtw_warn(rtwdev, "Unknown wakeup reason %x\n", reason); + ieee80211_report_wowlan_wakeup(rtwdev->wow.wow_vif, NULL, + GFP_KERNEL); + return; + } + ieee80211_report_wowlan_wakeup(rtwdev->wow.wow_vif, &wakeup, + GFP_KERNEL); } static void rtw_wow_pattern_write_cam(struct rtw_dev *rtwdev, u8 addr, @@ -283,15 +311,26 @@ static void rtw_wow_rx_dma_start(struct rtw_dev *rtwdev) static int rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable) { - /* wait 100ms for wow firmware to finish work */ - msleep(100); + int ret; + u8 check; + u32 check_dis; if (wow_enable) { - if (rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON)) + ret = read_poll_timeout(rtw_read8, check, !check, 1000, + 100000, true, rtwdev, + REG_WOWLAN_WAKE_REASON); + if (ret) goto wow_fail; } else { - if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) || - rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE)) + ret = read_poll_timeout(rtw_read32_mask, check_dis, + !check_dis, 1000, 100000, true, rtwdev, + REG_FE1IMR, BIT_FS_RXDONE); + if (ret) + goto wow_fail; + ret = read_poll_timeout(rtw_read32_mask, check_dis, + !check_dis, 1000, 100000, false, rtwdev, + REG_RXPKT_NUM, BIT_RW_RELEASE); + if (ret) goto wow_fail; } @@ -432,37 +471,31 @@ static void rtw_wow_fw_media_status(struct rtw_dev *rtwdev, bool connect) rtw_iterate_stas_atomic(rtwdev, rtw_wow_fw_media_status_iter, &data); } -static void rtw_wow_config_pno_rsvd_page(struct rtw_dev *rtwdev, - struct rtw_vif *rtwvif) +static int rtw_wow_config_wow_fw_rsvd_page(struct rtw_dev *rtwdev) { - rtw_add_rsvd_page_pno(rtwdev, rtwvif); -} - -static void rtw_wow_config_linked_rsvd_page(struct rtw_dev *rtwdev, - struct rtw_vif *rtwvif) -{ - rtw_add_rsvd_page_sta(rtwdev, rtwvif); -} + struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif; + struct rtw_vif *rtwvif = (struct rtw_vif *)wow_vif->drv_priv; -static void rtw_wow_config_rsvd_page(struct rtw_dev *rtwdev, - struct rtw_vif *rtwvif) -{ rtw_remove_rsvd_page(rtwdev, rtwvif); - if (rtw_wow_mgd_linked(rtwdev)) { - rtw_wow_config_linked_rsvd_page(rtwdev, rtwvif); - } else if (test_bit(RTW_FLAG_WOWLAN, rtwdev->flags) && - rtw_wow_no_link(rtwdev)) { - rtw_wow_config_pno_rsvd_page(rtwdev, rtwvif); - } + if (rtw_wow_no_link(rtwdev)) + rtw_add_rsvd_page_pno(rtwdev, rtwvif); + else + rtw_add_rsvd_page_sta(rtwdev, rtwvif); + + return rtw_fw_download_rsvd_page(rtwdev); } -static int rtw_wow_dl_fw_rsvd_page(struct rtw_dev *rtwdev) +static int rtw_wow_config_normal_fw_rsvd_page(struct rtw_dev *rtwdev) { struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif; struct rtw_vif *rtwvif = (struct rtw_vif *)wow_vif->drv_priv; - rtw_wow_config_rsvd_page(rtwdev, rtwvif); + rtw_remove_rsvd_page(rtwdev, rtwvif); + rtw_add_rsvd_page_sta(rtwdev, rtwvif); + + if (rtw_wow_no_link(rtwdev)) + return 0; return rtw_fw_download_rsvd_page(rtwdev); } @@ -660,7 +693,7 @@ static int rtw_wow_enable(struct rtw_dev *rtwdev) set_bit(RTW_FLAG_WOWLAN, rtwdev->flags); - ret = rtw_wow_dl_fw_rsvd_page(rtwdev); + ret = 
rtw_wow_config_wow_fw_rsvd_page(rtwdev); if (ret) { rtw_err(rtwdev, "failed to download wowlan rsvd page\n"); goto error; @@ -733,7 +766,7 @@ static int rtw_wow_disable(struct rtw_dev *rtwdev) goto out; } - ret = rtw_wow_dl_fw_rsvd_page(rtwdev); + ret = rtw_wow_config_normal_fw_rsvd_page(rtwdev); if (ret) rtw_err(rtwdev, "failed to download normal rsvd page\n"); diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c index 24a417ea2ae7..bf22fd948276 100644 --- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c +++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c @@ -117,7 +117,7 @@ static int rsi_stats_read(struct seq_file *seq, void *data) { struct rsi_common *common = seq->private; - unsigned char fsm_state[][32] = { + static const unsigned char fsm_state[][32] = { "FSM_FW_NOT_LOADED", "FSM_CARD_NOT_READY", "FSM_COMMON_DEV_PARAMS_SENT", diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 99b21a2c8386..f4a26f16f00f 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -1038,8 +1038,10 @@ static int rsi_load_9116_firmware(struct rsi_hw *adapter) } ta_firmware = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); - if (!ta_firmware) + if (!ta_firmware) { + status = -ENOMEM; goto fail_release_fw; + } fw_p = ta_firmware; instructions_sz = fw_entry->size; rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", instructions_sz); diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 3fbe2a3c1455..416976f09888 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -816,6 +816,7 @@ static int rsi_probe(struct usb_interface *pfunction, } else { rsi_dbg(ERR_ZONE, "%s: Unsupported RSI device id 0x%x\n", __func__, id->idProduct); + status = -ENODEV; goto err1; } diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig index de9384326bc8..77dbfc418bce 100644 --- a/drivers/net/wwan/Kconfig +++ b/drivers/net/wwan/Kconfig @@ -38,6 +38,18 @@ config MHI_WWAN_CTRL To compile this driver as a module, choose M here: the module will be called mhi_wwan_ctrl. +config MHI_WWAN_MBIM + tristate "MHI WWAN MBIM network driver for QCOM-based PCIe modems" + depends on MHI_BUS + help + MHI WWAN MBIM is a WWAN network driver for QCOM-based PCIe modems. + It implements MBIM over MHI, for IP data aggregation and muxing. + A default wwan0 network interface is created for MBIM data session + ID 0. Additional links can be created via wwan rtnetlink type. + + To compile this driver as a module, choose M here: the module will be + called mhi_wwan_mbim. 
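+ # Example (hypothetical iproute2 invocation; the wwan link type is + # real, but the exact keywords here are an assumption and may differ + # by iproute2 version): + # ip link add dev wwan0-1 parentdev wwan0 type wwan linkid 1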
+ config RPMSG_WWAN_CTRL tristate "RPMSG WWAN control driver" depends on RPMSG diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile index d90ac33abaef..fe51feedac21 100644 --- a/drivers/net/wwan/Makefile +++ b/drivers/net/wwan/Makefile @@ -9,5 +9,6 @@ wwan-objs += wwan_core.o obj-$(CONFIG_WWAN_HWSIM) += wwan_hwsim.o obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o +obj-$(CONFIG_MHI_WWAN_MBIM) += mhi_wwan_mbim.o obj-$(CONFIG_RPMSG_WWAN_CTRL) += rpmsg_wwan_ctrl.o obj-$(CONFIG_IOSM) += iosm/ diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c index 7f7d364d3a51..2fe88b8be348 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c +++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c @@ -479,6 +479,7 @@ static struct pci_driver iosm_ipc_driver = { }, .id_table = iosm_ipc_ids, }; +module_pci_driver(iosm_ipc_driver); int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data, size_t size, dma_addr_t *mapping, int direction) @@ -560,21 +561,3 @@ void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb) IPC_CB(skb)->mapping = 0; dev_kfree_skb(skb); } - -static int __init iosm_ipc_driver_init(void) -{ - if (pci_register_driver(&iosm_ipc_driver)) { - pr_err("registering of IOSM PCIe driver failed"); - return -1; - } - - return 0; -} - -static void __exit iosm_ipc_driver_exit(void) -{ - pci_unregister_driver(&iosm_ipc_driver); -} - -module_init(iosm_ipc_driver_init); -module_exit(iosm_ipc_driver_exit); diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol.c b/drivers/net/wwan/iosm/iosm_ipc_protocol.c index 834d8b146a94..63fc7012f09f 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_protocol.c +++ b/drivers/net/wwan/iosm/iosm_ipc_protocol.c @@ -239,9 +239,9 @@ struct iosm_protocol *ipc_protocol_init(struct iosm_imem *ipc_imem) ipc_protocol->old_msg_tail = 0; ipc_protocol->p_ap_shm = - pci_alloc_consistent(ipc_protocol->pcie->pci, - sizeof(*ipc_protocol->p_ap_shm), - &ipc_protocol->phy_ap_shm); + dma_alloc_coherent(&ipc_protocol->pcie->pci->dev, + sizeof(*ipc_protocol->p_ap_shm), + &ipc_protocol->phy_ap_shm, GFP_KERNEL); if (!ipc_protocol->p_ap_shm) { dev_err(ipc_protocol->dev, "pci shm alloc error"); @@ -275,8 +275,8 @@ struct iosm_protocol *ipc_protocol_init(struct iosm_imem *ipc_imem) void ipc_protocol_deinit(struct iosm_protocol *proto) { - pci_free_consistent(proto->pcie->pci, sizeof(*proto->p_ap_shm), - proto->p_ap_shm, proto->phy_ap_shm); + dma_free_coherent(&proto->pcie->pci->dev, sizeof(*proto->p_ap_shm), + proto->p_ap_shm, proto->phy_ap_shm); ipc_pm_deinit(proto); kfree(proto); diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c index 35d590743d3a..c6b032f95d2e 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c @@ -74,9 +74,9 @@ static int ipc_protocol_msg_prepipe_open(struct iosm_protocol *ipc_protocol, return -ENOMEM; /* Allocate the transfer descriptors for the pipe. */ - tdr = pci_alloc_consistent(ipc_protocol->pcie->pci, - pipe->nr_of_entries * sizeof(*tdr), - &pipe->phy_tdr_start); + tdr = dma_alloc_coherent(&ipc_protocol->pcie->pci->dev, + pipe->nr_of_entries * sizeof(*tdr), + &pipe->phy_tdr_start, GFP_ATOMIC); if (!tdr) { kfree(skbr); dev_err(ipc_protocol->dev, "tdr alloc error"); @@ -492,10 +492,9 @@ void ipc_protocol_pipe_cleanup(struct iosm_protocol *ipc_protocol, /* Free and reset the td and skbuf circular buffers. kfree is save! 
*/ if (pipe->tdr_start) { - pci_free_consistent(ipc_protocol->pcie->pci, - sizeof(*pipe->tdr_start) * - pipe->nr_of_entries, - pipe->tdr_start, pipe->phy_tdr_start); + dma_free_coherent(&ipc_protocol->pcie->pci->dev, + sizeof(*pipe->tdr_start) * pipe->nr_of_entries, + pipe->tdr_start, pipe->phy_tdr_start); pipe->tdr_start = NULL; } diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c new file mode 100644 index 000000000000..71bf9b4f769f --- /dev/null +++ b/drivers/net/wwan/mhi_wwan_mbim.c @@ -0,0 +1,658 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* MHI MBIM Network driver - Network/MBIM over MHI bus + * + * Copyright (C) 2021 Linaro Ltd <loic.poulain@linaro.org> + * + * This driver copies some code from cdc_ncm, which is: + * Copyright (C) ST-Ericsson 2010-2012 + * and cdc_mbim, which is: + * Copyright (c) 2012 Smith Micro Software, Inc. + * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no> + * + */ + +#include <linux/ethtool.h> +#include <linux/if_arp.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/mhi.h> +#include <linux/mii.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/u64_stats_sync.h> +#include <linux/usb.h> +#include <linux/usb/cdc.h> +#include <linux/usb/usbnet.h> +#include <linux/usb/cdc_ncm.h> +#include <linux/wwan.h> + +/* 3500 allows us to optimize skb allocation; the skbs will basically fit in + * one 4K page. Large MBIM packets will simply be split over several MHI + * transfers and chained by the MHI net layer (zerocopy). + */ +#define MHI_DEFAULT_MRU 3500 + +#define MHI_MBIM_DEFAULT_MTU 1500 +#define MHI_MAX_BUF_SZ 0xffff + +#define MBIM_NDP16_SIGN_MASK 0x00ffffff + +#define MHI_MBIM_LINK_HASH_SIZE 8 +#define LINK_HASH(session) ((session) % MHI_MBIM_LINK_HASH_SIZE) + +struct mhi_mbim_link { + struct mhi_mbim_context *mbim; + struct net_device *ndev; + unsigned int session; + + /* stats */ + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_errors; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_errors; + u64_stats_t tx_dropped; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync rx_syncp; + + struct hlist_node hlnode; +}; + +struct mhi_mbim_context { + struct mhi_device *mdev; + struct sk_buff *skbagg_head; + struct sk_buff *skbagg_tail; + unsigned int mru; + u32 rx_queue_sz; + u16 rx_seq; + u16 tx_seq; + struct delayed_work rx_refill; + spinlock_t tx_lock; + struct hlist_head link_list[MHI_MBIM_LINK_HASH_SIZE]; +}; + +struct mbim_tx_hdr { + struct usb_cdc_ncm_nth16 nth16; + struct usb_cdc_ncm_ndp16 ndp16; + struct usb_cdc_ncm_dpe16 dpe16[2]; +} __packed; + +static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim, + unsigned int session) +{ + struct mhi_mbim_link *link; + + hlist_for_each_entry_rcu(link, &mbim->link_list[LINK_HASH(session)], hlnode) { + if (link->session == session) + return link; + } + + return NULL; +} + +static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session, + u16 tx_seq) +{ + unsigned int dgram_size = skb->len; + struct usb_cdc_ncm_nth16 *nth16; + struct usb_cdc_ncm_ndp16 *ndp16; + struct mbim_tx_hdr *mbim_hdr; + + /* Only one NDP is sent, containing the IP packet (no aggregation) */ + + /* Ensure we have enough headroom for crafting the MBIM header */ + if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) { + dev_kfree_skb_any(skb); + return NULL; + } + + mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr)); + + /* Fill NTB header 
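(NTH16: signature, header length, sequence number, total block length + * and the offset of the first NDP) 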
*/ + nth16 = &mbim_hdr->nth16; + nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN); + nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)); + nth16->wSequence = cpu_to_le16(tx_seq); + nth16->wBlockLength = cpu_to_le16(skb->len); + nth16->wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)); + + /* Fill the unique NDP */ + ndp16 = &mbim_hdr->ndp16; + ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN | (session << 24)); + ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + + sizeof(struct usb_cdc_ncm_dpe16) * 2); + ndp16->wNextNdpIndex = 0; + + /* Datagram follows the mbim header */ + ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr)); + ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size); + + /* null termination */ + ndp16->dpe16[1].wDatagramIndex = 0; + ndp16->dpe16[1].wDatagramLength = 0; + + return skb; +} + +static netdev_tx_t mhi_mbim_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + struct mhi_mbim_context *mbim = link->mbim; + unsigned long flags; + int err = -ENOMEM; + + /* Serialize MHI channel queuing and MBIM seq */ + spin_lock_irqsave(&mbim->tx_lock, flags); + + skb = mbim_tx_fixup(skb, link->session, mbim->tx_seq); + if (unlikely(!skb)) + goto exit_unlock; + + err = mhi_queue_skb(mbim->mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT); + + if (mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE)) + netif_stop_queue(ndev); + + if (!err) + mbim->tx_seq++; + +exit_unlock: + spin_unlock_irqrestore(&mbim->tx_lock, flags); + + if (unlikely(err)) { + net_err_ratelimited("%s: Failed to queue TX buf (%d)\n", + ndev->name, err); + dev_kfree_skb_any(skb); + goto exit_drop; + } + + return NETDEV_TX_OK; + +exit_drop: + u64_stats_update_begin(&link->tx_syncp); + u64_stats_inc(&link->tx_dropped); + u64_stats_update_end(&link->tx_syncp); + + return NETDEV_TX_OK; +} + +static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *skb) +{ + struct usb_cdc_ncm_nth16 *nth16; + int len; + + if (skb->len < sizeof(struct usb_cdc_ncm_nth16) + + sizeof(struct usb_cdc_ncm_ndp16)) { + net_err_ratelimited("frame too short\n"); + return -EINVAL; + } + + nth16 = (struct usb_cdc_ncm_nth16 *)skb->data; + + if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) { + net_err_ratelimited("invalid NTH16 signature <%#010x>\n", + le32_to_cpu(nth16->dwSignature)); + return -EINVAL; + } + + /* No limit on the block length, except the size of the data pkt */ + len = le16_to_cpu(nth16->wBlockLength); + if (len > skb->len) { + net_err_ratelimited("NTB does not fit into the skb %u/%u\n", + len, skb->len); + return -EINVAL; + } + + if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) && + (mbim->rx_seq || le16_to_cpu(nth16->wSequence)) && + !(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) { + net_err_ratelimited("sequence number glitch prev=%d curr=%d\n", + mbim->rx_seq, le16_to_cpu(nth16->wSequence)); + } + mbim->rx_seq = le16_to_cpu(nth16->wSequence); + + return le16_to_cpu(nth16->wNdpIndex); +} + +static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16) +{ + int ret; + + if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) { + net_err_ratelimited("invalid DPT16 length <%u>\n", + le16_to_cpu(ndp16->wLength)); + return -EINVAL; + } + + ret = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16)) + / sizeof(struct usb_cdc_ncm_dpe16)); + ret--; /* Last entry is always a NULL terminator */ + + if 
(sizeof(struct usb_cdc_ncm_ndp16) + + ret * sizeof(struct usb_cdc_ncm_dpe16) > skb->len) { + net_err_ratelimited("Invalid nframes = %d\n", ret); + return -EINVAL; + } + + return ret; +} + +static void mhi_mbim_rx(struct mhi_mbim_context *mbim, struct sk_buff *skb) +{ + int ndpoffset; + + /* Check NTB header and retrieve first NDP offset */ + ndpoffset = mbim_rx_verify_nth16(mbim, skb); + if (ndpoffset < 0) { + net_err_ratelimited("mbim: Incorrect NTB header\n"); + goto error; + } + + /* Process each NDP */ + while (1) { + struct usb_cdc_ncm_ndp16 ndp16; + struct usb_cdc_ncm_dpe16 dpe16; + struct mhi_mbim_link *link; + int nframes, n, dpeoffset; + unsigned int session; + + if (skb_copy_bits(skb, ndpoffset, &ndp16, sizeof(ndp16))) { + net_err_ratelimited("mbim: Incorrect NDP offset (%u)\n", + ndpoffset); + goto error; + } + + /* Check NDP header and retrieve number of datagrams */ + nframes = mbim_rx_verify_ndp16(skb, &ndp16); + if (nframes < 0) { + net_err_ratelimited("mbim: Incorrect NDP16\n"); + goto error; + } + + /* Only IP data type supported, no DSS in MHI context */ + if ((ndp16.dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK)) + != cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) { + net_err_ratelimited("mbim: Unsupported NDP type\n"); + goto next_ndp; + } + + session = (le32_to_cpu(ndp16.dwSignature) & ~MBIM_NDP16_SIGN_MASK) >> 24; + + rcu_read_lock(); + + link = mhi_mbim_get_link_rcu(mbim, session); + if (!link) { + net_err_ratelimited("mbim: bad packet session (%u)\n", session); + goto unlock; + } + + /* de-aggregate and deliver IP packets */ + dpeoffset = ndpoffset + sizeof(struct usb_cdc_ncm_ndp16); + for (n = 0; n < nframes; n++, dpeoffset += sizeof(dpe16)) { + u16 dgram_offset, dgram_len; + struct sk_buff *skbn; + + if (skb_copy_bits(skb, dpeoffset, &dpe16, sizeof(dpe16))) + break; + + dgram_offset = le16_to_cpu(dpe16.wDatagramIndex); + dgram_len = le16_to_cpu(dpe16.wDatagramLength); + + if (!dgram_offset || !dgram_len) + break; /* null terminator */ + + skbn = netdev_alloc_skb(link->ndev, dgram_len); + if (!skbn) + continue; + + skb_put(skbn, dgram_len); + skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len); + + switch (skbn->data[0] & 0xf0) { + case 0x40: + skbn->protocol = htons(ETH_P_IP); + break; + case 0x60: + skbn->protocol = htons(ETH_P_IPV6); + break; + default: + net_err_ratelimited("%s: unknown protocol\n", + link->ndev->name); + dev_kfree_skb_any(skbn); + u64_stats_update_begin(&link->rx_syncp); + u64_stats_inc(&link->rx_errors); + u64_stats_update_end(&link->rx_syncp); + continue; + } + + u64_stats_update_begin(&link->rx_syncp); + u64_stats_inc(&link->rx_packets); + u64_stats_add(&link->rx_bytes, skbn->len); + u64_stats_update_end(&link->rx_syncp); + + netif_rx(skbn); + } +unlock: + rcu_read_unlock(); +next_ndp: + /* Other NDP to process? 
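(an NTB can chain several NDPs through wNextNdpIndex; a zero index ends the walk) 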
*/ + ndpoffset = (int)le16_to_cpu(ndp16.wNextNdpIndex); + if (!ndpoffset) + break; + } + + /* free skb */ + dev_consume_skb_any(skb); + return; +error: + dev_kfree_skb_any(skb); +} + +static struct sk_buff *mhi_net_skb_agg(struct mhi_mbim_context *mbim, + struct sk_buff *skb) +{ + struct sk_buff *head = mbim->skbagg_head; + struct sk_buff *tail = mbim->skbagg_tail; + + /* This is non-paged skb chaining using frag_list */ + if (!head) { + mbim->skbagg_head = skb; + return skb; + } + + if (!skb_shinfo(head)->frag_list) + skb_shinfo(head)->frag_list = skb; + else + tail->next = skb; + + head->len += skb->len; + head->data_len += skb->len; + head->truesize += skb->truesize; + + mbim->skbagg_tail = skb; + + return mbim->skbagg_head; +} + +static void mhi_net_rx_refill_work(struct work_struct *work) +{ + struct mhi_mbim_context *mbim = container_of(work, struct mhi_mbim_context, + rx_refill.work); + struct mhi_device *mdev = mbim->mdev; + int err; + + while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) { + struct sk_buff *skb = alloc_skb(MHI_DEFAULT_MRU, GFP_KERNEL); + + if (unlikely(!skb)) + break; + + err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, + MHI_DEFAULT_MRU, MHI_EOT); + if (unlikely(err)) { + kfree_skb(skb); + break; + } + + /* Do not hog the CPU if rx buffers are consumed faster than + * queued (unlikely). + */ + cond_resched(); + } + + /* If we're still starved of rx buffers, reschedule later */ + if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mbim->rx_queue_sz) + schedule_delayed_work(&mbim->rx_refill, HZ / 2); +} + +static void mhi_mbim_dl_callback(struct mhi_device *mhi_dev, + struct mhi_result *mhi_res) +{ + struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev); + struct sk_buff *skb = mhi_res->buf_addr; + int free_desc_count; + + free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); + + if (unlikely(mhi_res->transaction_status)) { + switch (mhi_res->transaction_status) { + case -EOVERFLOW: + /* Packet has been split over multiple transfers */ + skb_put(skb, mhi_res->bytes_xferd); + mhi_net_skb_agg(mbim, skb); + break; + case -ENOTCONN: + /* MHI layer stopping/resetting the DL channel */ + dev_kfree_skb_any(skb); + return; + default: + /* Unknown error, simply drop */ + dev_kfree_skb_any(skb); + } + } else { + skb_put(skb, mhi_res->bytes_xferd); + + if (mbim->skbagg_head) { + /* Aggregate the final fragment */ + skb = mhi_net_skb_agg(mbim, skb); + mbim->skbagg_head = NULL; + } + + mhi_mbim_rx(mbim, skb); + } + + /* Refill if RX buffers queue becomes low */ + if (free_desc_count >= mbim->rx_queue_sz / 2) + schedule_delayed_work(&mbim->rx_refill, 0); +} + +static void mhi_mbim_ndo_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&link->rx_syncp); + stats->rx_packets = u64_stats_read(&link->rx_packets); + stats->rx_bytes = u64_stats_read(&link->rx_bytes); + stats->rx_errors = u64_stats_read(&link->rx_errors); + } while (u64_stats_fetch_retry_irq(&link->rx_syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&link->tx_syncp); + stats->tx_packets = u64_stats_read(&link->tx_packets); + stats->tx_bytes = u64_stats_read(&link->tx_bytes); + stats->tx_errors = u64_stats_read(&link->tx_errors); + stats->tx_dropped = u64_stats_read(&link->tx_dropped); + } while (u64_stats_fetch_retry_irq(&link->tx_syncp, start)); +} + +static void mhi_mbim_ul_callback(struct mhi_device *mhi_dev, + struct mhi_result 
*mhi_res) +{ + struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev); + struct sk_buff *skb = mhi_res->buf_addr; + struct net_device *ndev = skb->dev; + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + + /* Hardware has consumed the buffer, so free the skb (which is not + * freed by the MHI stack) and perform accounting. + */ + dev_consume_skb_any(skb); + + u64_stats_update_begin(&link->tx_syncp); + if (unlikely(mhi_res->transaction_status)) { + /* MHI layer stopping/resetting the UL channel */ + if (mhi_res->transaction_status == -ENOTCONN) { + u64_stats_update_end(&link->tx_syncp); + return; + } + + u64_stats_inc(&link->tx_errors); + } else { + u64_stats_inc(&link->tx_packets); + u64_stats_add(&link->tx_bytes, mhi_res->bytes_xferd); + } + u64_stats_update_end(&link->tx_syncp); + + if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE)) + netif_wake_queue(ndev); +} + +static int mhi_mbim_ndo_open(struct net_device *ndev) +{ + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + + /* Feed the MHI rx buffer pool */ + schedule_delayed_work(&link->mbim->rx_refill, 0); + + /* Carrier is established via out-of-band channel (e.g. qmi) */ + netif_carrier_on(ndev); + + netif_start_queue(ndev); + + return 0; +} + +static int mhi_mbim_ndo_stop(struct net_device *ndev) +{ + netif_stop_queue(ndev); + netif_carrier_off(ndev); + + return 0; +} + +static const struct net_device_ops mhi_mbim_ndo = { + .ndo_open = mhi_mbim_ndo_open, + .ndo_stop = mhi_mbim_ndo_stop, + .ndo_start_xmit = mhi_mbim_ndo_xmit, + .ndo_get_stats64 = mhi_mbim_ndo_get_stats64, +}; + +static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id, + struct netlink_ext_ack *extack) +{ + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + struct mhi_mbim_context *mbim = ctxt; + + link->session = if_id; + link->mbim = mbim; + link->ndev = ndev; + u64_stats_init(&link->rx_syncp); + u64_stats_init(&link->tx_syncp); + + rcu_read_lock(); + if (mhi_mbim_get_link_rcu(mbim, if_id)) { + rcu_read_unlock(); + return -EEXIST; + } + rcu_read_unlock(); + + /* Already protected by RTNL lock */ + hlist_add_head_rcu(&link->hlnode, &mbim->link_list[LINK_HASH(if_id)]); + + return register_netdevice(ndev); +} + +static void mhi_mbim_dellink(void *ctxt, struct net_device *ndev, + struct list_head *head) +{ + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + + hlist_del_init_rcu(&link->hlnode); + synchronize_rcu(); + + unregister_netdevice_queue(ndev, head); +} + +static void mhi_mbim_setup(struct net_device *ndev) +{ + ndev->header_ops = NULL; /* No header */ + ndev->type = ARPHRD_RAWIP; + ndev->needed_headroom = sizeof(struct mbim_tx_hdr); + ndev->hard_header_len = 0; + ndev->addr_len = 0; + ndev->flags = IFF_POINTOPOINT | IFF_NOARP; + ndev->netdev_ops = &mhi_mbim_ndo; + ndev->mtu = MHI_MBIM_DEFAULT_MTU; + ndev->min_mtu = ETH_MIN_MTU; + ndev->max_mtu = MHI_MAX_BUF_SZ - ndev->needed_headroom; + ndev->tx_queue_len = 1000; +} + +static const struct wwan_ops mhi_mbim_wwan_ops = { + .priv_size = sizeof(struct mhi_mbim_link), + .setup = mhi_mbim_setup, + .newlink = mhi_mbim_newlink, + .dellink = mhi_mbim_dellink, +}; + +static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) +{ + struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; + struct mhi_mbim_context *mbim; + int err; + + mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL); + if (!mbim) + return -ENOMEM; + + spin_lock_init(&mbim->tx_lock); + dev_set_drvdata(&mhi_dev->dev, mbim); + mbim->mdev = 
mhi_dev; + mbim->mru = mhi_dev->mhi_cntrl->mru ? mhi_dev->mhi_cntrl->mru : MHI_DEFAULT_MRU; + + INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work); + + /* Start MHI channels */ + err = mhi_prepare_for_transfer(mhi_dev); + if (err) + return err; + + /* Number of transfer descriptors determines size of the queue */ + mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); + + /* Register wwan link ops with MHI controller representing WWAN instance */ + return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0); +} + +static void mhi_mbim_remove(struct mhi_device *mhi_dev) +{ + struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev); + struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; + + mhi_unprepare_from_transfer(mhi_dev); + cancel_delayed_work_sync(&mbim->rx_refill); + wwan_unregister_ops(&cntrl->mhi_dev->dev); + kfree_skb(mbim->skbagg_head); + dev_set_drvdata(&mhi_dev->dev, NULL); +} + +static const struct mhi_device_id mhi_mbim_id_table[] = { + /* Hardware accelerated data PATH (to modem IPA), MBIM protocol */ + { .chan = "IP_HW0_MBIM", .driver_data = 0 }, + {} +}; +MODULE_DEVICE_TABLE(mhi, mhi_mbim_id_table); + +static struct mhi_driver mhi_mbim_driver = { + .probe = mhi_mbim_probe, + .remove = mhi_mbim_remove, + .dl_xfer_cb = mhi_mbim_dl_callback, + .ul_xfer_cb = mhi_mbim_ul_callback, + .id_table = mhi_mbim_id_table, + .driver = { + .name = "mhi_wwan_mbim", + .owner = THIS_MODULE, + }, +}; + +module_mhi_driver(mhi_mbim_driver); + +MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>"); +MODULE_DESCRIPTION("Network/MBIM over MHI"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c index 35ece98134c0..d293ab688044 100644 --- a/drivers/net/wwan/wwan_core.c +++ b/drivers/net/wwan/wwan_core.c @@ -359,8 +359,8 @@ struct wwan_port *wwan_create_port(struct device *parent, { struct wwan_device *wwandev; struct wwan_port *port; - int minor, err = -ENOMEM; char namefmt[0x20]; + int minor, err; if (type > WWAN_PORT_MAX || !ops) return ERR_PTR(-EINVAL); @@ -374,11 +374,14 @@ struct wwan_port *wwan_create_port(struct device *parent, /* A port is exposed as character device, get a minor */ minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL); - if (minor < 0) + if (minor < 0) { + err = minor; goto error_wwandev_remove; + } port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) { + err = -ENOMEM; ida_free(&minors, minor); goto error_wwandev_remove; } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 44275908d61a..e31b98403f31 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -126,21 +126,17 @@ struct netfront_queue { /* * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries - * are linked from tx_skb_freelist through skb_entry.link. - * - * NB. Freelist index entries are always going to be less than - * PAGE_OFFSET, whereas pointers to skbs will always be equal or - * greater than PAGE_OFFSET: we use this property to distinguish - * them. + * are linked from tx_skb_freelist through tx_link. 
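+ * tx_link[] doubles as list linkage: TX_LINK_NONE marks a slot on no + * list and TX_PENDING a request exposed to the backend, so only + * pending slots may be completed by a response. 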
*/ - union skb_entry { - struct sk_buff *skb; - unsigned long link; - } tx_skbs[NET_TX_RING_SIZE]; + struct sk_buff *tx_skbs[NET_TX_RING_SIZE]; + unsigned short tx_link[NET_TX_RING_SIZE]; +#define TX_LINK_NONE 0xffff +#define TX_PENDING 0xfffe grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; struct page *grant_tx_page[NET_TX_RING_SIZE]; unsigned tx_skb_freelist; + unsigned int tx_pend_queue; spinlock_t rx_lock ____cacheline_aligned_in_smp; struct xen_netif_rx_front_ring rx; @@ -173,6 +169,9 @@ struct netfront_info { bool netback_has_xdp_headroom; bool netfront_xdp_enabled; + /* Is device behaving sane? */ + bool broken; + atomic_t rx_gso_checksum_fixup; }; @@ -181,33 +180,25 @@ struct netfront_rx_info { struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; -static void skb_entry_set_link(union skb_entry *list, unsigned short id) -{ - list->link = id; -} - -static int skb_entry_is_link(const union skb_entry *list) -{ - BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); - return (unsigned long)list->skb < PAGE_OFFSET; -} - /* * Access macros for acquiring freeing slots in tx_skbs[]. */ -static void add_id_to_freelist(unsigned *head, union skb_entry *list, - unsigned short id) +static void add_id_to_list(unsigned *head, unsigned short *list, + unsigned short id) { - skb_entry_set_link(&list[id], *head); + list[id] = *head; *head = id; } -static unsigned short get_id_from_freelist(unsigned *head, - union skb_entry *list) +static unsigned short get_id_from_list(unsigned *head, unsigned short *list) { unsigned int id = *head; - *head = list[id].link; + + if (id != TX_LINK_NONE) { + *head = list[id]; + list[id] = TX_LINK_NONE; + } return id; } @@ -363,7 +354,7 @@ static int xennet_open(struct net_device *dev) unsigned int i = 0; struct netfront_queue *queue = NULL; - if (!np->queues) + if (!np->queues || np->broken) return -ENODEV; for (i = 0; i < num_queues; ++i) { @@ -391,27 +382,47 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue) unsigned short id; struct sk_buff *skb; bool more_to_do; + const struct device *dev = &queue->info->netdev->dev; BUG_ON(!netif_carrier_ok(queue->info->netdev)); do { prod = queue->tx.sring->rsp_prod; + if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) { + dev_alert(dev, "Illegal number of responses %u\n", + prod - queue->tx.rsp_cons); + goto err; + } rmb(); /* Ensure we see responses up to 'rp'. 
 
 		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
-			struct xen_netif_tx_response *txrsp;
+			struct xen_netif_tx_response txrsp;
 
-			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
-			if (txrsp->status == XEN_NETIF_RSP_NULL)
+			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
+			if (txrsp.status == XEN_NETIF_RSP_NULL)
 				continue;
 
-			id = txrsp->id;
-			skb = queue->tx_skbs[id].skb;
+			id = txrsp.id;
+			if (id >= RING_SIZE(&queue->tx)) {
+				dev_alert(dev,
+					  "Response has incorrect id (%u)\n",
+					  id);
+				goto err;
+			}
+			if (queue->tx_link[id] != TX_PENDING) {
+				dev_alert(dev,
+					  "Response for inactive request\n");
+				goto err;
+			}
+
+			queue->tx_link[id] = TX_LINK_NONE;
+			skb = queue->tx_skbs[id];
+			queue->tx_skbs[id] = NULL;
 			if (unlikely(gnttab_query_foreign_access(
 				queue->grant_tx_ref[id]) != 0)) {
-				pr_alert("%s: warning -- grant still in use by backend domain\n",
-					 __func__);
-				BUG();
+				dev_alert(dev,
+					  "Grant still in use by backend domain\n");
+				goto err;
 			}
 			gnttab_end_foreign_access_ref(
 				queue->grant_tx_ref[id], GNTMAP_readonly);
@@ -419,7 +430,7 @@
 				&queue->gref_tx_head, queue->grant_tx_ref[id]);
 			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
 			queue->grant_tx_page[id] = NULL;
-			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
+			add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
 			dev_kfree_skb_irq(skb);
 		}
 
@@ -429,13 +440,20 @@
 	} while (more_to_do);
 
 	xennet_maybe_wake_tx(queue);
+
+	return;
+
+ err:
+	queue->info->broken = true;
+	dev_alert(dev, "Disabled for further use\n");
 }
 
 struct xennet_gnttab_make_txreq {
 	struct netfront_queue *queue;
 	struct sk_buff *skb;
 	struct page *page;
-	struct xen_netif_tx_request *tx;      /* Last request */
+	struct xen_netif_tx_request *tx;      /* Last request on ring page */
+	struct xen_netif_tx_request tx_local; /* Last request local copy */
 	unsigned int size;
 };
 
@@ -451,7 +469,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 	struct netfront_queue *queue = info->queue;
 	struct sk_buff *skb = info->skb;
 
-	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+	id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
 	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
@@ -459,34 +477,37 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 					gfn, GNTMAP_readonly);
 
-	queue->tx_skbs[id].skb = skb;
+	queue->tx_skbs[id] = skb;
 	queue->grant_tx_page[id] = page;
 	queue->grant_tx_ref[id] = ref;
 
-	tx->id = id;
-	tx->gref = ref;
-	tx->offset = offset;
-	tx->size = len;
-	tx->flags = 0;
+	info->tx_local.id = id;
+	info->tx_local.gref = ref;
+	info->tx_local.offset = offset;
+	info->tx_local.size = len;
+	info->tx_local.flags = 0;
+
+	*tx = info->tx_local;
+
+	/*
+	 * Put the request in the pending queue, it will be set to be pending
+	 * when the producer index is about to be raised.
+	 */
+	add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
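Throughout the patch, RING_GET_RESPONSE() pointers into the shared ring page are replaced by RING_COPY_RESPONSE() into locals such as txrsp and tx_local. The point is to avoid double-fetch races: a field validated on the shared page can be rewritten by the other domain before it is used. A minimal model of the safe pattern (illustrative types only, not the Xen ring macros):

#include <string.h>

struct demo_resp {
	unsigned short id;
	short status;
};

static int demo_consume_one(volatile struct demo_resp *shared,
			    unsigned int idx, unsigned int ring_size)
{
	struct demo_resp local;

	/* Exactly one copy off the shared page... */
	memcpy(&local, (const struct demo_resp *)&shared[idx], sizeof(local));

	/* ...so this check cannot be invalidated by a concurrent writer. */
	if (local.id >= ring_size)
		return -1;

	return local.id;	/* every later use sees the validated value */
}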
 
 	info->tx = tx;
-	info->size += tx->size;
+	info->size += info->tx_local.size;
 }
 
 static struct xen_netif_tx_request *xennet_make_first_txreq(
-	struct netfront_queue *queue, struct sk_buff *skb,
-	struct page *page, unsigned int offset, unsigned int len)
+	struct xennet_gnttab_make_txreq *info,
+	unsigned int offset, unsigned int len)
 {
-	struct xennet_gnttab_make_txreq info = {
-		.queue = queue,
-		.skb = skb,
-		.page = page,
-		.size = 0,
-	};
+	info->size = 0;
 
-	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
+	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
 
-	return info.tx;
+	return info->tx;
 }
 
 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
@@ -499,35 +520,27 @@
 	xennet_tx_setup_grant(gfn, offset, len, data);
 }
 
-static struct xen_netif_tx_request *xennet_make_txreqs(
-	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
-	struct sk_buff *skb, struct page *page,
+static void xennet_make_txreqs(
+	struct xennet_gnttab_make_txreq *info,
+	struct page *page,
 	unsigned int offset, unsigned int len)
 {
-	struct xennet_gnttab_make_txreq info = {
-		.queue = queue,
-		.skb = skb,
-		.tx = tx,
-	};
-
 	/* Skip unused frames from start of page */
 	page += offset >> PAGE_SHIFT;
 	offset &= ~PAGE_MASK;
 
 	while (len) {
-		info.page = page;
-		info.size = 0;
+		info->page = page;
+		info->size = 0;
 
 		gnttab_foreach_grant_in_range(page, offset, len,
 					      xennet_make_one_txreq,
-					      &info);
+					      info);
 
 		page++;
 		offset = 0;
-		len -= info.size;
+		len -= info->size;
 	}
-
-	return info.tx;
 }
 
 /*
@@ -574,19 +587,34 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 	return queue_idx;
 }
 
+static void xennet_mark_tx_pending(struct netfront_queue *queue)
+{
+	unsigned int i;
+
+	while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
+	       TX_LINK_NONE)
+		queue->tx_link[i] = TX_PENDING;
+}
+
 static int xennet_xdp_xmit_one(struct net_device *dev,
 			       struct netfront_queue *queue,
 			       struct xdp_frame *xdpf)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
+	struct xennet_gnttab_make_txreq info = {
+		.queue = queue,
+		.skb = NULL,
+		.page = virt_to_page(xdpf->data),
+	};
 	int notify;
 
-	xennet_make_first_txreq(queue, NULL,
-				virt_to_page(xdpf->data),
+	xennet_make_first_txreq(&info,
 				offset_in_page(xdpf->data),
 				xdpf->len);
 
+	xennet_mark_tx_pending(queue);
+
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 	if (notify)
 		notify_remote_via_irq(queue->tx_irq);
@@ -611,6 +639,8 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
 	int nxmit = 0;
 	int i;
 
+	if (unlikely(np->broken))
+		return -ENODEV;
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 		return -EINVAL;
 
@@ -638,7 +668,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 {
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
-	struct xen_netif_tx_request *tx, *first_tx;
+	struct xen_netif_tx_request *first_tx;
 	unsigned int i;
 	int notify;
 	int slots;
@@ -647,6 +677,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	unsigned int len;
 	unsigned long flags;
 	struct netfront_queue *queue = NULL;
+	struct xennet_gnttab_make_txreq info = { };
 	unsigned int num_queues = dev->real_num_tx_queues;
 	u16 queue_index;
 	struct sk_buff *nskb;
@@ -654,6 +685,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	/* Drop the packet if no queues are set up */
 	if (num_queues < 1)
 		goto drop;
+	if (unlikely(np->broken))
+		goto drop;
 	/* Determine which queue to transmit this SKB on */
 	queue_index = skb_get_queue_mapping(skb);
 	queue = &np->queues[queue_index];
@@ -704,21 +737,24 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	}
 
 	/* First request for the linear area. */
-	first_tx = tx = xennet_make_first_txreq(queue, skb,
-						page, offset, len);
-	offset += tx->size;
+	info.queue = queue;
+	info.skb = skb;
+	info.page = page;
+	first_tx = xennet_make_first_txreq(&info, offset, len);
+	offset += info.tx_local.size;
 	if (offset == PAGE_SIZE) {
 		page++;
 		offset = 0;
 	}
-	len -= tx->size;
+	len -= info.tx_local.size;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		/* local packet? */
-		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
+		first_tx->flags |= XEN_NETTXF_csum_blank |
+				   XEN_NETTXF_data_validated;
 	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 		/* remote but checksummed. */
-		tx->flags |= XEN_NETTXF_data_validated;
+		first_tx->flags |= XEN_NETTXF_data_validated;
 
 	/* Optional extra info after the first request. */
 	if (skb_shinfo(skb)->gso_size) {
@@ -727,7 +763,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 
 		gso = (struct xen_netif_extra_info *)
 			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 
-		tx->flags |= XEN_NETTXF_extra_info;
+		first_tx->flags |= XEN_NETTXF_extra_info;
 
 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
@@ -741,12 +777,12 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	}
 
 	/* Requests for the rest of the linear area. */
-	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+	xennet_make_txreqs(&info, page, offset, len);
 
	/* Requests for all the frags. */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
+		xennet_make_txreqs(&info, skb_frag_page(frag),
 					skb_frag_off(frag),
 					skb_frag_size(frag));
 	}
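Note the ordering that xennet_mark_tx_pending() and the tx_pend_queue staging enforce: while a packet's requests are being assembled they sit on a staging chain, and only immediately before the producer index is pushed are they stamped TX_PENDING, the one state in which xennet_tx_buf_gc() will accept a response. A compressed sketch of the idea, with invented names:

#define SLOTS		16
#define LINK_NONE	0xffff
#define PENDING		0xfffe

static unsigned short link[SLOTS];
static unsigned int pend_queue = LINK_NONE;

static void stage(unsigned short id)
{
	link[id] = pend_queue;		/* request built but not yet visible */
	pend_queue = id;
}

static void publish_all(void)
{
	/* called right before the ring's producer index is raised */
	while (pend_queue != LINK_NONE) {
		unsigned int id = pend_queue;

		pend_queue = link[id];
		link[id] = PENDING;	/* only now may a response name this id */
	}
}

static int response_is_legal(unsigned short id)
{
	return id < SLOTS && link[id] == PENDING;
}

A response naming a slot that was never published is thus detectable even if the backend races the frontend's request construction.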
 
@@ -757,6 +793,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	/* timestamp packet in software */
 	skb_tx_timestamp(skb);
 
+	xennet_mark_tx_pending(queue);
+
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 	if (notify)
 		notify_remote_via_irq(queue->tx_irq);
@@ -814,7 +852,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
 			     RING_IDX rp)
 {
-	struct xen_netif_extra_info *extra;
+	struct xen_netif_extra_info extra;
 	struct device *dev = &queue->info->netdev->dev;
 	RING_IDX cons = queue->rx.rsp_cons;
 	int err = 0;
@@ -830,24 +868,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
 			break;
 		}
 
-		extra = (struct xen_netif_extra_info *)
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
 
-		if (unlikely(!extra->type ||
-			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+		if (unlikely(!extra.type ||
+			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			if (net_ratelimit())
 				dev_warn(dev, "Invalid extra type: %d\n",
-					 extra->type);
+					 extra.type);
 			err = -EINVAL;
 		} else {
-			memcpy(&extras[extra->type - 1], extra,
-			       sizeof(*extra));
+			extras[extra.type - 1] = extra;
 		}
 
 		skb = xennet_get_rx_skb(queue, cons);
 		ref = xennet_get_rx_ref(queue, cons);
 		xennet_move_rx_slot(queue, skb, ref);
-	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
 	queue->rx.rsp_cons = cons;
 	return err;
@@ -905,7 +941,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
 				struct sk_buff_head *list,
 				bool *need_xdp_flush)
 {
-	struct xen_netif_rx_response *rx = &rinfo->rx;
+	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
 	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
@@ -989,7 +1025,8 @@ next:
 			break;
 		}
 
-		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
+		rx = &rx_local;
 		skb = xennet_get_rx_skb(queue, cons + slots);
 		ref = xennet_get_rx_ref(queue, cons + slots);
 		slots++;
@@ -1044,10 +1081,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
 	struct sk_buff *nskb;
 
 	while ((nskb = __skb_dequeue(list))) {
-		struct xen_netif_rx_response *rx =
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		struct xen_netif_rx_response rx;
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
+		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
+
 		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
@@ -1062,7 +1100,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				skb_frag_page(nfrag),
-				rx->offset, rx->status, PAGE_SIZE);
+				rx.offset, rx.status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
 		kfree_skb(nskb);
@@ -1156,12 +1194,19 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 	skb_queue_head_init(&tmpq);
 
 	rp = queue->rx.sring->rsp_prod;
+	if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
+		dev_alert(&dev->dev, "Illegal number of responses %u\n",
+			  rp - queue->rx.rsp_cons);
+		queue->info->broken = true;
+		spin_unlock(&queue->rx_lock);
+		return 0;
+	}
	rmb(); /* Ensure we see queued responses up to 'rp'. */
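All of the new failure paths converge on the same policy: set the broken flag, log once with dev_alert(), and stop touching the rings, with every entry point (open, xmit, poll, interrupts) bailing out while the flag is set. A skeleton of this latch-and-refuse pattern (hypothetical names, not the driver code):

#include <errno.h>
#include <stdbool.h>

struct demo_dev {
	bool broken;	/* latched on first sign of a misbehaving peer */
};

static int demo_poll(struct demo_dev *d, unsigned int prod, unsigned int cons,
		     unsigned int ring_size)
{
	if (prod - cons > ring_size) {	/* impossible unless the peer lies */
		d->broken = true;	/* latch; no in-place recovery */
		return 0;		/* stop processing immediately */
	}
	/* ... normal response processing ... */
	return 1;
}

static int demo_open(struct demo_dev *d)
{
	return d->broken ? -ENODEV : 0;	/* refuse service while latched */
}

static void demo_reconnect(struct demo_dev *d)
{
	d->broken = false;	/* only a fresh backend connection resets it */
}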
 
 	i = queue->rx.rsp_cons;
 	work_done = 0;
 	while ((i != rp) && (work_done < budget)) {
-		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
+		RING_COPY_RESPONSE(&queue->rx, i, rx);
 		memset(extras, 0, sizeof(rinfo.extras));
 
 		err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
@@ -1286,17 +1331,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
 
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
 		/* Skip over entries which are actually freelist references */
-		if (skb_entry_is_link(&queue->tx_skbs[i]))
+		if (!queue->tx_skbs[i])
 			continue;
 
-		skb = queue->tx_skbs[i].skb;
+		skb = queue->tx_skbs[i];
+		queue->tx_skbs[i] = NULL;
 		get_page(queue->grant_tx_page[i]);
 		gnttab_end_foreign_access(queue->grant_tx_ref[i],
 					  GNTMAP_readonly,
 					  (unsigned long)page_address(queue->grant_tx_page[i]));
 		queue->grant_tx_page[i] = NULL;
 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
-		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
+		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
 		dev_kfree_skb_irq(skb);
 	}
 }
@@ -1376,6 +1422,9 @@ static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
 	struct netfront_queue *queue = dev_id;
 	unsigned long flags;
 
+	if (queue->info->broken)
+		return IRQ_HANDLED;
+
 	spin_lock_irqsave(&queue->tx_lock, flags);
 	xennet_tx_buf_gc(queue);
 	spin_unlock_irqrestore(&queue->tx_lock, flags);
@@ -1388,6 +1437,9 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
 	struct netfront_queue *queue = dev_id;
 	struct net_device *dev = queue->info->netdev;
 
+	if (queue->info->broken)
+		return IRQ_HANDLED;
+
 	if (likely(netif_carrier_ok(dev) &&
 		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
 		napi_schedule(&queue->napi);
@@ -1409,6 +1461,10 @@ static void xennet_poll_controller(struct net_device *dev)
 	struct netfront_info *info = netdev_priv(dev);
 	unsigned int num_queues = dev->real_num_tx_queues;
 	unsigned int i;
+
+	if (info->broken)
+		return;
+
 	for (i = 0; i < num_queues; ++i)
 		xennet_interrupt(0, &info->queues[i]);
 }
@@ -1480,6 +1536,11 @@ static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 
 static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 {
+	struct netfront_info *np = netdev_priv(dev);
+
+	if (np->broken)
+		return -ENODEV;
+
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
 		return xennet_xdp_set(dev, xdp->prog, xdp->extack);
@@ -1853,13 +1914,15 @@ static int xennet_init_queue(struct netfront_queue *queue)
 	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
 		 devid, queue->id);
 
-	/* Initialise tx_skbs as a free chain containing every entry. */
+	/* Initialise tx_skb_freelist as a free chain containing every entry. */
 	queue->tx_skb_freelist = 0;
+	queue->tx_pend_queue = TX_LINK_NONE;
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
-		skb_entry_set_link(&queue->tx_skbs[i], i+1);
+		queue->tx_link[i] = i + 1;
 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
 		queue->grant_tx_page[i] = NULL;
 	}
+	queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
 
 	/* Clear out rx_skbs */
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
@@ -2128,6 +2191,9 @@ static int talk_to_netback(struct xenbus_device *dev,
 	if (info->queues)
 		xennet_destroy_queues(info);
 
+	/* For the case of a reconnect reset the "broken" indicator. */
+	info->broken = false;
+
 	err = xennet_create_queues(info, &num_queues);
 	if (err < 0) {
 		xenbus_dev_fatal(dev, err, "creating queues");
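With the union gone, a slot's state is directly testable: tx_skbs[i] is NULL exactly when the slot carries no skb, so teardown code such as xennet_release_tx_bufs() no longer needs the old PAGE_OFFSET comparison to tell pointers from freelist indices. A trimmed illustration of that release walk (free_skb() is a stand-in callback for the sketch, not a kernel API):

struct sk_buff;				/* opaque for the sketch */

#define RING	8

static struct sk_buff *skbs[RING];	/* NULL <=> slot not carrying an skb */

static void release_all(void (*free_skb)(struct sk_buff *))
{
	unsigned int i;

	for (i = 0; i < RING; i++) {
		if (!skbs[i])		/* freelist entry, nothing to release */
			continue;
		free_skb(skbs[i]);
		skbs[i] = NULL;		/* slot returns to the free state */
	}
}

Resetting info->broken = false in talk_to_netback() pairs with this: a reconnect rebuilds the queues from scratch, so the latch may safely be cleared along with the rest of the per-queue state.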