Diffstat (limited to 'drivers/net/ethernet/ibm')
-rw-r--r--	drivers/net/ethernet/ibm/Kconfig           |  13
-rw-r--r--	drivers/net/ethernet/ibm/ehea/ehea_main.c  |  12
-rw-r--r--	drivers/net/ethernet/ibm/emac/core.c       | 264
-rw-r--r--	drivers/net/ethernet/ibm/emac/core.h       |  10
-rw-r--r--	drivers/net/ethernet/ibm/emac/mal.c        | 108
-rw-r--r--	drivers/net/ethernet/ibm/emac/mal.h        |   2
-rw-r--r--	drivers/net/ethernet/ibm/emac/rgmii.c      |  49
-rw-r--r--	drivers/net/ethernet/ibm/emac/tah.c        |  49
-rw-r--r--	drivers/net/ethernet/ibm/emac/zmii.c       |  49
-rw-r--r--	drivers/net/ethernet/ibm/ibmveth.c         | 519
-rw-r--r--	drivers/net/ethernet/ibm/ibmveth.h         |  65
-rw-r--r--	drivers/net/ethernet/ibm/ibmvnic.c         | 306
-rw-r--r--	drivers/net/ethernet/ibm/ibmvnic.h         |   3
13 files changed, 791 insertions(+), 658 deletions(-)
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index c0c112d95b89..4f4b23465c47 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -27,6 +27,19 @@ config IBMVETH
 	  To compile this driver as a module, choose M here. The module will
 	  be called ibmveth.
 
+config IBMVETH_KUNIT_TEST
+	bool "KUnit test for IBM LAN Virtual Ethernet support" if !KUNIT_ALL_TESTS
+	depends on KUNIT
+	depends on KUNIT=y && IBMVETH=y
+	default KUNIT_ALL_TESTS
+	help
+	  This builds unit tests for the IBM LAN Virtual Ethernet driver.
+
+	  For more information on KUnit and unit tests in general, please refer
+	  to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+	  If unsure, say N.
+
 source "drivers/net/ethernet/ibm/emac/Kconfig"
 
 config EHEA
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 1e29e5c9a2df..9b006bc353a1 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -121,7 +121,7 @@ static struct platform_driver ehea_driver = {
 		.of_match_table = ehea_device_table,
 	},
 	.probe = ehea_probe_adapter,
-	.remove_new = ehea_remove,
+	.remove = ehea_remove,
 };
 
 void ehea_dump(void *adr, int len, char *msg)
@@ -3063,14 +3063,13 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
 static int ehea_setup_ports(struct ehea_adapter *adapter)
 {
 	struct device_node *lhea_dn;
-	struct device_node *eth_dn = NULL;
+	struct device_node *eth_dn;
 	const u32 *dn_log_port_id;
 	int i = 0;
 
 	lhea_dn = adapter->ofdev->dev.of_node;
-	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
-
+	for_each_child_of_node(lhea_dn, eth_dn) {
 		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
 						 NULL);
 		if (!dn_log_port_id) {
@@ -3102,12 +3101,11 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
 					   u32 logical_port_id)
 {
 	struct device_node *lhea_dn;
-	struct device_node *eth_dn = NULL;
+	struct device_node *eth_dn;
 	const u32 *dn_log_port_id;
 
 	lhea_dn = adapter->ofdev->dev.of_node;
-	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
-
+	for_each_child_of_node(lhea_dn, eth_dn) {
 		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
 						 NULL);
 		if (dn_log_port_id)
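The ehea conversion above leans on the reference counting built into
for_each_child_of_node(): the iterator takes a reference on each child and
drops it when advancing, so the manual eth_dn = NULL bookkeeping of the old
of_get_next_child() loop disappears. A minimal sketch of the contract (not
part of the patch; count_hea_ports() is illustrative):

#include <linux/of.h>

static int count_hea_ports(struct device_node *lhea_dn)
{
	struct device_node *eth_dn;
	int n = 0;

	for_each_child_of_node(lhea_dn, eth_dn) {
		if (of_get_property(eth_dn, "ibm,hea-port-no", NULL))
			n++;
		/* an early 'break' here would have to of_node_put(eth_dn) */
	}
	return n;
}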
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index e6e47b1842ea..417dfa18daae 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -32,7 +32,6 @@
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/bitops.h>
-#include <linux/workqueue.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -96,11 +95,6 @@ MODULE_LICENSE("GPL");
 static u32 busy_phy_map;
 static DEFINE_MUTEX(emac_phy_map_lock);
 
-/* This is the wait queue used to wait on any event related to probe, that
- * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
- */
-static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
-
 /* Having stable interface names is a doomed idea. However, it would be nice
  * if we didn't have completely random interface names at boot too :-) It's
  * just a matter of making everybody's life easier. Since we are doing
@@ -116,9 +110,6 @@ static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
 #define EMAC_BOOT_LIST_SIZE	4
 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
 
-/* How long should I wait for dependent devices ? */
-#define EMAC_PROBE_DEP_TIMEOUT	(HZ * 5)
-
 /* I don't want to litter system log with timeout errors
  * when we have brain-damaged PHY.
  */
@@ -418,8 +409,8 @@ do_retry:
 
 static void emac_hash_mc(struct emac_instance *dev)
 {
+	u32 __iomem *gaht_base = emac_gaht_base(dev);
 	const int regs = EMAC_XAHT_REGS(dev);
-	u32 *gaht_base = emac_gaht_base(dev);
 	u32 gaht_temp[EMAC_XAHT_MAX_REGS];
 	struct netdev_hw_addr *ha;
 	int i;
@@ -973,8 +964,6 @@ static void __emac_set_multicast_list(struct emac_instance *dev)
 	 * we need is just to stop RX channel. This seems to work on all
 	 * tested SoCs. --ebs
 	 *
-	 * If we need the full reset, we might just trigger the workqueue
-	 * and do it async... a bit nasty but should work --BenH
 	 */
 	dev->mcast_pending = 0;
 	emac_rx_disable(dev);
@@ -1098,7 +1087,7 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
 	/* This is to prevent starting RX channel in emac_rx_enable() */
 	set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
 
-	dev->ndev->mtu = new_mtu;
+	WRITE_ONCE(dev->ndev->mtu, new_mtu);
 	emac_full_tx_reset(dev);
 }
@@ -1130,7 +1119,7 @@ static int emac_change_mtu(struct net_device *ndev, int new_mtu)
 	}
 
 	if (!ret) {
-		ndev->mtu = new_mtu;
+		WRITE_ONCE(ndev->mtu, new_mtu);
 		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
 		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
 	}
@@ -1228,18 +1217,10 @@ static void emac_print_link_status(struct emac_instance *dev)
 static int emac_open(struct net_device *ndev)
 {
 	struct emac_instance *dev = netdev_priv(ndev);
-	int err, i;
+	int i;
 
 	DBG(dev, "open" NL);
 
-	/* Setup error IRQ handler */
-	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
-	if (err) {
-		printk(KERN_ERR "%s: failed to request IRQ %d\n",
-		       ndev->name, dev->emac_irq);
-		return err;
-	}
-
 	/* Allocate RX ring */
 	for (i = 0; i < NUM_RX_BUFF; ++i)
 		if (emac_alloc_rx_skb(dev, i)) {
@@ -1293,8 +1274,6 @@ static int emac_open(struct net_device *ndev)
 	return 0;
  oom:
 	emac_clean_rx_ring(dev);
-	free_irq(dev->emac_irq, dev);
-
 	return -ENOMEM;
 }
@@ -1408,8 +1387,6 @@ static int emac_close(struct net_device *ndev)
 	emac_clean_tx_ring(dev);
 	emac_clean_rx_ring(dev);
 
-	free_irq(dev->emac_irq, dev);
-
 	netif_carrier_off(ndev);
 
 	return 0;
@@ -1750,6 +1727,7 @@ static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
 /* NAPI poll context */
 static int emac_poll_rx(void *param, int budget)
 {
+	LIST_HEAD(rx_list);
 	struct emac_instance *dev = param;
 	int slot = dev->rx_slot, received = 0;
 
@@ -1806,8 +1784,7 @@ static int emac_poll_rx(void *param, int budget)
 		skb->protocol = eth_type_trans(skb, dev->ndev);
 		emac_rx_csum(dev, skb, ctrl);
 
-		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
-			++dev->estats.rx_dropped_stack;
+		list_add_tail(&skb->list, &rx_list);
 	next:
 		++dev->stats.rx_packets;
 	skip:
@@ -1851,6 +1828,8 @@ static int emac_poll_rx(void *param, int budget)
 		goto next;
 	}
 
+	netif_receive_skb_list(&rx_list);
+
 	if (received) {
 		DBG2(dev, "rx %d BDs" NL, received);
 		dev->rx_slot = slot;
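The emac_poll_rx() hunks above switch from per-packet netif_receive_skb() to
batched delivery. A condensed sketch of the pattern, under the assumption of
a hypothetical next_rx_skb() helper (not from the driver):

static int example_poll_rx(struct net_device *ndev, int budget)
{
	LIST_HEAD(rx_list);
	struct sk_buff *skb;
	int received = 0;

	while (received < budget && (skb = next_rx_skb(ndev))) {
		skb->protocol = eth_type_trans(skb, ndev);
		list_add_tail(&skb->list, &rx_list);	/* defer delivery */
		received++;
	}
	netif_receive_skb_list(&rx_list); /* one batched pass up the stack */
	return received;
}

One consequence visible in the diff: the per-skb NET_RX_DROP accounting
(estats.rx_dropped_stack) goes away, because the list API has no per-packet
return value.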
@@ -2390,7 +2369,9 @@ static int emac_check_deps(struct emac_instance *dev,
 		if (deps[i].drvdata != NULL)
 			there++;
 	}
-	return there == EMAC_DEP_COUNT;
+	if (there != EMAC_DEP_COUNT)
+		return -EPROBE_DEFER;
+	return 0;
 }
 
 static void emac_put_deps(struct emac_instance *dev)
@@ -2402,19 +2383,6 @@ static void emac_put_deps(struct emac_instance *dev)
 	platform_device_put(dev->tah_dev);
 }
 
-static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
-			      void *data)
-{
-	/* We are only intereted in device addition */
-	if (action == BUS_NOTIFY_BOUND_DRIVER)
-		wake_up_all(&emac_probe_wait);
-	return 0;
-}
-
-static struct notifier_block emac_of_bus_notifier = {
-	.notifier_call = emac_of_bus_notify
-};
-
 static int emac_wait_deps(struct emac_instance *dev)
 {
 	struct emac_depentry deps[EMAC_DEP_COUNT];
@@ -2431,18 +2399,13 @@ static int emac_wait_deps(struct emac_instance *dev)
 		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
 	if (dev->blist && dev->blist > emac_boot_list)
 		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
-	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
-	wait_event_timeout(emac_probe_wait,
-			   emac_check_deps(dev, deps),
-			   EMAC_PROBE_DEP_TIMEOUT);
-	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
-	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
+	err = emac_check_deps(dev, deps);
 	for (i = 0; i < EMAC_DEP_COUNT; i++) {
 		of_node_put(deps[i].node);
 		if (err)
 			platform_device_put(deps[i].ofdev);
 	}
-	if (err == 0) {
+	if (!err) {
 		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
 		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
 		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
@@ -2456,22 +2419,21 @@ static int emac_read_uint_prop(struct device_node *np, const char *name,
 			       u32 *val, int fatal)
 {
-	int len;
-	const u32 *prop = of_get_property(np, name, &len);
-	if (prop == NULL || len < sizeof(u32)) {
+	int err;
+
+	err = of_property_read_u32(np, name, val);
+	if (err) {
 		if (fatal)
-			printk(KERN_ERR "%pOF: missing %s property\n",
-			       np, name);
-		return -ENODEV;
+			pr_err("%pOF: missing %s property", np, name);
+		return err;
 	}
-	*val = *prop;
 	return 0;
 }
 
 static void emac_adjust_link(struct net_device *ndev)
 {
 	struct emac_instance *dev = netdev_priv(ndev);
-	struct phy_device *phy = dev->phy_dev;
+	struct phy_device *phy = ndev->phydev;
 
 	dev->phy.autoneg = phy->autoneg;
 	dev->phy.speed = phy->speed;
@@ -2522,22 +2484,20 @@ static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
 static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
 {
 	struct net_device *ndev = phy->dev;
-	struct emac_instance *dev = netdev_priv(ndev);
 
 	phy->autoneg = AUTONEG_ENABLE;
 	phy->advertising = advertise;
-	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
+	return emac_mdio_phy_start_aneg(phy, ndev->phydev);
 }
 
 static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
 {
 	struct net_device *ndev = phy->dev;
-	struct emac_instance *dev = netdev_priv(ndev);
 
 	phy->autoneg = AUTONEG_DISABLE;
 	phy->speed = speed;
 	phy->duplex = fd;
-	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
+	return emac_mdio_phy_start_aneg(phy, ndev->phydev);
 }
 
 static int emac_mdio_poll_link(struct mii_phy *phy)
@@ -2546,20 +2506,19 @@ static int emac_mdio_poll_link(struct mii_phy *phy)
 	struct emac_instance *dev = netdev_priv(ndev);
 	int res;
 
-	res = phy_read_status(dev->phy_dev);
+	res = phy_read_status(ndev->phydev);
 	if (res) {
 		dev_err(&dev->ofdev->dev, "link update failed (%d).", res);
 		return ethtool_op_get_link(ndev);
 	}
 
-	return dev->phy_dev->link;
+	return ndev->phydev->link;
 }
 
 static int emac_mdio_read_link(struct mii_phy *phy)
 {
 	struct net_device *ndev = phy->dev;
-	struct emac_instance *dev = netdev_priv(ndev);
-	struct phy_device *phy_dev = dev->phy_dev;
+	struct phy_device *phy_dev = ndev->phydev;
 	int res;
 
 	res = phy_read_status(phy_dev);
@@ -2576,10 +2535,9 @@ static int emac_mdio_read_link(struct mii_phy *phy)
 static int emac_mdio_init_phy(struct mii_phy *phy)
 {
 	struct net_device *ndev = phy->dev;
-	struct emac_instance *dev = netdev_priv(ndev);
 
-	phy_start(dev->phy_dev);
-	return phy_init_hw(dev->phy_dev);
+	phy_start(ndev->phydev);
+	return phy_init_hw(ndev->phydev);
 }
 
 static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
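emac_check_deps() now reports a missing dependency as -EPROBE_DEFER, which
emac_wait_deps() and ultimately emac_probe() propagate. The driver core then
re-runs probe after other drivers bind, which is what makes the old
notifier-plus-waitqueue machinery removable. A minimal sketch of the
contract (get_dependency() is illustrative, not a function from the patch):

static int example_probe(struct platform_device *pdev)
{
	struct platform_device *dep = get_dependency(pdev);

	if (!dep)
		return -EPROBE_DEFER;	/* core retries this probe later */

	/* ... proceed using dep ... */
	return 0;
}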
@@ -2593,36 +2551,32 @@ static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
 static int emac_dt_mdio_probe(struct emac_instance *dev)
 {
 	struct device_node *mii_np;
+	struct mii_bus *bus;
 	int res;
 
-	mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
+	mii_np = of_get_available_child_by_name(dev->ofdev->dev.of_node, "mdio");
 	if (!mii_np) {
 		dev_err(&dev->ofdev->dev, "no mdio definition found.");
 		return -ENODEV;
 	}
 
-	if (!of_device_is_available(mii_np)) {
-		res = -ENODEV;
-		goto put_node;
-	}
-
-	dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev);
-	if (!dev->mii_bus) {
+	bus = devm_mdiobus_alloc(&dev->ofdev->dev);
+	if (!bus) {
 		res = -ENOMEM;
 		goto put_node;
 	}
 
-	dev->mii_bus->priv = dev->ndev;
-	dev->mii_bus->parent = dev->ndev->dev.parent;
-	dev->mii_bus->name = "emac_mdio";
-	dev->mii_bus->read = &emac_mii_bus_read;
-	dev->mii_bus->write = &emac_mii_bus_write;
-	dev->mii_bus->reset = &emac_mii_bus_reset;
-	snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
-	res = of_mdiobus_register(dev->mii_bus, mii_np);
+	bus->priv = dev->ndev;
+	bus->parent = dev->ndev->dev.parent;
+	bus->name = "emac_mdio";
+	bus->read = &emac_mii_bus_read;
+	bus->write = &emac_mii_bus_write;
+	bus->reset = &emac_mii_bus_reset;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
+	res = devm_of_mdiobus_register(&dev->ofdev->dev, bus, mii_np);
 	if (res) {
 		dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)",
-			dev->mii_bus->name, res);
+			bus->name, res);
 	}
 
  put_node:
@@ -2633,26 +2587,28 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
 static int emac_dt_phy_connect(struct emac_instance *dev,
 			       struct device_node *phy_handle)
 {
+	struct phy_device *phy_dev;
+
 	dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
 				    GFP_KERNEL);
 	if (!dev->phy.def)
 		return -ENOMEM;
 
-	dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link,
-				      0, dev->phy_mode);
-	if (!dev->phy_dev) {
+	phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link, 0,
+				 dev->phy_mode);
+	if (!phy_dev) {
 		dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n");
 		return -ENODEV;
 	}
 
-	dev->phy.def->phy_id = dev->phy_dev->drv->phy_id;
-	dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
-	dev->phy.def->name = dev->phy_dev->drv->name;
+	dev->phy.def->phy_id = phy_dev->drv->phy_id;
+	dev->phy.def->phy_id_mask = phy_dev->drv->phy_id_mask;
+	dev->phy.def->name = phy_dev->drv->name;
 	dev->phy.def->ops = &emac_dt_mdio_phy_ops;
 	ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
-						dev->phy_dev->supported);
-	dev->phy.address = dev->phy_dev->mdio.addr;
-	dev->phy.mode = dev->phy_dev->interface;
+						phy_dev->supported);
+	dev->phy.address = phy_dev->mdio.addr;
+	dev->phy.mode = phy_dev->interface;
 	return 0;
 }
 
@@ -2668,8 +2624,6 @@ static int emac_dt_phy_probe(struct emac_instance *dev)
 		res = emac_dt_mdio_probe(dev);
 		if (!res) {
 			res = emac_dt_phy_connect(dev, phy_handle);
-			if (res)
-				mdiobus_unregister(dev->mii_bus);
 		}
 	}
 
@@ -2708,13 +2662,11 @@ static int emac_init_phy(struct emac_instance *dev)
 			return res;
 
 		res = of_phy_register_fixed_link(np);
-		dev->phy_dev = of_phy_find_device(np);
-		if (res || !dev->phy_dev) {
-			mdiobus_unregister(dev->mii_bus);
+		ndev->phydev = of_phy_find_device(np);
+		if (res || !ndev->phydev)
 			return res ? res : -EINVAL;
-		}
 		emac_adjust_link(dev->ndev);
-		put_device(&dev->phy_dev->mdio.dev);
+		put_device(&ndev->phydev->mdio.dev);
 	}
 	return 0;
 }
@@ -2980,9 +2932,12 @@ static int emac_init_config(struct emac_instance *dev)
 
 	/* Read MAC-address */
 	err = of_get_ethdev_address(np, dev->ndev);
-	if (err)
-		return dev_err_probe(&dev->ofdev->dev, err,
-				     "Can't get valid [local-]mac-address from OF !\n");
+	if (err == -EPROBE_DEFER)
+		return err;
+	if (err) {
+		dev_warn(&dev->ofdev->dev, "Can't get valid mac-address. Generating random.");
+		eth_hw_addr_random(dev->ndev);
+	}
 
 	/* IAHT and GAHT filter parameterization */
 	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
@@ -3053,7 +3008,7 @@ static int emac_probe(struct platform_device *ofdev)
 
 	/* Allocate our net_device structure */
 	err = -ENOMEM;
-	ndev = alloc_etherdev(sizeof(struct emac_instance));
+	ndev = devm_alloc_etherdev(&ofdev->dev, sizeof(struct emac_instance));
 	if (!ndev)
 		goto err_gone;
 
@@ -3064,43 +3019,45 @@ static int emac_probe(struct platform_device *ofdev)
 	SET_NETDEV_DEV(ndev, &ofdev->dev);
 
 	/* Initialize some embedded data structures */
-	mutex_init(&dev->mdio_lock);
-	mutex_init(&dev->link_lock);
+	err = devm_mutex_init(&ofdev->dev, &dev->mdio_lock);
+	if (err)
+		goto err_gone;
+
+	err = devm_mutex_init(&ofdev->dev, &dev->link_lock);
+	if (err)
+		goto err_gone;
+
 	spin_lock_init(&dev->lock);
 	INIT_WORK(&dev->reset_work, emac_reset_work);
 
 	/* Init various config data based on device-tree */
 	err = emac_init_config(dev);
 	if (err)
-		goto err_free;
+		goto err_gone;
 
-	/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
-	dev->emac_irq = irq_of_parse_and_map(np, 0);
-	dev->wol_irq = irq_of_parse_and_map(np, 1);
-	if (!dev->emac_irq) {
-		printk(KERN_ERR "%pOF: Can't map main interrupt\n", np);
-		err = -ENODEV;
-		goto err_free;
+	/* Setup error IRQ handler */
+	dev->emac_irq = platform_get_irq(ofdev, 0);
+	err = devm_request_irq(&ofdev->dev, dev->emac_irq, emac_irq, 0, "EMAC",
+			       dev);
+	if (err) {
+		dev_err_probe(&ofdev->dev, err, "failed to request IRQ %d",
			      dev->emac_irq);
+		goto err_gone;
 	}
+
 	ndev->irq = dev->emac_irq;
 
-	/* Map EMAC regs */
-	// TODO : platform_get_resource() and devm_ioremap_resource()
-	dev->emacp = of_iomap(np, 0);
-	if (dev->emacp == NULL) {
-		printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
-		err = -ENOMEM;
-		goto err_irq_unmap;
+	dev->emacp = devm_platform_ioremap_resource(ofdev, 0);
+	if (IS_ERR(dev->emacp)) {
+		dev_err(&ofdev->dev, "can't map device registers");
+		err = PTR_ERR(dev->emacp);
+		goto err_gone;
 	}
 
 	/* Wait for dependent devices */
 	err = emac_wait_deps(dev);
-	if (err) {
-		printk(KERN_ERR
-		       "%pOF: Timeout waiting for dependent devices\n", np);
-		/* display more info about what's missing ? */
-		goto err_reg_unmap;
-	}
+	if (err)
+		goto err_gone;
 	dev->mal = platform_get_drvdata(dev->mal_dev);
 	if (dev->mdio_dev != NULL)
 		dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
@@ -3187,7 +3144,7 @@ static int emac_probe(struct platform_device *ofdev)
 
 	netif_carrier_off(ndev);
 
-	err = register_netdev(ndev);
+	err = devm_register_netdev(&ofdev->dev, ndev);
 	if (err) {
 		printk(KERN_ERR "%pOF: failed to register net device (%d)!\n",
 		       np, err);
@@ -3200,10 +3157,6 @@ static int emac_probe(struct platform_device *ofdev)
 	wmb();
 	platform_set_drvdata(ofdev, dev);
 
-	/* There's a new kid in town ! Let's tell everybody */
-	wake_up_all(&emac_probe_wait);
-
-
 	printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
 	       ndev->name, dev->cell_index, np, ndev->dev_addr);
@@ -3232,24 +3185,9 @@ static int emac_probe(struct platform_device *ofdev)
 	mal_unregister_commac(dev->mal, &dev->commac);
  err_rel_deps:
 	emac_put_deps(dev);
- err_reg_unmap:
-	iounmap(dev->emacp);
- err_irq_unmap:
-	if (dev->wol_irq)
-		irq_dispose_mapping(dev->wol_irq);
-	if (dev->emac_irq)
-		irq_dispose_mapping(dev->emac_irq);
- err_free:
-	free_netdev(ndev);
 err_gone:
-	/* if we were on the bootlist, remove us as we won't show up and
-	 * wake up all waiters to notify them in case they were waiting
-	 * on us
-	 */
-	if (blist) {
+	if (blist)
 		*blist = NULL;
-		wake_up_all(&emac_probe_wait);
-	}
 	return err;
 }
@@ -3259,8 +3197,6 @@ static void emac_remove(struct platform_device *ofdev)
 
 	DBG(dev, "remove" NL);
 
-	unregister_netdev(dev->ndev);
-
 	cancel_work_sync(&dev->reset_work);
 
 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
@@ -3270,26 +3206,11 @@ static void emac_remove(struct platform_device *ofdev)
 	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
 		zmii_detach(dev->zmii_dev, dev->zmii_port);
 
-	if (dev->phy_dev)
-		phy_disconnect(dev->phy_dev);
-
-	if (dev->mii_bus)
-		mdiobus_unregister(dev->mii_bus);
-
 	busy_phy_map &= ~(1 << dev->phy.address);
 	DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
 
 	mal_unregister_commac(dev->mal, &dev->commac);
 	emac_put_deps(dev);
-
-	iounmap(dev->emacp);
-
-	if (dev->wol_irq)
-		irq_dispose_mapping(dev->wol_irq);
-	if (dev->emac_irq)
-		irq_dispose_mapping(dev->emac_irq);
-
-	free_netdev(dev->ndev);
 }
 
 /* XXX Features in here should be replaced by properties... */
@@ -3317,7 +3238,7 @@ static struct platform_driver emac_driver = {
 		.of_match_table = emac_match,
 	},
 	.probe = emac_probe,
-	.remove_new = emac_remove,
+	.remove = emac_remove,
 };
 
 static void __init emac_make_bootlist(void)
@@ -3328,16 +3249,15 @@ static void __init emac_make_bootlist(void)
 
 	/* Collect EMACs */
 	while((np = of_find_all_nodes(np)) != NULL) {
-		const u32 *idx;
+		u32 idx;
 
 		if (of_match_node(emac_match, np) == NULL)
 			continue;
 		if (of_property_read_bool(np, "unused"))
 			continue;
-		idx = of_get_property(np, "cell-index", NULL);
-		if (idx == NULL)
+		if (of_property_read_u32(np, "cell-index", &idx))
 			continue;
-		cell_indices[i] = *idx;
+		cell_indices[i] = idx;
 		emac_boot_list[i++] = of_node_get(np);
 		if (i >= EMAC_BOOT_LIST_SIZE) {
 			of_node_put(np);
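The emac_init_config() hunk above changes the MAC-address policy: only a
deferral from the OF/nvmem lookup is propagated, any other failure degrades
to a random locally-administered address instead of failing the probe.
Condensed sketch of that shape (example_read_mac() is illustrative):

static int example_read_mac(struct device_node *np, struct net_device *ndev)
{
	int err = of_get_ethdev_address(np, ndev);

	if (err == -EPROBE_DEFER)
		return err;		/* nvmem provider not bound yet */
	if (err)
		eth_hw_addr_random(ndev); /* fall back to a random LAA */
	return 0;
}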
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 295516b07662..89fa1683ec3c 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -188,10 +188,6 @@ struct emac_instance {
 	struct emac_instance		*mdio_instance;
 	struct mutex			mdio_lock;
 
-	/* Device-tree based phy configuration */
-	struct mii_bus			*mii_bus;
-	struct phy_device		*phy_dev;
-
 	/* ZMII infos if any */
 	u32				zmii_ph;
 	u32				zmii_port;
@@ -400,7 +396,7 @@ static inline int emac_has_feature(struct emac_instance *dev,
 	((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >>	\
 	 ((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1)))
 
-static inline u32 *emac_xaht_base(struct emac_instance *dev)
+static inline u32 __iomem *emac_xaht_base(struct emac_instance *dev)
 {
 	struct emac_regs __iomem *p = dev->emacp;
 	int offset;
@@ -413,10 +409,10 @@ static inline u32 *emac_xaht_base(struct emac_instance *dev)
 	else
 		offset = offsetof(struct emac_regs, u0.emac4.iaht1);
 
-	return (u32 *)((ptrdiff_t)p + offset);
+	return (u32 __iomem *)((__force ptrdiff_t)p + offset);
 }
 
-static inline u32 *emac_gaht_base(struct emac_instance *dev)
+static inline u32 __iomem *emac_gaht_base(struct emac_instance *dev)
 {
 	/* GAHT registers always come after an identical number of
 	 * IAHT registers.
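The core.h hunks keep the __iomem annotation alive across pointer
arithmetic: the __force cast is confined to one helper so sparse can check
every caller. Sketch of the same idiom in isolation (regs_at() is
illustrative):

static u32 __iomem *regs_at(void __iomem *base, size_t offset)
{
	/* one contained __force cast; all users stay __iomem-clean */
	return (u32 __iomem *)((__force ptrdiff_t)base + offset);
}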
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 2439f7e96e05..7d70056e9008 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -524,7 +524,8 @@ static int mal_probe(struct platform_device *ofdev)
 	unsigned long irqflags;
 	irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
 
-	mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
+	mal = devm_kzalloc(&ofdev->dev, sizeof(struct mal_instance),
+			   GFP_KERNEL);
 	if (!mal)
 		return -ENOMEM;
 
@@ -539,8 +540,7 @@ static int mal_probe(struct platform_device *ofdev)
 		printk(KERN_ERR
 		       "mal%d: can't find MAL num-tx-chans property!\n",
 		       index);
-		err = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
 	mal->num_tx_chans = prop[0];
 
@@ -549,8 +549,7 @@ static int mal_probe(struct platform_device *ofdev)
 		printk(KERN_ERR
 		       "mal%d: can't find MAL num-rx-chans property!\n",
 		       index);
-		err = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
 	mal->num_rx_chans = prop[0];
 
@@ -558,15 +557,13 @@ static int mal_probe(struct platform_device *ofdev)
 	if (dcr_base == 0) {
 		printk(KERN_ERR
 		       "mal%d: can't find DCR resource!\n", index);
-		err = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
 	mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
 	if (!DCR_MAP_OK(mal->dcr_host)) {
 		printk(KERN_ERR
 		       "mal%d: failed to map DCRs !\n", index);
-		err = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
 
 	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
@@ -578,36 +575,21 @@ static int mal_probe(struct platform_device *ofdev)
 		printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n",
 		       ofdev->dev.of_node);
 		err = -ENODEV;
-		goto fail;
-#endif
-	}
-
-	mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-	mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
-	mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);
-
-	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
-		mal->txde_irq = mal->rxde_irq = mal->serr_irq;
-	} else {
-		mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
-		mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
-	}
-
-	if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
-	    !mal->txde_irq || !mal->rxde_irq) {
-		printk(KERN_ERR
-		       "mal%d: failed to map interrupts !\n", index);
-		err = -ENODEV;
 		goto fail_unmap;
+#endif
 	}
 
 	INIT_LIST_HEAD(&mal->poll_list);
 	INIT_LIST_HEAD(&mal->list);
 	spin_lock_init(&mal->lock);
 
-	init_dummy_netdev(&mal->dummy_dev);
+	mal->dummy_dev = alloc_netdev_dummy(0);
+	if (!mal->dummy_dev) {
+		err = -ENOMEM;
+		goto fail_unmap;
+	}
 
-	netif_napi_add_weight(&mal->dummy_dev, &mal->napi, mal_poll,
+	netif_napi_add_weight(mal->dummy_dev, &mal->napi, mal_poll,
 			      CONFIG_IBM_EMAC_POLL_WEIGHT);
 
 	/* Load power-on reset defaults */
@@ -637,7 +619,7 @@ static int mal_probe(struct platform_device *ofdev)
 					  GFP_KERNEL);
 	if (mal->bd_virt == NULL) {
 		err = -ENOMEM;
-		goto fail_unmap;
+		goto fail_dummy;
 	}
 
 	for (i = 0; i < mal->num_tx_chans; ++i)
@@ -650,31 +632,43 @@ static int mal_probe(struct platform_device *ofdev)
 		       sizeof(struct mal_descriptor) *
 		       mal_rx_bd_offset(mal, i));
 
+	mal->txeob_irq = platform_get_irq(ofdev, 0);
+	mal->rxeob_irq = platform_get_irq(ofdev, 1);
+	mal->serr_irq = platform_get_irq(ofdev, 2);
+
 	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
+		mal->txde_irq = mal->rxde_irq = mal->serr_irq;
 		irqflags = IRQF_SHARED;
 		hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
 	} else {
+		mal->txde_irq = platform_get_irq(ofdev, 3);
+		mal->rxde_irq = platform_get_irq(ofdev, 4);
 		irqflags = 0;
 		hdlr_serr = mal_serr;
 		hdlr_txde = mal_txde;
 		hdlr_rxde = mal_rxde;
 	}
 
-	err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
+	err = devm_request_irq(&ofdev->dev, mal->serr_irq, hdlr_serr, irqflags,
+			       "MAL SERR", mal);
 	if (err)
 		goto fail2;
-	err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
+	err = devm_request_irq(&ofdev->dev, mal->txde_irq, hdlr_txde, irqflags,
+			       "MAL TX DE", mal);
 	if (err)
-		goto fail3;
-	err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
+		goto fail2;
+	err = devm_request_irq(&ofdev->dev, mal->txeob_irq, mal_txeob, 0,
+			       "MAL TX EOB", mal);
 	if (err)
-		goto fail4;
-	err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
+		goto fail2;
+	err = devm_request_irq(&ofdev->dev, mal->rxde_irq, hdlr_rxde, irqflags,
+			       "MAL RX DE", mal);
 	if (err)
-		goto fail5;
-	err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
+		goto fail2;
+	err = devm_request_irq(&ofdev->dev, mal->rxeob_irq, mal_rxeob, 0,
+			       "MAL RX EOB", mal);
 	if (err)
-		goto fail6;
+		goto fail2;
 
 	/* Enable all MAL SERR interrupt sources */
 	set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);
@@ -693,21 +687,12 @@ static int mal_probe(struct platform_device *ofdev)
 
 	return 0;
 
- fail6:
-	free_irq(mal->rxde_irq, mal);
- fail5:
-	free_irq(mal->txeob_irq, mal);
- fail4:
-	free_irq(mal->txde_irq, mal);
- fail3:
-	free_irq(mal->serr_irq, mal);
 fail2:
 	dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
+ fail_dummy:
+	free_netdev(mal->dummy_dev);
 fail_unmap:
 	dcr_unmap(mal->dcr_host, 0x100);
- fail:
-	kfree(mal);
-
 	return err;
 }
@@ -726,20 +711,17 @@ static void mal_remove(struct platform_device *ofdev)
 			"mal%d: commac list is not empty on remove!\n",
 			mal->index);
 
-	free_irq(mal->serr_irq, mal);
-	free_irq(mal->txde_irq, mal);
-	free_irq(mal->txeob_irq, mal);
-	free_irq(mal->rxde_irq, mal);
-	free_irq(mal->rxeob_irq, mal);
-
 	mal_reset(mal);
 
+	free_netdev(mal->dummy_dev);
+
+	dcr_unmap(mal->dcr_host, 0x100);
+
 	dma_free_coherent(&ofdev->dev,
 			  sizeof(struct mal_descriptor) *
-			  (NUM_TX_BUFF * mal->num_tx_chans +
-			   NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
-			  mal->bd_dma);
-	kfree(mal);
+			  (NUM_TX_BUFF * mal->num_tx_chans +
+			   NUM_RX_BUFF * mal->num_rx_chans),
+			  mal->bd_virt, mal->bd_dma);
 }
 
 static const struct of_device_id mal_platform_match[] =
@@ -768,7 +750,7 @@ static struct platform_driver mal_of_driver = {
 		.of_match_table = mal_platform_match,
 	},
 	.probe = mal_probe,
-	.remove_new = mal_remove,
+	.remove = mal_remove,
 };
 
 int __init mal_init(void)
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index d212373a72e7..e0ddc41186a2 100644
--- a/drivers/net/ethernet/ibm/emac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -205,7 +205,7 @@ struct mal_instance {
 	int			index;
 	spinlock_t		lock;
 
-	struct net_device	dummy_dev;
+	struct net_device	*dummy_dev;
 
 	unsigned int features;
 };
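The mal.c/mal.h change follows the tree-wide move away from embedding a
struct net_device just to host NAPI: net_device is reference-counted and
must come from an allocator, hence alloc_netdev_dummy() plus free_netdev()
in the error path and in remove. Minimal sketch of the pattern (struct
example_mal is illustrative):

struct example_mal {
	struct net_device *dummy_dev;
	struct napi_struct napi;
};

static int example_mal_napi_setup(struct example_mal *m,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	m->dummy_dev = alloc_netdev_dummy(0);	/* no priv area needed */
	if (!m->dummy_dev)
		return -ENOMEM;
	netif_napi_add_weight(m->dummy_dev, &m->napi, poll, weight);
	return 0;	/* teardown: netif_napi_del() + free_netdev() */
}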
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index e1712fdc3c31..b544dd8633b7 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -216,31 +216,24 @@ void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
 
 static int rgmii_probe(struct platform_device *ofdev)
 {
-	struct device_node *np = ofdev->dev.of_node;
 	struct rgmii_instance *dev;
-	struct resource regs;
-	int rc;
+	int err;
 
-	rc = -ENOMEM;
-	dev = kzalloc(sizeof(struct rgmii_instance), GFP_KERNEL);
-	if (dev == NULL)
-		goto err_gone;
+	dev = devm_kzalloc(&ofdev->dev, sizeof(struct rgmii_instance),
+			   GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
 
-	mutex_init(&dev->lock);
-	dev->ofdev = ofdev;
+	err = devm_mutex_init(&ofdev->dev, &dev->lock);
+	if (err)
+		return err;
 
-	rc = -ENXIO;
-	if (of_address_to_resource(np, 0, &regs)) {
-		printk(KERN_ERR "%pOF: Can't get registers address\n", np);
-		goto err_free;
-	}
+	dev->ofdev = ofdev;
 
-	rc = -ENOMEM;
-	dev->base = (struct rgmii_regs __iomem *)ioremap(regs.start,
-							 sizeof(struct rgmii_regs));
-	if (dev->base == NULL) {
-		printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
-		goto err_free;
+	dev->base = devm_platform_ioremap_resource(ofdev, 0);
+	if (IS_ERR(dev->base)) {
+		dev_err(&ofdev->dev, "can't map device registers");
+		return PTR_ERR(dev->base);
 	}
 
 	/* Check for RGMII flags */
@@ -266,21 +259,6 @@ static int rgmii_probe(struct platform_device *ofdev)
 	platform_set_drvdata(ofdev, dev);
 
 	return 0;
-
- err_free:
-	kfree(dev);
- err_gone:
-	return rc;
-}
-
-static void rgmii_remove(struct platform_device *ofdev)
-{
-	struct rgmii_instance *dev = platform_get_drvdata(ofdev);
-
-	WARN_ON(dev->users != 0);
-
-	iounmap(dev->base);
-	kfree(dev);
 }
 
 static const struct of_device_id rgmii_match[] =
@@ -300,7 +278,6 @@ static struct platform_driver rgmii_driver = {
 		.of_match_table = rgmii_match,
 	},
 	.probe = rgmii_probe,
-	.remove_new = rgmii_remove,
 };
 
 int __init rgmii_init(void)
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index fa3488258ca2..09f6373ed2f9 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -87,31 +87,24 @@ void *tah_dump_regs(struct platform_device *ofdev, void *buf)
 
 static int tah_probe(struct platform_device *ofdev)
 {
-	struct device_node *np = ofdev->dev.of_node;
 	struct tah_instance *dev;
-	struct resource regs;
-	int rc;
+	int err;
 
-	rc = -ENOMEM;
-	dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL);
-	if (dev == NULL)
-		goto err_gone;
+	dev = devm_kzalloc(&ofdev->dev, sizeof(struct tah_instance),
+			   GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
 
-	mutex_init(&dev->lock);
-	dev->ofdev = ofdev;
+	err = devm_mutex_init(&ofdev->dev, &dev->lock);
+	if (err)
+		return err;
 
-	rc = -ENXIO;
-	if (of_address_to_resource(np, 0, &regs)) {
-		printk(KERN_ERR "%pOF: Can't get registers address\n", np);
-		goto err_free;
-	}
+	dev->ofdev = ofdev;
 
-	rc = -ENOMEM;
-	dev->base = (struct tah_regs __iomem *)ioremap(regs.start,
-						       sizeof(struct tah_regs));
-	if (dev->base == NULL) {
-		printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
-		goto err_free;
+	dev->base = devm_platform_ioremap_resource(ofdev, 0);
+	if (IS_ERR(dev->base)) {
+		dev_err(&ofdev->dev, "can't map device registers");
+		return PTR_ERR(dev->base);
 	}
 
 	platform_set_drvdata(ofdev, dev);
@@ -123,21 +116,6 @@ static int tah_probe(struct platform_device *ofdev)
 	wmb();
 
 	return 0;
-
- err_free:
-	kfree(dev);
- err_gone:
-	return rc;
-}
-
-static void tah_remove(struct platform_device *ofdev)
-{
-	struct tah_instance *dev = platform_get_drvdata(ofdev);
-
-	WARN_ON(dev->users != 0);
-
-	iounmap(dev->base);
-	kfree(dev);
 }
 
 static const struct of_device_id tah_match[] =
@@ -158,7 +136,6 @@ static struct platform_driver tah_driver = {
 		.of_match_table = tah_match,
 	},
 	.probe = tah_probe,
-	.remove_new = tah_remove,
 };
 
 int __init tah_init(void)
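rgmii and tah (and zmii, next) all collapse to the same fully
device-managed probe shape, which is why their .remove callbacks can be
deleted outright. The shared skeleton, reduced to its essentials
(example_instance/example_devm_probe are illustrative):

struct example_instance {
	void __iomem *base;
};

static int example_devm_probe(struct platform_device *pdev)
{
	struct example_instance *inst;

	inst = devm_kzalloc(&pdev->dev, sizeof(*inst), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	inst->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(inst->base))
		return PTR_ERR(inst->base);	/* devm frees inst */

	platform_set_drvdata(pdev, inst);
	return 0;	/* nothing left for a .remove to undo */
}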
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 26e86cdee2f6..69ca6065de1c 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -232,32 +232,25 @@ void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
 
 static int zmii_probe(struct platform_device *ofdev)
 {
-	struct device_node *np = ofdev->dev.of_node;
 	struct zmii_instance *dev;
-	struct resource regs;
-	int rc;
+	int err;
 
-	rc = -ENOMEM;
-	dev = kzalloc(sizeof(struct zmii_instance), GFP_KERNEL);
-	if (dev == NULL)
-		goto err_gone;
+	dev = devm_kzalloc(&ofdev->dev, sizeof(struct zmii_instance),
+			   GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	err = devm_mutex_init(&ofdev->dev, &dev->lock);
+	if (err)
+		return err;
 
-	mutex_init(&dev->lock);
 	dev->ofdev = ofdev;
 	dev->mode = PHY_INTERFACE_MODE_NA;
 
-	rc = -ENXIO;
-	if (of_address_to_resource(np, 0, &regs)) {
-		printk(KERN_ERR "%pOF: Can't get registers address\n", np);
-		goto err_free;
-	}
-
-	rc = -ENOMEM;
-	dev->base = (struct zmii_regs __iomem *)ioremap(regs.start,
-							sizeof(struct zmii_regs));
-	if (dev->base == NULL) {
-		printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
-		goto err_free;
+	dev->base = devm_platform_ioremap_resource(ofdev, 0);
+	if (IS_ERR(dev->base)) {
+		dev_err(&ofdev->dev, "can't map device registers");
+		return PTR_ERR(dev->base);
 	}
 
 	/* We may need FER value for autodetection later */
@@ -271,21 +264,6 @@ static int zmii_probe(struct platform_device *ofdev)
 	platform_set_drvdata(ofdev, dev);
 
 	return 0;
-
- err_free:
-	kfree(dev);
- err_gone:
-	return rc;
-}
-
-static void zmii_remove(struct platform_device *ofdev)
-{
-	struct zmii_instance *dev = platform_get_drvdata(ofdev);
-
-	WARN_ON(dev->users != 0);
-
-	iounmap(dev->base);
-	kfree(dev);
 }
 
 static const struct of_device_id zmii_match[] =
@@ -306,7 +284,6 @@ static struct platform_driver zmii_driver = {
 		.of_match_table = zmii_match,
 	},
 	.probe = zmii_probe,
-	.remove_new = zmii_remove,
 };
 
 int __init zmii_init(void)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b5aef0b29efe..24046fe16634 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -39,7 +39,6 @@
 #include "ibmveth.h"
 
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
 
 static struct kobj_type ktype_veth_pool;
@@ -226,6 +225,19 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
 	for (i = 0; i < count; ++i) {
 		union ibmveth_buf_desc desc;
 
+		free_index = pool->consumer_index;
+		index = pool->free_map[free_index];
+		skb = NULL;
+
+		if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
+			schedule_work(&adapter->work);
+			goto bad_index_failure;
+		}
+
+		/* are we allocating a new buffer or recycling an old one */
+		if (pool->skbuff[index])
+			goto reuse;
+
 		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
 
 		if (!skb) {
@@ -235,46 +247,46 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
 			break;
 		}
 
-		free_index = pool->consumer_index;
-		pool->consumer_index++;
-		if (pool->consumer_index >= pool->size)
-			pool->consumer_index = 0;
-		index = pool->free_map[free_index];
-
-		BUG_ON(index == IBM_VETH_INVALID_MAP);
-		BUG_ON(pool->skbuff[index] != NULL);
-
 		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
 				pool->buff_size, DMA_FROM_DEVICE);
 
 		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
 			goto failure;
 
-		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
 		pool->dma_addr[index] = dma_addr;
 		pool->skbuff[index] = skb;
 
-		correlator = ((u64)pool->index << 32) | index;
-		*(u64 *)skb->data = correlator;
-
-		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
-		desc.fields.address = dma_addr;
-
 		if (rx_flush) {
 			unsigned int len = min(pool->buff_size,
-					adapter->netdev->mtu +
-					IBMVETH_BUFF_OH);
+					       adapter->netdev->mtu +
+					       IBMVETH_BUFF_OH);
 			ibmveth_flush_buffer(skb->data, len);
 		}
+reuse:
+		dma_addr = pool->dma_addr[index];
+		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
+		desc.fields.address = dma_addr;
+
+		correlator = ((u64)pool->index << 32) | index;
+		*(u64 *)pool->skbuff[index]->data = correlator;
+
 		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
 						   desc.desc);
 
 		if (lpar_rc != H_SUCCESS) {
+			netdev_warn(adapter->netdev,
+				    "%sadd_logical_lan failed %lu\n",
+				    skb ? "" : "When recycling: ", lpar_rc);
 			goto failure;
-		} else {
-			buffers_added++;
-			adapter->replenish_add_buff_success++;
 		}
+
+		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
+		pool->consumer_index++;
+		if (pool->consumer_index >= pool->size)
+			pool->consumer_index = 0;
+
+		buffers_added++;
+		adapter->replenish_add_buff_success++;
 	}
 
 	mb();
@@ -282,17 +294,14 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
 	return;
 
 failure:
-	pool->free_map[free_index] = index;
-	pool->skbuff[index] = NULL;
-	if (pool->consumer_index == 0)
-		pool->consumer_index = pool->size - 1;
-	else
-		pool->consumer_index--;
-	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
+
+	if (dma_addr && !dma_mapping_error(&adapter->vdev->dev, dma_addr))
 		dma_unmap_single(&adapter->vdev->dev,
 				 pool->dma_addr[index],
 				 pool->buff_size, DMA_FROM_DEVICE);
-	dev_kfree_skb_any(skb);
+	dev_kfree_skb_any(pool->skbuff[index]);
+	pool->skbuff[index] = NULL;
+bad_index_failure:
 	adapter->replenish_add_buff_failure++;
 
 	mb();
@@ -363,28 +372,52 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
 	}
 }
 
-/* remove a buffer from a pool */
-static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
-					    u64 correlator)
+/**
+ * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
+ * @adapter: adapter instance
+ * @correlator: identifies pool and index
+ * @reuse: whether to reuse buffer
+ *
+ * Return:
+ * * %0       - success
+ * * %-EINVAL - correlator maps to pool or index out of range
+ * * %-EFAULT - pool and index map to null skb
+ */
+static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+					   u64 correlator, bool reuse)
 {
 	unsigned int pool  = correlator >> 32;
 	unsigned int index = correlator & 0xffffffffUL;
 	unsigned int free_index;
 	struct sk_buff *skb;
 
-	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
-	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+	if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+	    WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+		schedule_work(&adapter->work);
+		return -EINVAL;
+	}
 
 	skb = adapter->rx_buff_pool[pool].skbuff[index];
+	if (WARN_ON(!skb)) {
+		schedule_work(&adapter->work);
+		return -EFAULT;
+	}
 
-	BUG_ON(skb == NULL);
-
-	adapter->rx_buff_pool[pool].skbuff[index] = NULL;
+	/* if we are going to reuse the buffer then keep the pointers around
+	 * but mark index as available. replenish will see the skb pointer and
+	 * assume it is to be recycled.
+	 */
+	if (!reuse) {
+		/* remove the skb pointer to mark free. actual freeing is done
+		 * by upper level networking after gro_receive
+		 */
+		adapter->rx_buff_pool[pool].skbuff[index] = NULL;
 
-	dma_unmap_single(&adapter->vdev->dev,
-			 adapter->rx_buff_pool[pool].dma_addr[index],
-			 adapter->rx_buff_pool[pool].buff_size,
-			 DMA_FROM_DEVICE);
+		dma_unmap_single(&adapter->vdev->dev,
+				 adapter->rx_buff_pool[pool].dma_addr[index],
+				 adapter->rx_buff_pool[pool].buff_size,
+				 DMA_FROM_DEVICE);
+	}
 
 	free_index = adapter->rx_buff_pool[pool].producer_index;
 	adapter->rx_buff_pool[pool].producer_index++;
@@ -396,6 +429,8 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
 	mb();
 
 	atomic_dec(&(adapter->rx_buff_pool[pool].available));
+
+	return 0;
 }
 
 /* get the current buffer on the rx queue */
@@ -405,62 +440,44 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
 	unsigned int pool = correlator >> 32;
 	unsigned int index = correlator & 0xffffffffUL;
 
-	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
-	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+	if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+	    WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+		schedule_work(&adapter->work);
+		return NULL;
+	}
 
 	return adapter->rx_buff_pool[pool].skbuff[index];
 }
 
-/* recycle the current buffer on the rx queue */
-static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
+/**
+ * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
+ *
+ * @adapter: pointer to adapter
+ * @reuse: whether to reuse buffer
+ *
+ * Context: called from ibmveth_poll
+ *
+ * Return:
+ * * %0    - success
+ * * other - non-zero return from ibmveth_remove_buffer_from_pool
+ */
+static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+				      bool reuse)
 {
-	u32 q_index = adapter->rx_queue.index;
-	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
-	unsigned int pool = correlator >> 32;
-	unsigned int index = correlator & 0xffffffffUL;
-	union ibmveth_buf_desc desc;
-	unsigned long lpar_rc;
-	int ret = 1;
-
-	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
-	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
-
-	if (!adapter->rx_buff_pool[pool].active) {
-		ibmveth_rxq_harvest_buffer(adapter);
-		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
-		goto out;
-	}
-
-	desc.fields.flags_len = IBMVETH_BUF_VALID |
-		adapter->rx_buff_pool[pool].buff_size;
-	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
-
-	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
+	u64 cor;
+	int rc;
 
-	if (lpar_rc != H_SUCCESS) {
-		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
-			   "during recycle rc=%ld", lpar_rc);
-		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
-		ret = 0;
-	}
+	cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
+	rc = ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+	if (unlikely(rc))
+		return rc;
 
 	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
 		adapter->rx_queue.index = 0;
 		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
 	}
-out:
-	return ret;
-}
-
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
-{
-	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
-
-	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
-		adapter->rx_queue.index = 0;
-		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
-	}
+	return 0;
 }
 
 static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
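All of the pool helpers above key off the same 64-bit correlator layout:
pool number in the high word, buffer index in the low word. A hypothetical
decode helper, for orientation only:

static void example_decode_correlator(u64 correlator,
				      unsigned int *pool,
				      unsigned int *index)
{
	*pool = correlator >> 32;		/* which rx_buff_pool[] */
	*index = correlator & 0xffffffffUL;	/* slot within that pool */
}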
@@ -732,6 +749,35 @@ static int ibmveth_close(struct net_device *netdev)
 	return 0;
 }
 
+/**
+ * ibmveth_reset - Handle scheduled reset work
+ *
+ * @w: pointer to work_struct embedded in adapter structure
+ *
+ * Context: This routine acquires rtnl_mutex and disables its NAPI through
+ *	    ibmveth_close. It can't be called directly in a context that has
+ *	    already acquired rtnl_mutex or disabled its NAPI, or directly from
+ *	    a poll routine.
+ *
+ * Return: void
+ */
+static void ibmveth_reset(struct work_struct *w)
+{
+	struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work);
+	struct net_device *netdev = adapter->netdev;
+
+	netdev_dbg(netdev, "reset starting\n");
+
+	rtnl_lock();
+
+	dev_close(adapter->netdev);
+	dev_open(adapter->netdev, NULL);
+
+	rtnl_unlock();
+
+	netdev_dbg(netdev, "reset complete\n");
+}
+
 static int ibmveth_set_link_ksettings(struct net_device *dev,
 				      const struct ethtool_link_ksettings *cmd)
 {
@@ -1337,6 +1383,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 	unsigned long lpar_rc;
 	u16 mss = 0;
 
+restart_poll:
 	while (frames_processed < budget) {
 		if (!ibmveth_rxq_pending_buffer(adapter))
 			break;
@@ -1346,7 +1393,8 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 			wmb(); /* suggested by larson1 */
 			adapter->rx_invalid_buffer++;
 			netdev_dbg(netdev, "recycling invalid buffer\n");
-			ibmveth_rxq_recycle_buffer(adapter);
+			if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+				break;
 		} else {
 			struct sk_buff *skb, *new_skb;
 			int length = ibmveth_rxq_frame_length(adapter);
@@ -1356,6 +1404,8 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 			__sum16 iph_check = 0;
 
 			skb = ibmveth_rxq_get_buffer(adapter);
+			if (unlikely(!skb))
+				break;
 
 			/* if the large packet bit is set in the rx queue
 			 * descriptor, the mss will be written by PHYP eight
@@ -1379,11 +1429,12 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 				if (rx_flush)
 					ibmveth_flush_buffer(skb->data,
 						length + offset);
-				if (!ibmveth_rxq_recycle_buffer(adapter))
-					kfree_skb(skb);
+				if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+					break;
 				skb = new_skb;
 			} else {
-				ibmveth_rxq_harvest_buffer(adapter);
+				if (unlikely(ibmveth_rxq_harvest_buffer(adapter, false)))
+					break;
 				skb_reserve(skb, offset);
 			}
 
@@ -1420,24 +1471,28 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 
 	ibmveth_replenish_task(adapter);
 
-	if (frames_processed < budget) {
-		napi_complete_done(napi, frames_processed);
+	if (frames_processed == budget)
+		goto out;
 
-		/* We think we are done - reenable interrupts,
-		 * then check once more to make sure we are done.
-		 */
-		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
-				       VIO_IRQ_ENABLE);
+	if (!napi_complete_done(napi, frames_processed))
+		goto out;
 
-		BUG_ON(lpar_rc != H_SUCCESS);
+	/* We think we are done - reenable interrupts,
+	 * then check once more to make sure we are done.
+	 */
+	lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
+	if (WARN_ON(lpar_rc != H_SUCCESS)) {
+		schedule_work(&adapter->work);
+		goto out;
+	}
 
-		if (ibmveth_rxq_pending_buffer(adapter) &&
-		    napi_schedule(napi)) {
-			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
-					       VIO_IRQ_DISABLE);
-		}
+	if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) {
+		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+				       VIO_IRQ_DISABLE);
+		goto restart_poll;
 	}
 
+out:
 	return frames_processed;
 }
 
@@ -1450,7 +1505,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
 	if (napi_schedule_prep(&adapter->napi)) {
 		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 				       VIO_IRQ_DISABLE);
-		BUG_ON(lpar_rc != H_SUCCESS);
+		WARN_ON(lpar_rc != H_SUCCESS);
 		__napi_schedule(&adapter->napi);
 	}
 	return IRQ_HANDLED;
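The rewritten poll exit above is the standard NAPI re-arm dance: only the
context that wins napi_complete_done() may re-enable the interrupt, and if
work arrived in that window it reschedules and loops instead of returning.
A miniature of that shape with illustrative example_*() helpers:

static int example_poll(struct napi_struct *napi, int budget)
{
	int work;

restart:
	work = example_process_rx(budget);
	if (work == budget)
		return work;		/* stay in polling mode */
	if (!napi_complete_done(napi, work))
		return work;		/* someone else rescheduled us */
	example_enable_irq();
	if (example_rx_pending() && napi_schedule(napi)) {
		example_disable_irq();
		goto restart;		/* resume polling immediately */
	}
	return work;
}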
@@ -1537,7 +1592,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 			adapter->rx_buff_pool[i].active = 1;
 
 		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
-			dev->mtu = new_mtu;
+			WRITE_ONCE(dev->mtu, new_mtu);
 			vio_cmo_set_dev_desired(viodev,
 						ibmveth_get_desired_dma
 						(viodev));
@@ -1692,6 +1747,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
 	adapter->vdev = dev;
 	adapter->netdev = netdev;
+	INIT_WORK(&adapter->work, ibmveth_reset);
 	adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
 	ibmveth_init_link_settings(netdev);
 
@@ -1784,6 +1840,8 @@ static void ibmveth_remove(struct vio_dev *dev)
 	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 	int i;
 
+	cancel_work_sync(&adapter->work);
+
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		kobject_put(&adapter->rx_buff_pool[i].kobj);
 
@@ -1813,6 +1871,26 @@ static ssize_t veth_pool_show(struct kobject *kobj,
 	return 0;
 }
 
+/**
+ * veth_pool_store - sysfs store handler for pool attributes
+ * @kobj: kobject embedded in pool
+ * @attr: attribute being changed
+ * @buf: value being stored
+ * @count: length of @buf in bytes
+ *
+ * Stores new value in pool attribute. Verifies the range of the new value for
+ * size and buff_size. Verifies that at least one pool remains available to
+ * receive MTU-sized packets.
+ *
+ * Context: Process context.
+ *	    Takes and releases rtnl_mutex to ensure correct ordering of close
+ *	    and open calls.
+ * Return:
+ * * %-EPERM  - Not allowed to disable all MTU-sized buffer pools
+ * * %-EINVAL - New pool size or buffer size is out of range
+ * * count    - Return count for success
+ * * other    - Return value from a failed ibmveth_open call
+ */
 static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
 			       const char *buf, size_t count)
 {
@@ -1822,24 +1900,30 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
 	struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent));
 	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 	long value = simple_strtol(buf, NULL, 10);
+	bool change = false;
+	u32 newbuff_size;
+	u32 oldbuff_size;
+	int newactive;
+	int oldactive;
+	u32 newsize;
+	u32 oldsize;
 	long rc;
 
+	rtnl_lock();
+
+	oldbuff_size = pool->buff_size;
+	oldactive = pool->active;
+	oldsize = pool->size;
+
+	newbuff_size = oldbuff_size;
+	newactive = oldactive;
+	newsize = oldsize;
+
 	if (attr == &veth_active_attr) {
-		if (value && !pool->active) {
-			if (netif_running(netdev)) {
-				if (ibmveth_alloc_buffer_pool(pool)) {
-					netdev_err(netdev,
-						   "unable to alloc pool\n");
-					return -ENOMEM;
-				}
-				pool->active = 1;
-				ibmveth_close(netdev);
-				if ((rc = ibmveth_open(netdev)))
-					return rc;
-			} else {
-				pool->active = 1;
-			}
-		} else if (!value && pool->active) {
+		if (value && !oldactive) {
+			newactive = 1;
+			change = true;
+		} else if (!value && oldactive) {
 			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
 			int i;
 
 			/* Make sure there is a buffer pool with buffers that
@@ -1855,48 +1939,60 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
 
 			if (i == IBMVETH_NUM_BUFF_POOLS) {
 				netdev_err(netdev, "no active pool >= MTU\n");
-				return -EPERM;
+				rc = -EPERM;
+				goto unlock_err;
 			}
 
-			if (netif_running(netdev)) {
-				ibmveth_close(netdev);
-				pool->active = 0;
-				if ((rc = ibmveth_open(netdev)))
-					return rc;
-			}
-			pool->active = 0;
+			newactive = 0;
+			change = true;
 		}
 	} else if (attr == &veth_num_attr) {
 		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
-			return -EINVAL;
-		} else {
-			if (netif_running(netdev)) {
-				ibmveth_close(netdev);
-				pool->size = value;
-				if ((rc = ibmveth_open(netdev)))
-					return rc;
-			} else {
-				pool->size = value;
-			}
+			rc = -EINVAL;
+			goto unlock_err;
+		}
+		if (value != oldsize) {
+			newsize = value;
+			change = true;
 		}
 	} else if (attr == &veth_size_attr) {
 		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
-			return -EINVAL;
-		} else {
-			if (netif_running(netdev)) {
-				ibmveth_close(netdev);
-				pool->buff_size = value;
-				if ((rc = ibmveth_open(netdev)))
-					return rc;
-			} else {
-				pool->buff_size = value;
+			rc = -EINVAL;
+			goto unlock_err;
+		}
+		if (value != oldbuff_size) {
+			newbuff_size = value;
+			change = true;
+		}
+	}
+
+	if (change) {
+		if (netif_running(netdev))
+			ibmveth_close(netdev);
+
+		pool->active = newactive;
+		pool->buff_size = newbuff_size;
+		pool->size = newsize;
+
+		if (netif_running(netdev)) {
+			rc = ibmveth_open(netdev);
+			if (rc) {
+				pool->active = oldactive;
+				pool->buff_size = oldbuff_size;
+				pool->size = oldsize;
+				goto unlock_err;
 			}
 		}
 	}
+	rtnl_unlock();
 
 	/* kick the interrupt handler to allocate/deallocate pools */
 	ibmveth_interrupt(netdev->irq, netdev);
+
 	return count;
+
+unlock_err:
+	rtnl_unlock();
+	return rc;
 }
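veth_pool_store() now follows a snapshot/apply/rollback shape under
rtnl_lock(). Note that it calls the driver's ibmveth_close()/ibmveth_open()
directly rather than dev_close()/dev_open(), so netif_running() stays true
across the sequence. Reduced sketch of the pattern (example_* names are
illustrative):

static ssize_t example_store_u32(struct net_device *netdev, u32 *field,
				 u32 val, size_t count)
{
	u32 old = *field;
	long rc;

	rtnl_lock();			/* order against open/close */
	if (netif_running(netdev))
		example_close(netdev);	/* driver ndo_stop, called directly */
	*field = val;
	if (netif_running(netdev)) {
		rc = example_open(netdev);
		if (rc) {
			*field = old;	/* roll back, as the patch does */
			rtnl_unlock();
			return rc;
		}
	}
	rtnl_unlock();
	return count;
}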
@@ -1969,3 +2065,132 @@ static void __exit ibmveth_module_exit(void)
 
 module_init(ibmveth_module_init);
 module_exit(ibmveth_module_exit);
+
+#ifdef CONFIG_IBMVETH_KUNIT_TEST
+#include <kunit/test.h>
+
+/**
+ * ibmveth_reset_kunit - reset routine for running in KUnit environment
+ *
+ * @w: pointer to work_struct embedded in adapter structure
+ *
+ * Context: Called in the KUnit environment. Does nothing.
+ *
+ * Return: void
+ */
+static void ibmveth_reset_kunit(struct work_struct *w)
+{
+	netdev_dbg(NULL, "reset_kunit starting\n");
+	netdev_dbg(NULL, "reset_kunit complete\n");
+}
+
+/**
+ * ibmveth_remove_buffer_from_pool_test - unit test for some of
+ *					  ibmveth_remove_buffer_from_pool
+ * @test: pointer to kunit structure
+ *
+ * Tests the error returns from ibmveth_remove_buffer_from_pool.
+ * ibmveth_remove_buffer_from_pool also calls WARN_ON, so dmesg should be
+ * checked to see that these warnings happened.
+ *
+ * Return: void
+ */
+static void ibmveth_remove_buffer_from_pool_test(struct kunit *test)
+{
+	struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+	struct ibmveth_buff_pool *pool;
+	u64 correlator;
+
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+
+	INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+	/* Set sane values for buffer pools */
+	for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+					 pool_count[i], pool_size[i],
+					 pool_active[i]);
+
+	pool = &adapter->rx_buff_pool[0];
+	pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+	correlator = ((u64)IBMVETH_NUM_BUFF_POOLS << 32) | 0;
+	KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+	KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+	correlator = ((u64)0 << 32) | adapter->rx_buff_pool[0].size;
+	KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+	KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+	correlator = (u64)0 | 0;
+	pool->skbuff[0] = NULL;
+	KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+	KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+	flush_work(&adapter->work);
+}
+
+/**
+ * ibmveth_rxq_get_buffer_test - unit test for ibmveth_rxq_get_buffer
+ * @test: pointer to kunit structure
+ *
+ * Tests ibmveth_rxq_get_buffer. ibmveth_rxq_get_buffer also calls WARN_ON for
+ * the NULL returns, so dmesg should be checked to see that these warnings
+ * happened.
+ *
+ * Return: void
+ */
+static void ibmveth_rxq_get_buffer_test(struct kunit *test)
+{
+	struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+	struct sk_buff *skb = kunit_kzalloc(test, sizeof(*skb), GFP_KERNEL);
+	struct ibmveth_buff_pool *pool;
+
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+
+	INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+	adapter->rx_queue.queue_len = 1;
+	adapter->rx_queue.index = 0;
+	adapter->rx_queue.queue_addr = kunit_kzalloc(test, sizeof(struct ibmveth_rx_q_entry),
+						     GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter->rx_queue.queue_addr);
+
+	/* Set sane values for buffer pools */
+	for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+					 pool_count[i], pool_size[i],
+					 pool_active[i]);
+
+	pool = &adapter->rx_buff_pool[0];
+	pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+	adapter->rx_queue.queue_addr[0].correlator = (u64)IBMVETH_NUM_BUFF_POOLS << 32 | 0;
+	KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+	adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | adapter->rx_buff_pool[0].size;
+	KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+	pool->skbuff[0] = skb;
+	adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | 0;
+	KUNIT_EXPECT_PTR_EQ(test, skb, ibmveth_rxq_get_buffer(adapter));
+
+	flush_work(&adapter->work);
+}
+
+static struct kunit_case ibmveth_test_cases[] = {
+	KUNIT_CASE(ibmveth_remove_buffer_from_pool_test),
+	KUNIT_CASE(ibmveth_rxq_get_buffer_test),
+	{}
+};
+
+static struct kunit_suite ibmveth_test_suite = {
+	.name = "ibmveth-kunit-test",
+	.test_cases = ibmveth_test_cases,
+};
+
+kunit_test_suite(ibmveth_test_suite);
+#endif
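For orientation, the moving parts of such a suite in miniature:
kunit_kzalloc() ties allocations to the test lifetime, KUNIT_EXPECT_*
record failures, and kunit_test_suite() registers the suite to run when the
config option is built in. Everything below is an illustrative skeleton,
not code from the patch:

#include <kunit/test.h>

static void example_correlator_case(struct kunit *test)
{
	u64 correlator = ((u64)3 << 32) | 7;

	KUNIT_EXPECT_EQ(test, 3u, (unsigned int)(correlator >> 32));
	KUNIT_EXPECT_EQ(test, 7u, (unsigned int)(correlator & 0xffffffffUL));
}

static struct kunit_case example_cases[] = {
	KUNIT_CASE(example_correlator_case),
	{}
};

static struct kunit_suite example_suite = {
	.name = "example-kunit-test",
	.test_cases = example_cases,
};

kunit_test_suite(example_suite);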
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 8468e2c59d7a..b0a2460ec9f9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -134,38 +134,39 @@ struct ibmveth_rx_q {
 };
 
 struct ibmveth_adapter {
-    struct vio_dev *vdev;
-    struct net_device *netdev;
-    struct napi_struct napi;
-    unsigned int mcastFilterSize;
-    void * buffer_list_addr;
-    void * filter_list_addr;
-    void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
-    unsigned int tx_ltb_size;
-    dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
-    dma_addr_t buffer_list_dma;
-    dma_addr_t filter_list_dma;
-    struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
-    struct ibmveth_rx_q rx_queue;
-    int rx_csum;
-    int large_send;
-    bool is_active_trunk;
-
-    u64 fw_ipv6_csum_support;
-    u64 fw_ipv4_csum_support;
-    u64 fw_large_send_support;
-    /* adapter specific stats */
-    u64 replenish_task_cycles;
-    u64 replenish_no_mem;
-    u64 replenish_add_buff_failure;
-    u64 replenish_add_buff_success;
-    u64 rx_invalid_buffer;
-    u64 rx_no_buffer;
-    u64 tx_map_failed;
-    u64 tx_send_failed;
-    u64 tx_large_packets;
-    u64 rx_large_packets;
-    /* Ethtool settings */
+	struct vio_dev *vdev;
+	struct net_device *netdev;
+	struct napi_struct napi;
+	struct work_struct work;
+	unsigned int mcastFilterSize;
+	void *buffer_list_addr;
+	void *filter_list_addr;
+	void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+	unsigned int tx_ltb_size;
+	dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
+	dma_addr_t buffer_list_dma;
+	dma_addr_t filter_list_dma;
+	struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
+	struct ibmveth_rx_q rx_queue;
+	int rx_csum;
+	int large_send;
+	bool is_active_trunk;
+
+	u64 fw_ipv6_csum_support;
+	u64 fw_ipv4_csum_support;
+	u64 fw_large_send_support;
+	/* adapter specific stats */
+	u64 replenish_task_cycles;
+	u64 replenish_no_mem;
+	u64 replenish_add_buff_failure;
+	u64 replenish_add_buff_success;
+	u64 rx_invalid_buffer;
+	u64 rx_no_buffer;
+	u64 tx_map_failed;
+	u64 tx_send_failed;
+	u64 tx_large_packets;
+	u64 rx_large_packets;
+	/* Ethtool settings */
 	u8 duplex;
 	u32 speed;
 };
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 30c47b8470ad..92647e137cf8 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -117,6 +117,7 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 				struct ibmvnic_long_term_buff *ltb);
 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
 static void flush_reset_queue(struct ibmvnic_adapter *adapter);
+static void print_subcrq_error(struct device *dev, int rc, const char *func);
 
 struct ibmvnic_stat {
 	char name[ETH_GSTRING_LEN];
@@ -233,11 +234,17 @@ static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
 		(*stragglers)--;
 	}
 	/* atomic write is safer than writing bit by bit directly */
-	for (i = 0; i < stride; i++) {
-		cpumask_set_cpu(*cpu, mask);
-		*cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
-					 nr_cpu_ids, false);
+	for_each_online_cpu_wrap(i, *cpu) {
+		if (!stride--) {
+			/* For the next queue we start from the first
+			 * unused CPU in this queue
+			 */
+			*cpu = i;
+			break;
+		}
+		cpumask_set_cpu(i, mask);
 	}
+
 	/* set queue affinity mask */
 	cpumask_copy(queue->affinity_mask, mask);
 	rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
@@ -255,7 +262,7 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
 	int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
 	int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
 	int total_queues, stride, stragglers, i;
-	unsigned int num_cpu, cpu;
+	unsigned int num_cpu, cpu = 0;
 	bool is_rx_queue;
 	int rc = 0;
 
@@ -273,8 +280,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
 	stride = max_t(int, num_cpu / total_queues, 1);
 	/* number of leftover cpu's */
 	stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
-	/* next available cpu to assign irq to */
-	cpu = cpumask_next(-1, cpu_online_mask);
 
 	for (i = 0; i < total_queues; i++) {
 		is_rx_queue = false;
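The rewritten affinity loop above walks the online CPUs starting at *cpu, lets the current queue claim stride of them, and leaves *cpu pointing at the first CPU the queue did not take, so the next queue resumes there. A small illustrative sketch of the same striding over a plain array, under hypothetical names (assign_queue_cpus and claim are not driver code):

	/* Give one queue `stride` entries from online[], wrapping at the end,
	 * and return the position where the next queue should start.
	 */
	static unsigned int assign_queue_cpus(const unsigned int *online,
					      unsigned int n_online,
					      unsigned int start, int stride,
					      void (*claim)(unsigned int cpu))
	{
		unsigned int i;

		for (i = 0; i < n_online; i++) {
			unsigned int pos = (start + i) % n_online;

			if (!stride--)
				return pos;	/* next queue starts here */
			claim(online[pos]);
		}
		return start;	/* stride >= n_online: wrapped the whole set */
	}

Checking stride before claiming matches the kernel loop above: each queue takes exactly stride CPUs, and the early break records the follow-on starting point.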
@@ -2140,63 +2145,49 @@ static int ibmvnic_close(struct net_device *netdev)
 }
 
 /**
- * build_hdr_data - creates L2/L3/L4 header data buffer
+ * get_hdr_lens - fills list of L2/L3/L4 hdr lens
  * @hdr_field: bitfield determining needed headers
  * @skb: socket buffer
- * @hdr_len: array of header lengths
- * @hdr_data: buffer to write the header to
+ * @hdr_len: array of header lengths to be filled
  *
  * Reads hdr_field to determine which headers are needed by firmware.
- * Builds a buffer containing these headers. Saves individual header
- * lengths and total buffer length to be used to build descriptors.
+ * Saves individual header lengths and returns the total length of
+ * all needed headers, to be used when building descriptors.
+ *
+ * Return: total length of all needed headers
  */
-static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
-			  int *hdr_len, u8 *hdr_data)
+static int get_hdr_lens(u8 hdr_field, struct sk_buff *skb,
+			int *hdr_len)
 {
 	int len = 0;
-	u8 *hdr;
 
-	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
-		hdr_len[0] = sizeof(struct vlan_ethhdr);
-	else
-		hdr_len[0] = sizeof(struct ethhdr);
+
+	if ((hdr_field >> 6) & 1) {
+		hdr_len[0] = skb_mac_header_len(skb);
+		len += hdr_len[0];
+	}
+
+	if ((hdr_field >> 5) & 1) {
+		hdr_len[1] = skb_network_header_len(skb);
+		len += hdr_len[1];
+	}
+
+	if (!((hdr_field >> 4) & 1))
+		return len;
 
 	if (skb->protocol == htons(ETH_P_IP)) {
-		hdr_len[1] = ip_hdr(skb)->ihl * 4;
 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 			hdr_len[2] = tcp_hdrlen(skb);
 		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
 			hdr_len[2] = sizeof(struct udphdr);
 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
-		hdr_len[1] = sizeof(struct ipv6hdr);
 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 			hdr_len[2] = tcp_hdrlen(skb);
 		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
 			hdr_len[2] = sizeof(struct udphdr);
-	} else if (skb->protocol == htons(ETH_P_ARP)) {
-		hdr_len[1] = arp_hdr_len(skb->dev);
-		hdr_len[2] = 0;
 	}
 
-	memset(hdr_data, 0, 120);
-	if ((hdr_field >> 6) & 1) {
-		hdr = skb_mac_header(skb);
-		memcpy(hdr_data, hdr, hdr_len[0]);
-		len += hdr_len[0];
-	}
-
-	if ((hdr_field >> 5) & 1) {
-		hdr = skb_network_header(skb);
-		memcpy(hdr_data + len, hdr, hdr_len[1]);
-		len += hdr_len[1];
-	}
-
-	if ((hdr_field >> 4) & 1) {
-		hdr = skb_transport_header(skb);
-		memcpy(hdr_data + len, hdr, hdr_len[2]);
-		len += hdr_len[2];
-	}
-	return len;
+	return len + hdr_len[2];
 }
 
 /**
@@ -2209,12 +2200,14 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
  *
  * Creates header and, if needed, header extension descriptors and
  * places them in a descriptor array, scrq_arr
+ *
+ * Return: Number of header descs
  */
 
 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
 			    union sub_crq *scrq_arr)
 {
-	union sub_crq hdr_desc;
+	union sub_crq *hdr_desc;
 	int tmp_len = len;
 	int num_descs = 0;
 	u8 *data, *cur;
 	int tmp;
 
 	while (tmp_len > 0) {
 		cur = hdr_data + len - tmp_len;
 
-		memset(&hdr_desc, 0, sizeof(hdr_desc));
-		if (cur != hdr_data) {
-			data = hdr_desc.hdr_ext.data;
+		hdr_desc = &scrq_arr[num_descs];
+		if (num_descs) {
+			data = hdr_desc->hdr_ext.data;
 			tmp = tmp_len > 29 ? 29 : tmp_len;
-			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
-			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
-			hdr_desc.hdr_ext.len = tmp;
+			hdr_desc->hdr_ext.first = IBMVNIC_CRQ_CMD;
+			hdr_desc->hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+			hdr_desc->hdr_ext.len = tmp;
 		} else {
-			data = hdr_desc.hdr.data;
+			data = hdr_desc->hdr.data;
 			tmp = tmp_len > 24 ?
24 : tmp_len; - hdr_desc.hdr.first = IBMVNIC_CRQ_CMD; - hdr_desc.hdr.type = IBMVNIC_HDR_DESC; - hdr_desc.hdr.len = tmp; - hdr_desc.hdr.l2_len = (u8)hdr_len[0]; - hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); - hdr_desc.hdr.l4_len = (u8)hdr_len[2]; - hdr_desc.hdr.flag = hdr_field << 1; + hdr_desc->hdr.first = IBMVNIC_CRQ_CMD; + hdr_desc->hdr.type = IBMVNIC_HDR_DESC; + hdr_desc->hdr.len = tmp; + hdr_desc->hdr.l2_len = (u8)hdr_len[0]; + hdr_desc->hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); + hdr_desc->hdr.l4_len = (u8)hdr_len[2]; + hdr_desc->hdr.flag = hdr_field << 1; } memcpy(data, cur, tmp); tmp_len -= tmp; - *scrq_arr = hdr_desc; - scrq_arr++; num_descs++; } @@ -2267,13 +2258,11 @@ static void build_hdr_descs_arr(struct sk_buff *skb, int *num_entries, u8 hdr_field) { int hdr_len[3] = {0, 0, 0}; - u8 hdr_data[140] = {0}; int tot_len; - tot_len = build_hdr_data(hdr_field, skb, hdr_len, - hdr_data); - *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, - indir_arr + 1); + tot_len = get_hdr_lens(hdr_field, skb, hdr_len); + *num_entries += create_hdr_descs(hdr_field, skb_mac_header(skb), + tot_len, hdr_len, indir_arr + 1); } static int ibmvnic_xmit_workarounds(struct sk_buff *skb, @@ -2325,7 +2314,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, tx_buff = &tx_pool->tx_buff[index]; adapter->netdev->stats.tx_packets--; adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; - adapter->tx_stats_buffers[queue_num].packets--; + adapter->tx_stats_buffers[queue_num].batched_packets--; adapter->tx_stats_buffers[queue_num].bytes -= tx_buff->skb->len; dev_kfree_skb_any(tx_buff->skb); @@ -2350,8 +2339,29 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, } } +static int send_subcrq_direct(struct ibmvnic_adapter *adapter, + u64 remote_handle, u64 *entry) +{ + unsigned int ua = adapter->vdev->unit_address; + struct device *dev = &adapter->vdev->dev; + int rc; + + /* Make sure the hypervisor sees the complete request */ + dma_wmb(); + rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua, + cpu_to_be64(remote_handle), + cpu_to_be64(entry[0]), cpu_to_be64(entry[1]), + cpu_to_be64(entry[2]), cpu_to_be64(entry[3])); + + if (rc) + print_subcrq_error(dev, rc, __func__); + + return rc; +} + static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, - struct ibmvnic_sub_crq_queue *tx_scrq) + struct ibmvnic_sub_crq_queue *tx_scrq, + bool indirect) { struct ibmvnic_ind_xmit_queue *ind_bufp; u64 dma_addr; @@ -2366,12 +2376,18 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, if (!entries) return 0; - rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); + + if (indirect) + rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); + else + rc = send_subcrq_direct(adapter, handle, + (u64 *)ind_bufp->indir_arr); + if (rc) ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); else ind_bufp->index = 0; - return 0; + return rc; } static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) @@ -2390,13 +2406,16 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned int tx_map_failed = 0; union sub_crq indir_arr[16]; unsigned int tx_dropped = 0; - unsigned int tx_packets = 0; + unsigned int tx_dpackets = 0; + unsigned int tx_bpackets = 0; unsigned int tx_bytes = 0; dma_addr_t data_dma_addr; struct netdev_queue *txq; unsigned long lpar_rc; + unsigned int skblen; union sub_crq tx_crq; unsigned int offset; + bool use_scrq_send_direct = false; int num_entries = 1; unsigned 
char *dst;
 	int bufidx = 0;
@@ -2424,7 +2443,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		tx_dropped++;
 		tx_send_failed++;
 		ret = NETDEV_TX_OK;
-		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
+		if (lpar_rc != H_SUCCESS)
+			goto tx_err;
 		goto out;
 	}
 
@@ -2439,8 +2460,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		dev_kfree_skb_any(skb);
 		tx_send_failed++;
 		tx_dropped++;
-		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
 		ret = NETDEV_TX_OK;
+		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
+		if (lpar_rc != H_SUCCESS)
+			goto tx_err;
 		goto out;
 	}
 
@@ -2452,6 +2475,20 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	memset(dst, 0, tx_pool->buf_size);
 	data_dma_addr = ltb->addr + offset;
 
+	/* If we are going to use send_subcrq_direct for this skb then we
+	 * need to update the checksum before copying the data into the ltb.
+	 * Essentially these packets force-disable CSO so that we can guarantee
+	 * that FW does not need header info and we can send direct. Also, the
+	 * vnic server must be able to xmit standard packets without header data.
+	 */
+	if (*hdrs == 0 && !skb_is_gso(skb) &&
+	    !ind_bufp->index && !netdev_xmit_more()) {
+		use_scrq_send_direct = true;
+		if (skb->ip_summed == CHECKSUM_PARTIAL &&
+		    skb_checksum_help(skb))
+			use_scrq_send_direct = false;
+	}
+
 	if (skb_shinfo(skb)->nr_frags) {
 		int cur, i;
 
@@ -2471,16 +2508,26 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		skb_copy_from_linear_data(skb, dst, skb->len);
 	}
 
-	/* post changes to long_term_buff *dst before VIOS accessing it */
-	dma_wmb();
-
 	tx_pool->consumer_index =
 	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
 
 	tx_buff = &tx_pool->tx_buff[bufidx];
+
+	/* Sanity checks on our free map to make sure it points to an index
+	 * that is not being occupied by another skb. If skb memory is
+	 * not freed then we see congestion control kick in and halt tx.
+	 */
+	if (unlikely(tx_buff->skb)) {
+		dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n",
+				     skb_is_gso(skb) ?
"tso_pool" : "tx_pool", + queue_num, bufidx); + dev_kfree_skb_any(tx_buff->skb); + } + tx_buff->skb = skb; tx_buff->index = bufidx; tx_buff->pool_index = queue_num; + skblen = skb->len; memset(&tx_crq, 0, sizeof(tx_crq)); tx_crq.v1.first = IBMVNIC_CRQ_CMD; @@ -2524,6 +2571,19 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); hdrs += 2; + } else if (use_scrq_send_direct) { + /* See above comment, CSO disabled with direct xmit */ + tx_crq.v1.flags1 &= ~(IBMVNIC_TX_CHKSUM_OFFLOAD); + ind_bufp->index = 1; + tx_buff->num_entries = 1; + netdev_tx_sent_queue(txq, skb->len); + ind_bufp->indir_arr[0] = tx_crq; + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, false); + if (lpar_rc != H_SUCCESS) + goto tx_err; + + tx_dpackets++; + goto early_exit; } if ((*hdrs >> 7) & 1) @@ -2533,7 +2593,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_buff->num_entries = num_entries; /* flush buffer if current entry can not fit */ if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) { - lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true); if (lpar_rc != H_SUCCESS) goto tx_flush_err; } @@ -2541,23 +2601,26 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) indir_arr[0] = tx_crq; memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0], num_entries * sizeof(struct ibmvnic_generic_scrq)); + ind_bufp->index += num_entries; if (__netdev_tx_sent_queue(txq, skb->len, netdev_xmit_more() && ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) { - lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true); if (lpar_rc != H_SUCCESS) goto tx_err; } + tx_bpackets++; + +early_exit: if (atomic_add_return(num_entries, &tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) { netdev_dbg(netdev, "Stopping queue %d\n", queue_num); netif_stop_subqueue(netdev, queue_num); } - tx_packets++; - tx_bytes += skb->len; + tx_bytes += skblen; txq_trans_cond_update(txq); ret = NETDEV_TX_OK; goto out; @@ -2586,10 +2649,11 @@ out: rcu_read_unlock(); netdev->stats.tx_dropped += tx_dropped; netdev->stats.tx_bytes += tx_bytes; - netdev->stats.tx_packets += tx_packets; + netdev->stats.tx_packets += tx_bpackets + tx_dpackets; adapter->tx_send_failed += tx_send_failed; adapter->tx_map_failed += tx_map_failed; - adapter->tx_stats_buffers[queue_num].packets += tx_packets; + adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets; + adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets; adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; @@ -3511,9 +3575,8 @@ restart_poll: } if (adapter->state != VNIC_CLOSING && - ((atomic_read(&adapter->rx_pool[scrq_num].available) < - adapter->req_rx_add_entries_per_subcrq / 2) || - frames_processed < budget)) + (atomic_read(&adapter->rx_pool[scrq_num].available) < + adapter->req_rx_add_entries_per_subcrq / 2)) replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); if (frames_processed < budget) { if (napi_complete_done(napi, frames_processed)) { @@ -3751,29 +3814,20 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) if (stringset != ETH_SS_STATS) return; - for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) - memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); + for (i = 
0; i < ARRAY_SIZE(ibmvnic_stats); i++) + ethtool_puts(&data, ibmvnic_stats[i].name); for (i = 0; i < adapter->req_tx_queues; i++) { - snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); - data += ETH_GSTRING_LEN; - - snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); - data += ETH_GSTRING_LEN; - - snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); - data += ETH_GSTRING_LEN; + ethtool_sprintf(&data, "tx%d_batched_packets", i); + ethtool_sprintf(&data, "tx%d_direct_packets", i); + ethtool_sprintf(&data, "tx%d_bytes", i); + ethtool_sprintf(&data, "tx%d_dropped_packets", i); } for (i = 0; i < adapter->req_rx_queues; i++) { - snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); - data += ETH_GSTRING_LEN; - - snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); - data += ETH_GSTRING_LEN; - - snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); - data += ETH_GSTRING_LEN; + ethtool_sprintf(&data, "rx%d_packets", i); + ethtool_sprintf(&data, "rx%d_bytes", i); + ethtool_sprintf(&data, "rx%d_interrupts", i); } } @@ -3820,7 +3874,9 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, (adapter, ibmvnic_stats[i].offset)); for (j = 0; j < adapter->req_tx_queues; j++) { - data[i] = adapter->tx_stats_buffers[j].packets; + data[i] = adapter->tx_stats_buffers[j].batched_packets; + i++; + data[i] = adapter->tx_stats_buffers[j].direct_packets; i++; data[i] = adapter->tx_stats_buffers[j].bytes; i++; @@ -4057,6 +4113,12 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) adapter->num_active_tx_scrqs = 0; } + /* Clean any remaining outstanding SKBs + * we freed the irq so we won't be hearing + * from them + */ + clean_tx_pools(adapter); + if (adapter->rx_scrq) { for (i = 0; i < adapter->num_active_rx_scrqs; i++) { if (!adapter->rx_scrq[i]) @@ -4147,20 +4209,17 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, struct ibmvnic_sub_crq_queue *scrq) { struct device *dev = &adapter->vdev->dev; + int num_packets = 0, total_bytes = 0; struct ibmvnic_tx_pool *tx_pool; struct ibmvnic_tx_buff *txbuff; struct netdev_queue *txq; union sub_crq *next; - int index; - int i; + int index, i; restart_loop: while (pending_scrq(adapter, scrq)) { unsigned int pool = scrq->pool_index; int num_entries = 0; - int total_bytes = 0; - int num_packets = 0; - next = ibmvnic_next_scrq(adapter, scrq); for (i = 0; i < next->tx_comp.num_comps; i++) { index = be32_to_cpu(next->tx_comp.correlators[i]); @@ -4196,8 +4255,6 @@ restart_loop: /* remove tx_comp scrq*/ next->tx_comp.first = 0; - txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); - netdev_tx_completed_queue(txq, num_packets, total_bytes); if (atomic_sub_return(num_entries, &scrq->used) <= (adapter->req_tx_entries_per_subcrq / 2) && @@ -4222,6 +4279,9 @@ restart_loop: goto restart_loop; } + txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); + netdev_tx_completed_queue(txq, num_packets, total_bytes); + return 0; } @@ -4773,6 +4833,18 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter, strscpy(vlcd->name, adapter->netdev->name, len); } +static void ibmvnic_print_hex_dump(struct net_device *dev, void *buf, + size_t len) +{ + unsigned char hex_str[16 * 3]; + + for (size_t i = 0; i < len; i += 16) { + hex_dump_to_buffer((unsigned char *)buf + i, len - i, 16, 8, + hex_str, sizeof(hex_str), false); + netdev_dbg(dev, "%s\n", hex_str); + } +} + static int send_login(struct ibmvnic_adapter *adapter) { struct ibmvnic_login_rsp_buffer *login_rsp_buffer; @@ -4883,10 +4955,8 @@ static int 
send_login(struct ibmvnic_adapter *adapter) vnic_add_client_data(adapter, vlcd); netdev_dbg(adapter->netdev, "Login Buffer:\n"); - for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { - netdev_dbg(adapter->netdev, "%016lx\n", - ((unsigned long *)(adapter->login_buf))[i]); - } + ibmvnic_print_hex_dump(adapter->netdev, adapter->login_buf, + adapter->login_buf_sz); memset(&crq, 0, sizeof(crq)); crq.login.first = IBMVNIC_CRQ_CMD; @@ -5263,15 +5333,13 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; - int i; dma_unmap_single(dev, adapter->ip_offload_tok, sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); - for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) - netdev_dbg(adapter->netdev, "%016lx\n", - ((unsigned long *)(buf))[i]); + ibmvnic_print_hex_dump(adapter->netdev, buf, + sizeof(adapter->ip_offload_buf)); netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); @@ -5502,10 +5570,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, netdev->mtu = adapter->req_mtu - ETH_HLEN; netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); - for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { - netdev_dbg(adapter->netdev, "%016lx\n", - ((unsigned long *)(adapter->login_rsp_buf))[i]); - } + ibmvnic_print_hex_dump(netdev, adapter->login_rsp_buf, + adapter->login_rsp_buf_sz); /* Sanity checks */ if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 94ac36b1408b..a189038d88df 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -213,7 +213,8 @@ struct ibmvnic_statistics { #define NUM_TX_STATS 3 struct ibmvnic_tx_queue_stats { - u64 packets; + u64 batched_packets; + u64 direct_packets; u64 bytes; u64 dropped_packets; }; |
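With the per-queue TX packet counter split in two, the name table built in ibmvnic_get_strings() and the value table built in ibmvnic_get_ethtool_stats() must emit the per-queue entries in exactly the same order: batched packets, direct packets, bytes, dropped packets. One way to keep the two callbacks in lockstep is a shared descriptor table; a hedged sketch, assuming the struct from this patch (tx_stat_desc and tx_stat_descs are illustrative, not part of the driver):

	#include <linux/stddef.h>

	struct tx_stat_desc {
		const char *fmt;	/* queue-numbered name, fed to ethtool_sprintf() */
		size_t offset;		/* field inside struct ibmvnic_tx_queue_stats */
	};

	static const struct tx_stat_desc tx_stat_descs[] = {
		{ "tx%d_batched_packets",
		  offsetof(struct ibmvnic_tx_queue_stats, batched_packets) },
		{ "tx%d_direct_packets",
		  offsetof(struct ibmvnic_tx_queue_stats, direct_packets) },
		{ "tx%d_bytes",
		  offsetof(struct ibmvnic_tx_queue_stats, bytes) },
		{ "tx%d_dropped_packets",
		  offsetof(struct ibmvnic_tx_queue_stats, dropped_packets) },
	};

Both ethtool callbacks could then iterate this one table, so a future counter would need only a new row rather than matching edits in two functions.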