Diffstat (limited to 'drivers/net/usb/usbnet.c')
| -rw-r--r-- | drivers/net/usb/usbnet.c | 1022 |
1 file changed, 664 insertions, 358 deletions
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 06ee82f557d4..1d9faa70ba3b 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1,21 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * USB Network driver infrastructure * Copyright (C) 2000-2005 by David Brownell * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* @@ -30,9 +17,6 @@ * issues can usefully be addressed by this framework. */ -// #define DEBUG // error path messages, extra info -// #define VERBOSE // more; success messages - #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> @@ -47,9 +31,6 @@ #include <linux/kernel.h> #include <linux/pm_runtime.h> -#define DRIVER_VERSION "22-Aug-2005" - - /*-------------------------------------------------------------------------*/ /* @@ -59,21 +40,20 @@ * For high speed, each frame comfortably fits almost 36 max size * Ethernet packets (so queues should be bigger). * - * REVISIT qlens should be members of 'struct usbnet'; the goal is to - * let the USB host controller be busy for 5msec or more before an irq - * is required, under load. Jumbograms change the equation. + * The goal is to let the USB host controller be busy for 5msec or + * more before an irq is required, under load. Jumbograms change + * the equation. */ -#define RX_MAX_QUEUE_MEMORY (60 * 1518) -#define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \ - (RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4) -#define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \ - (RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4) +#define MAX_QUEUE_MEMORY (60 * 1518) +#define RX_QLEN(dev) ((dev)->rx_qlen) +#define TX_QLEN(dev) ((dev)->tx_qlen) // reawaken network queue this soon after stopping; else watchdog barks #define TX_TIMEOUT_JIFFIES (5*HZ) -// throttle rx/tx briefly after some faults, so khubd might disconnect() -// us (it polls at HZ/4 usually) before we report too many false errors. +/* throttle rx/tx briefly after some faults, so hub_wq might disconnect() + * us (it polls at HZ/4 usually) before we report too many false errors. 
+ */ #define THROTTLE_JIFFIES (HZ/8) // between wakeups @@ -81,11 +61,6 @@ /*-------------------------------------------------------------------------*/ -// randomly generated ethernet address -static u8 node_id [ETH_ALEN]; - -static const char driver_name [] = "usbnet"; - /* use ethtool to change the level for any given device */ static int msg_level = -1; module_param (msg_level, int, 0); @@ -93,6 +68,23 @@ MODULE_PARM_DESC (msg_level, "Override default message level"); /*-------------------------------------------------------------------------*/ +static const char * const usbnet_event_names[] = { + [EVENT_TX_HALT] = "EVENT_TX_HALT", + [EVENT_RX_HALT] = "EVENT_RX_HALT", + [EVENT_RX_MEMORY] = "EVENT_RX_MEMORY", + [EVENT_STS_SPLIT] = "EVENT_STS_SPLIT", + [EVENT_LINK_RESET] = "EVENT_LINK_RESET", + [EVENT_RX_PAUSED] = "EVENT_RX_PAUSED", + [EVENT_DEV_ASLEEP] = "EVENT_DEV_ASLEEP", + [EVENT_DEV_OPEN] = "EVENT_DEV_OPEN", + [EVENT_DEVICE_REPORT_IDLE] = "EVENT_DEVICE_REPORT_IDLE", + [EVENT_NO_RUNTIME_PM] = "EVENT_NO_RUNTIME_PM", + [EVENT_RX_KILL] = "EVENT_RX_KILL", + [EVENT_LINK_CHANGE] = "EVENT_LINK_CHANGE", + [EVENT_SET_RX_MODE] = "EVENT_SET_RX_MODE", + [EVENT_NO_IP_ALIGN] = "EVENT_NO_IP_ALIGN", +}; + /* handles CDC Ethernet and many other network "bulk data" interfaces */ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf) { @@ -116,12 +108,17 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf) int intr = 0; e = alt->endpoint + ep; + + /* ignore endpoints which cannot transfer data */ + if (!usb_endpoint_maxp(&e->desc)) + continue; + switch (e->desc.bmAttributes) { case USB_ENDPOINT_XFER_INT: if (!usb_endpoint_dir_in(&e->desc)) continue; intr = 1; - /* FALLTHROUGH */ + fallthrough; case USB_ENDPOINT_XFER_BULK: break; default: @@ -145,16 +142,16 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf) if (alt->desc.bAlternateSetting != 0 || !(dev->driver_info->flags & FLAG_NO_SETINT)) { - tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber, - alt->desc.bAlternateSetting); + tmp = usb_set_interface(dev->udev, alt->desc.bInterfaceNumber, + alt->desc.bAlternateSetting); if (tmp < 0) return tmp; } - dev->in = usb_rcvbulkpipe (dev->udev, - in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); - dev->out = usb_sndbulkpipe (dev->udev, - out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); + dev->in = usb_rcvbulkpipe(dev->udev, + in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); + dev->out = usb_sndbulkpipe(dev->udev, + out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); dev->status = status; return 0; } @@ -162,25 +159,37 @@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints); int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) { - int tmp, i; + u8 addr[ETH_ALEN]; + int tmp = -1, ret; unsigned char buf [13]; - tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf); - if (tmp != 12) { + ret = usb_string(dev->udev, iMACAddress, buf, sizeof(buf)); + if (ret == 12) + tmp = hex2bin(addr, buf, 6); + if (tmp < 0) { dev_dbg(&dev->udev->dev, "bad MAC string %d fetch, %d\n", iMACAddress, tmp); - if (tmp >= 0) - tmp = -EINVAL; - return tmp; + if (ret >= 0) + ret = -EINVAL; + return ret; } - for (i = tmp = 0; i < 6; i++, tmp += 2) - dev->net->dev_addr [i] = - (hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]); + eth_hw_addr_set(dev->net, addr); return 0; } EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); -static void intr_complete (struct urb *urb) +static bool usbnet_needs_usb_name_format(struct usbnet *dev, struct 
net_device *net) +{ + /* Point to point devices which don't have a real MAC address + * (or report a fake local one) have historically used the usb%d + * naming. Preserve this.. + */ + return (dev->driver_info->flags & FLAG_POINTTOPOINT) != 0 && + (is_zero_ether_addr(net->dev_addr) || + is_local_ether_addr(net->dev_addr)); +} + +static void intr_complete(struct urb *urb) { struct usbnet *dev = urb->context; int status = urb->status; @@ -206,16 +215,13 @@ static void intr_complete (struct urb *urb) break; } - if (!netif_running (dev->net)) - return; - - status = usb_submit_urb (urb, GFP_ATOMIC); + status = usb_submit_urb(urb, GFP_ATOMIC); if (status != 0) netif_err(dev, timer, dev->net, "intr resubmit --> %d\n", status); } -static int init_status (struct usbnet *dev, struct usb_interface *intf) +static int init_status(struct usbnet *dev, struct usb_interface *intf) { char *buf = NULL; unsigned pipe = 0; @@ -225,24 +231,24 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf) if (!dev->driver_info->status) return 0; - pipe = usb_rcvintpipe (dev->udev, - dev->status->desc.bEndpointAddress - & USB_ENDPOINT_NUMBER_MASK); - maxp = usb_maxpacket (dev->udev, pipe, 0); + pipe = usb_rcvintpipe(dev->udev, + dev->status->desc.bEndpointAddress + & USB_ENDPOINT_NUMBER_MASK); + maxp = usb_maxpacket(dev->udev, pipe); /* avoid 1 msec chatter: min 8 msec poll rate */ period = max ((int) dev->status->desc.bInterval, (dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3); - buf = kmalloc (maxp, GFP_KERNEL); + buf = kmalloc(maxp, GFP_KERNEL); if (buf) { - dev->interrupt = usb_alloc_urb (0, GFP_KERNEL); + dev->interrupt = usb_alloc_urb(0, GFP_KERNEL); if (!dev->interrupt) { - kfree (buf); + kfree(buf); return -ENOMEM; } else { usb_fill_int_urb(dev->interrupt, dev->udev, pipe, - buf, maxp, intr_complete, dev, period); + buf, maxp, intr_complete, dev, period); dev->interrupt->transfer_flags |= URB_FREE_BUFFER; dev_dbg(&intf->dev, "status ep%din, %d bytes period %d\n", @@ -320,8 +326,10 @@ static void __usbnet_status_stop_force(struct usbnet *dev) * Some link protocols batch packets, so their rx_fixup paths * can return clones as well as just modify the original skb. 
*/ -void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) +void usbnet_skb_return(struct usbnet *dev, struct sk_buff *skb) { + struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats); + unsigned long flags; int status; if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { @@ -329,13 +337,18 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) return; } - skb->protocol = eth_type_trans (skb, dev->net); - dev->net->stats.rx_packets++; - dev->net->stats.rx_bytes += skb->len; + /* only update if unset to allow minidriver rx_fixup override */ + if (skb->protocol == 0) + skb->protocol = eth_type_trans(skb, dev->net); + + flags = u64_stats_update_begin_irqsave(&stats64->syncp); + u64_stats_inc(&stats64->rx_packets); + u64_stats_add(&stats64->rx_bytes, skb->len); + u64_stats_update_end_irqrestore(&stats64->syncp, flags); netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", - skb->len + sizeof (struct ethhdr), skb->protocol); - memset (skb->cb, 0, sizeof (struct skb_data)); + skb->len + sizeof(struct ethhdr), skb->protocol); + memset(skb->cb, 0, sizeof(struct skb_data)); if (skb_defer_rx_timestamp(skb)) return; @@ -347,34 +360,67 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) } EXPORT_SYMBOL_GPL(usbnet_skb_return); - +/* must be called if hard_mtu or rx_urb_size changed */ +void usbnet_update_max_qlen(struct usbnet *dev) +{ + enum usb_device_speed speed = dev->udev->speed; + + if (!dev->rx_urb_size || !dev->hard_mtu) + goto insanity; + switch (speed) { + case USB_SPEED_HIGH: + dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size; + dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu; + break; + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + /* + * Not take default 5ms qlen for super speed HC to + * save memory, and iperf tests show 2.5ms qlen can + * work well + */ + dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size; + dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu; + break; + default: +insanity: + dev->rx_qlen = dev->tx_qlen = 4; + } +} +EXPORT_SYMBOL_GPL(usbnet_update_max_qlen); + + /*------------------------------------------------------------------------- * * Network Device Driver (peer link to "Host Device", from USB host) * *-------------------------------------------------------------------------*/ -int usbnet_change_mtu (struct net_device *net, int new_mtu) +int usbnet_change_mtu(struct net_device *net, int new_mtu) { struct usbnet *dev = netdev_priv(net); int ll_mtu = new_mtu + net->hard_header_len; int old_hard_mtu = dev->hard_mtu; int old_rx_urb_size = dev->rx_urb_size; - if (new_mtu <= 0) - return -EINVAL; // no second zero-length packet read wanted after mtu-sized packets if ((ll_mtu % dev->maxpacket) == 0) return -EDOM; - net->mtu = new_mtu; + WRITE_ONCE(net->mtu, new_mtu); dev->hard_mtu = net->mtu + net->hard_header_len; if (dev->rx_urb_size == old_hard_mtu) { dev->rx_urb_size = dev->hard_mtu; - if (dev->rx_urb_size > old_rx_urb_size) + if (dev->rx_urb_size > old_rx_urb_size) { + usbnet_pause_rx(dev); usbnet_unlink_rx_urbs(dev); + usbnet_resume_rx(dev); + } } + /* max qlen depend on hard_mtu and rx_urb_size */ + usbnet_update_max_qlen(dev); + return 0; } EXPORT_SYMBOL_GPL(usbnet_change_mtu); @@ -406,12 +452,18 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, old_state = entry->state; entry->state = state; __skb_unlink(skb, list); - spin_unlock(&list->lock); - spin_lock(&dev->done.lock); + + /* defer_bh() is never called with list == &dev->done. 
+ * spin_lock_nested() tells lockdep that it is OK to take + * dev->done.lock here with list->lock held. + */ + spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING); + __skb_queue_tail(&dev->done, skb); if (dev->done.qlen == 1) - tasklet_schedule(&dev->bh); - spin_unlock_irqrestore(&dev->done.lock, flags); + queue_work(system_bh_wq, &dev->bh_work); + spin_unlock(&dev->done.lock); + spin_unlock_irqrestore(&list->lock, flags); return old_state; } @@ -420,23 +472,26 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, * NOTE: annoying asymmetry: if it's active, schedule_work() fails, * but tasklet_schedule() doesn't. hope the failure is rare. */ -void usbnet_defer_kevent (struct usbnet *dev, int work) +void usbnet_defer_kevent(struct usbnet *dev, int work) { set_bit (work, &dev->flags); - if (!schedule_work (&dev->kevent)) { - if (net_ratelimit()) - netdev_err(dev->net, "kevent %d may have been dropped\n", work); - } else { - netdev_dbg(dev->net, "kevent %d scheduled\n", work); + if (!usbnet_going_away(dev)) { + if (!schedule_work(&dev->kevent)) + netdev_dbg(dev->net, + "kevent %s may have been dropped\n", + usbnet_event_names[work]); + else + netdev_dbg(dev->net, + "kevent %s scheduled\n", usbnet_event_names[work]); } } EXPORT_SYMBOL_GPL(usbnet_defer_kevent); /*-------------------------------------------------------------------------*/ -static void rx_complete (struct urb *urb); +static void rx_complete(struct urb *urb); -static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) +static int rx_submit(struct usbnet *dev, struct urb *urb, gfp_t flags) { struct sk_buff *skb; struct skb_data *entry; @@ -450,11 +505,14 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) return -ENOLINK; } - skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); + if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags)) + skb = __netdev_alloc_skb(dev->net, size, flags); + else + skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); if (!skb) { netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); - usbnet_defer_kevent (dev, EVENT_RX_MEMORY); - usb_free_urb (urb); + usbnet_defer_kevent(dev, EVENT_RX_MEMORY); + usb_free_urb(urb); return -ENOMEM; } @@ -463,25 +521,27 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) entry->dev = dev; entry->length = 0; - usb_fill_bulk_urb (urb, dev->udev, dev->in, - skb->data, size, rx_complete, skb); + usb_fill_bulk_urb(urb, dev->udev, dev->in, + skb->data, size, rx_complete, skb); - spin_lock_irqsave (&dev->rxq.lock, lockflags); + spin_lock_irqsave(&dev->rxq.lock, lockflags); - if (netif_running (dev->net) && - netif_device_present (dev->net) && - !test_bit (EVENT_RX_HALT, &dev->flags) && - !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) { - switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) { + if (netif_running(dev->net) && + netif_device_present(dev->net) && + test_bit(EVENT_DEV_OPEN, &dev->flags) && + !test_bit(EVENT_RX_HALT, &dev->flags) && + !test_bit(EVENT_DEV_ASLEEP, &dev->flags) && + !usbnet_going_away(dev)) { + switch (retval = usb_submit_urb(urb, GFP_ATOMIC)) { case -EPIPE: - usbnet_defer_kevent (dev, EVENT_RX_HALT); + usbnet_defer_kevent(dev, EVENT_RX_HALT); break; case -ENOMEM: - usbnet_defer_kevent (dev, EVENT_RX_MEMORY); + usbnet_defer_kevent(dev, EVENT_RX_MEMORY); break; case -ENODEV: netif_dbg(dev, ifdown, dev->net, "device gone\n"); - netif_device_detach (dev->net); + netif_device_detach(dev->net); break; case -EHOSTUNREACH: retval = -ENOLINK; @@ -489,7 +549,7 @@ static int rx_submit (struct 
usbnet *dev, struct urb *urb, gfp_t flags) default: netif_dbg(dev, rx_err, dev->net, "rx submit, %d\n", retval); - tasklet_schedule (&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); break; case 0: __usbnet_queue_skb(&dev->rxq, skb, rx_start); @@ -498,10 +558,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); retval = -ENOLINK; } - spin_unlock_irqrestore (&dev->rxq.lock, lockflags); + spin_unlock_irqrestore(&dev->rxq.lock, lockflags); if (retval) { - dev_kfree_skb_any (skb); - usb_free_urb (urb); + dev_kfree_skb_any(skb); + usb_free_urb(urb); } return retval; } @@ -509,35 +569,35 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) /*-------------------------------------------------------------------------*/ -static inline void rx_process (struct usbnet *dev, struct sk_buff *skb) +static inline int rx_process(struct usbnet *dev, struct sk_buff *skb) { if (dev->driver_info->rx_fixup && - !dev->driver_info->rx_fixup (dev, skb)) { + !dev->driver_info->rx_fixup(dev, skb)) { /* With RX_ASSEMBLE, rx_fixup() must update counters */ if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE)) dev->net->stats.rx_errors++; - goto done; + return -EPROTO; } // else network stack removes extra byte if we forced a short packet - if (skb->len) { - /* all data was already cloned from skb inside the driver */ - if (dev->driver_info->flags & FLAG_MULTI_PACKET) - dev_kfree_skb_any(skb); - else - usbnet_skb_return(dev, skb); - return; + /* all data was already cloned from skb inside the driver */ + if (dev->driver_info->flags & FLAG_MULTI_PACKET) + return -EALREADY; + + if (skb->len < ETH_HLEN) { + dev->net->stats.rx_errors++; + dev->net->stats.rx_length_errors++; + netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); + return -EPROTO; } - netif_dbg(dev, rx_err, dev->net, "drop\n"); - dev->net->stats.rx_errors++; -done: - skb_queue_tail(&dev->done, skb); + usbnet_skb_return(dev, skb); + return 0; } /*-------------------------------------------------------------------------*/ -static void rx_complete (struct urb *urb) +static void rx_complete(struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; @@ -545,20 +605,13 @@ static void rx_complete (struct urb *urb) int urb_status = urb->status; enum skb_state state; - skb_put (skb, urb->actual_length); + skb_put(skb, urb->actual_length); state = rx_done; entry->urb = NULL; switch (urb_status) { /* success */ case 0: - if (skb->len < dev->net->hard_header_len) { - state = rx_cleanup; - dev->net->stats.rx_errors++; - dev->net->stats.rx_length_errors++; - netif_dbg(dev, rx_err, dev->net, - "rx length %d\n", skb->len); - } break; /* stalls need manual reset. this is rare ... except that @@ -568,8 +621,8 @@ static void rx_complete (struct urb *urb) */ case -EPIPE: dev->net->stats.rx_errors++; - usbnet_defer_kevent (dev, EVENT_RX_HALT); - // FALLTHROUGH + usbnet_defer_kevent(dev, EVENT_RX_HALT); + fallthrough; /* software-driven interface shutdown */ case -ECONNRESET: /* async unlink */ @@ -578,16 +631,16 @@ static void rx_complete (struct urb *urb) "rx shutdown, code %d\n", urb_status); goto block; - /* we get controller i/o faults during khubd disconnect() delays. + /* we get controller i/o faults during hub_wq disconnect() delays. * throttle down resubmits, to avoid log floods; just temporarily, - * so we still recover when the fault isn't a khubd delay. 
+ * so we still recover when the fault isn't a hub_wq delay. */ case -EPROTO: case -ETIME: case -EILSEQ: dev->net->stats.rx_errors++; - if (!timer_pending (&dev->delay)) { - mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); + if (!timer_pending(&dev->delay)) { + mod_timer(&dev->delay, jiffies + THROTTLE_JIFFIES); netif_dbg(dev, link, dev->net, "rx throttle %d\n", urb_status); } @@ -600,7 +653,7 @@ block: /* data overrun ... flush fifo? */ case -EOVERFLOW: dev->net->stats.rx_over_errors++; - // FALLTHROUGH + fallthrough; default: state = rx_cleanup; @@ -623,14 +676,14 @@ block: state = defer_bh(dev, skb, &dev->rxq, state); if (urb) { - if (netif_running (dev->net) && - !test_bit (EVENT_RX_HALT, &dev->flags) && + if (netif_running(dev->net) && + !test_bit(EVENT_RX_HALT, &dev->flags) && state != unlink_start) { - rx_submit (dev, urb, GFP_ATOMIC); + rx_submit(dev, urb, GFP_ATOMIC); usb_mark_last_busy(dev->udev); return; } - usb_free_urb (urb); + usb_free_urb(urb); } netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); } @@ -649,6 +702,7 @@ void usbnet_resume_rx(struct usbnet *dev) struct sk_buff *skb; int num = 0; + local_bh_disable(); clear_bit(EVENT_RX_PAUSED, &dev->flags); while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) { @@ -656,7 +710,8 @@ void usbnet_resume_rx(struct usbnet *dev) num++; } - tasklet_schedule(&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); + local_bh_enable(); netif_dbg(dev, rx_status, dev->net, "paused rx queue disabled, %d skbs requeued\n", num); @@ -673,7 +728,7 @@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq); // unlink pending rx/tx; completion handlers do all other cleanup -static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q) +static int unlink_urbs(struct usbnet *dev, struct sk_buff_head *q) { unsigned long flags; struct sk_buff *skb; @@ -706,7 +761,7 @@ found: spin_unlock_irqrestore(&q->lock, flags); // during some PM-driven resume scenarios, // these (async) unlinks complete immediately - retval = usb_unlink_urb (urb); + retval = usb_unlink_urb(urb); if (retval != -EINPROGRESS && retval != 0) netdev_dbg(dev->net, "unlink urb err, %d\n", retval); else @@ -714,7 +769,7 @@ found: usb_put_urb(urb); spin_lock_irqsave(&q->lock, flags); } - spin_unlock_irqrestore (&q->lock, flags); + spin_unlock_irqrestore(&q->lock, flags); return count; } @@ -725,55 +780,66 @@ void usbnet_unlink_rx_urbs(struct usbnet *dev) { if (netif_running(dev->net)) { (void) unlink_urbs (dev, &dev->rxq); - tasklet_schedule(&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); } } EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs); /*-------------------------------------------------------------------------*/ +static void wait_skb_queue_empty(struct sk_buff_head *q) +{ + unsigned long flags; + + spin_lock_irqsave(&q->lock, flags); + while (!skb_queue_empty(q)) { + spin_unlock_irqrestore(&q->lock, flags); + schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); + set_current_state(TASK_UNINTERRUPTIBLE); + spin_lock_irqsave(&q->lock, flags); + } + spin_unlock_irqrestore(&q->lock, flags); +} + // precondition: never called in_interrupt static void usbnet_terminate_urbs(struct usbnet *dev) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup); DECLARE_WAITQUEUE(wait, current); int temp; /* ensure there are no more active urbs */ - add_wait_queue(&unlink_wakeup, &wait); + add_wait_queue(&dev->wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); - dev->wait = &unlink_wakeup; temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq); /* maybe wait for deletions to finish. 
*/ - while (!skb_queue_empty(&dev->rxq) - && !skb_queue_empty(&dev->txq) - && !skb_queue_empty(&dev->done)) { - schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); - set_current_state(TASK_UNINTERRUPTIBLE); - netif_dbg(dev, ifdown, dev->net, - "waited for %d urb completions\n", temp); - } + wait_skb_queue_empty(&dev->rxq); + wait_skb_queue_empty(&dev->txq); + wait_skb_queue_empty(&dev->done); + netif_dbg(dev, ifdown, dev->net, + "waited for %d urb completions\n", temp); set_current_state(TASK_RUNNING); - dev->wait = NULL; - remove_wait_queue(&unlink_wakeup, &wait); + remove_wait_queue(&dev->wait, &wait); } -int usbnet_stop (struct net_device *net) +int usbnet_stop(struct net_device *net) { struct usbnet *dev = netdev_priv(net); - struct driver_info *info = dev->driver_info; - int retval; + const struct driver_info *info = dev->driver_info; + int retval, pm, mpn; clear_bit(EVENT_DEV_OPEN, &dev->flags); - netif_stop_queue (net); + netif_stop_queue(net); + netdev_reset_queue(net); netif_info(dev, ifdown, dev->net, "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n", net->stats.rx_packets, net->stats.tx_packets, net->stats.rx_errors, net->stats.tx_errors); + /* to not race resume */ + pm = usb_autopm_get_interface(dev->intf); /* allow minidriver to stop correctly (wireless devices to turn off * radio etc) */ if (info->stop) { @@ -793,15 +859,26 @@ int usbnet_stop (struct net_device *net) usbnet_purge_paused_rxq(dev); - /* deferred work (task, timer, softirq) must also stop. - * can't flush_scheduled_work() until we drop rtnl (later), - * else workers could deadlock; so make workers a NOP. - */ + mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags); + + /* deferred work (timer, softirq, task) must also stop */ dev->flags = 0; - del_timer_sync (&dev->delay); - tasklet_kill (&dev->bh); - if (info->manage_power && - !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags)) + timer_delete_sync(&dev->delay); + cancel_work_sync(&dev->bh_work); + cancel_work_sync(&dev->kevent); + + /* We have cyclic dependencies. Those calls are needed + * to break a cycle. 
We cannot fall into the gaps because + * we have a flag + */ + cancel_work_sync(&dev->bh_work); + timer_delete_sync(&dev->delay); + cancel_work_sync(&dev->kevent); + + if (!pm) + usb_autopm_put_interface(dev->intf); + + if (info->manage_power && mpn) info->manage_power(dev, 0); else usb_autopm_put_interface(dev->intf); @@ -816,11 +893,11 @@ EXPORT_SYMBOL_GPL(usbnet_stop); // precondition: never called in_interrupt -int usbnet_open (struct net_device *net) +int usbnet_open(struct net_device *net) { struct usbnet *dev = netdev_priv(net); int retval; - struct driver_info *info = dev->driver_info; + const struct driver_info *info = dev->driver_info; if ((retval = usb_autopm_get_interface(dev->intf)) < 0) { netif_info(dev, ifup, dev->net, @@ -833,20 +910,29 @@ int usbnet_open (struct net_device *net) } // put into "known safe" state - if (info->reset && (retval = info->reset (dev)) < 0) { - netif_info(dev, ifup, dev->net, - "open reset fail (%d) usbnet usb-%s-%s, %s\n", - retval, - dev->udev->bus->bus_name, - dev->udev->devpath, - info->description); - goto done; + if (info->reset) { + retval = info->reset(dev); + if (retval < 0) { + netif_info(dev, ifup, dev->net, + "open reset fail (%d) usbnet usb-%s-%s, %s\n", + retval, + dev->udev->bus->bus_name, + dev->udev->devpath, + info->description); + goto done; + } } + /* hard_mtu or rx_urb_size may change in reset() */ + usbnet_update_max_qlen(dev); + // insist peer be connected - if (info->check_connect && (retval = info->check_connect (dev)) < 0) { - netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval); - goto done; + if (info->check_connect) { + retval = info->check_connect(dev); + if (retval < 0) { + netif_err(dev, ifup, dev->net, "can't open; %d\n", retval); + goto done; + } } /* start any status interrupt transfer */ @@ -860,6 +946,7 @@ int usbnet_open (struct net_device *net) } set_bit(EVENT_DEV_OPEN, &dev->flags); + netdev_reset_queue(net); netif_start_queue (net); netif_info(dev, ifup, dev->net, "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n", @@ -878,7 +965,7 @@ int usbnet_open (struct net_device *net) clear_bit(EVENT_RX_KILL, &dev->flags); // delay posting reads until we're fully open - tasklet_schedule (&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); if (info->manage_power) { retval = info->manage_power(dev, 1); if (retval < 0) { @@ -902,18 +989,53 @@ EXPORT_SYMBOL_GPL(usbnet_open); * they'll probably want to use this base set. */ -int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd) +/* These methods are written on the assumption that the device + * uses MII + */ +int usbnet_get_link_ksettings_mii(struct net_device *net, + struct ethtool_link_ksettings *cmd) { struct usbnet *dev = netdev_priv(net); if (!dev->mii.mdio_read) return -EOPNOTSUPP; - return mii_ethtool_gset(&dev->mii, cmd); + mii_ethtool_get_link_ksettings(&dev->mii, cmd); + + return 0; +} +EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings_mii); + +int usbnet_get_link_ksettings_internal(struct net_device *net, + struct ethtool_link_ksettings *cmd) +{ + struct usbnet *dev = netdev_priv(net); + + /* the assumption that speed is equal on tx and rx + * is deeply engrained into the networking layer. + * For wireless stuff it is not true. + * We assume that rx_speed matters more. 
+ */ + if (dev->rx_speed != SPEED_UNSET) + cmd->base.speed = dev->rx_speed / 1000000; + else if (dev->tx_speed != SPEED_UNSET) + cmd->base.speed = dev->tx_speed / 1000000; + else + cmd->base.speed = SPEED_UNKNOWN; + + /* The standard "Universal Serial Bus Class Definitions + * for Communications Devices v1.2" does not specify + * anything about duplex status. + * So set it DUPLEX_UNKNOWN instead of default DUPLEX_HALF. + */ + cmd->base.duplex = DUPLEX_UNKNOWN; + + return 0; } -EXPORT_SYMBOL_GPL(usbnet_get_settings); +EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings_internal); -int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd) +int usbnet_set_link_ksettings_mii(struct net_device *net, + const struct ethtool_link_ksettings *cmd) { struct usbnet *dev = netdev_priv(net); int retval; @@ -921,24 +1043,26 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd) if (!dev->mii.mdio_write) return -EOPNOTSUPP; - retval = mii_ethtool_sset(&dev->mii, cmd); + retval = mii_ethtool_set_link_ksettings(&dev->mii, cmd); /* link speed/duplex might have changed */ if (dev->driver_info->link_reset) dev->driver_info->link_reset(dev); - return retval; + /* hard_mtu or rx_urb_size may change in link_reset() */ + usbnet_update_max_qlen(dev); + return retval; } -EXPORT_SYMBOL_GPL(usbnet_set_settings); +EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings_mii); -u32 usbnet_get_link (struct net_device *net) +u32 usbnet_get_link(struct net_device *net) { struct usbnet *dev = netdev_priv(net); /* If a check_connect is defined, return its result */ if (dev->driver_info->check_connect) - return dev->driver_info->check_connect (dev) == 0; + return dev->driver_info->check_connect(dev) == 0; /* if the device has mii operations, use those */ if (dev->mii.mdio_read) @@ -960,19 +1084,18 @@ int usbnet_nway_reset(struct net_device *net) } EXPORT_SYMBOL_GPL(usbnet_nway_reset); -void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info) +void usbnet_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { struct usbnet *dev = netdev_priv(net); - strlcpy (info->driver, dev->driver_name, sizeof info->driver); - strlcpy (info->version, DRIVER_VERSION, sizeof info->version); - strlcpy (info->fw_version, dev->driver_info->description, - sizeof info->fw_version); - usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info); + strscpy(info->driver, dev->driver_name, sizeof(info->driver)); + strscpy(info->fw_version, dev->driver_info->description, + sizeof(info->fw_version)); + usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); } EXPORT_SYMBOL_GPL(usbnet_get_drvinfo); -u32 usbnet_get_msglevel (struct net_device *net) +u32 usbnet_get_msglevel(struct net_device *net) { struct usbnet *dev = netdev_priv(net); @@ -980,7 +1103,7 @@ u32 usbnet_get_msglevel (struct net_device *net) } EXPORT_SYMBOL_GPL(usbnet_get_msglevel); -void usbnet_set_msglevel (struct net_device *net, u32 level) +void usbnet_set_msglevel(struct net_device *net, u32 level) { struct usbnet *dev = netdev_priv(net); @@ -990,14 +1113,14 @@ EXPORT_SYMBOL_GPL(usbnet_set_msglevel); /* drivers may override default ethtool_ops in their bind() routine */ static const struct ethtool_ops usbnet_ethtool_ops = { - .get_settings = usbnet_get_settings, - .set_settings = usbnet_set_settings, .get_link = usbnet_get_link, .nway_reset = usbnet_nway_reset, .get_drvinfo = usbnet_get_drvinfo, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_ts_info = ethtool_op_get_ts_info, + 
.get_link_ksettings = usbnet_get_link_ksettings_mii, + .set_link_ksettings = usbnet_set_link_ksettings_mii, }; /*-------------------------------------------------------------------------*/ @@ -1007,6 +1130,9 @@ static void __handle_link_change(struct usbnet *dev) if (!test_bit(EVENT_DEV_OPEN, &dev->flags)) return; + if (test_and_clear_bit(EVENT_LINK_CARRIER_ON, &dev->flags)) + netif_carrier_on(dev->net); + if (!netif_carrier_ok(dev->net)) { /* kill URBs for reading packets to save bus bandwidth */ unlink_urbs(dev, &dev->rxq); @@ -1017,95 +1143,116 @@ static void __handle_link_change(struct usbnet *dev) */ } else { /* submitting URBs for reading packets */ - tasklet_schedule(&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); } + /* hard_mtu or rx_urb_size may change during link change */ + usbnet_update_max_qlen(dev); + clear_bit(EVENT_LINK_CHANGE, &dev->flags); } +void usbnet_set_rx_mode(struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + + usbnet_defer_kevent(dev, EVENT_SET_RX_MODE); +} +EXPORT_SYMBOL_GPL(usbnet_set_rx_mode); + +static void __handle_set_rx_mode(struct usbnet *dev) +{ + if (dev->driver_info->set_rx_mode) + (dev->driver_info->set_rx_mode)(dev); + + clear_bit(EVENT_SET_RX_MODE, &dev->flags); +} + /* work that cannot be done in interrupt context uses keventd. * * NOTE: with 2.5 we could do more of this using completion callbacks, * especially now that control transfers can be queued. */ static void -kevent (struct work_struct *work) +usbnet_deferred_kevent(struct work_struct *work) { struct usbnet *dev = container_of(work, struct usbnet, kevent); int status; /* usb_clear_halt() needs a thread context */ - if (test_bit (EVENT_TX_HALT, &dev->flags)) { - unlink_urbs (dev, &dev->txq); + if (test_bit(EVENT_TX_HALT, &dev->flags)) { + unlink_urbs(dev, &dev->txq); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_pipe; - status = usb_clear_halt (dev->udev, dev->out); + status = usb_clear_halt(dev->udev, dev->out); usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { - if (netif_msg_tx_err (dev)) + if (netif_msg_tx_err(dev)) fail_pipe: netdev_err(dev->net, "can't clear tx halt, status %d\n", status); } else { - clear_bit (EVENT_TX_HALT, &dev->flags); + clear_bit(EVENT_TX_HALT, &dev->flags); if (status != -ESHUTDOWN) - netif_wake_queue (dev->net); + netif_wake_queue(dev->net); } } - if (test_bit (EVENT_RX_HALT, &dev->flags)) { - unlink_urbs (dev, &dev->rxq); + if (test_bit(EVENT_RX_HALT, &dev->flags)) { + unlink_urbs(dev, &dev->rxq); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto fail_halt; - status = usb_clear_halt (dev->udev, dev->in); + status = usb_clear_halt(dev->udev, dev->in); usb_autopm_put_interface(dev->intf); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { - if (netif_msg_rx_err (dev)) + if (netif_msg_rx_err(dev)) fail_halt: netdev_err(dev->net, "can't clear rx halt, status %d\n", status); } else { - clear_bit (EVENT_RX_HALT, &dev->flags); - tasklet_schedule (&dev->bh); + clear_bit(EVENT_RX_HALT, &dev->flags); + if (!usbnet_going_away(dev)) + queue_work(system_bh_wq, &dev->bh_work); } } - /* tasklet could resubmit itself forever if memory is tight */ - if (test_bit (EVENT_RX_MEMORY, &dev->flags)) { + /* work could resubmit itself forever if memory is tight */ + if (test_bit(EVENT_RX_MEMORY, &dev->flags)) { struct urb *urb = NULL; int resched = 1; - if (netif_running (dev->net)) - urb = usb_alloc_urb (0, GFP_KERNEL); + if (netif_running(dev->net)) + urb = 
usb_alloc_urb(0, GFP_KERNEL); else - clear_bit (EVENT_RX_MEMORY, &dev->flags); + clear_bit(EVENT_RX_MEMORY, &dev->flags); if (urb != NULL) { - clear_bit (EVENT_RX_MEMORY, &dev->flags); + clear_bit(EVENT_RX_MEMORY, &dev->flags); status = usb_autopm_get_interface(dev->intf); if (status < 0) { usb_free_urb(urb); goto fail_lowmem; } - if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) + if (rx_submit(dev, urb, GFP_KERNEL) == -ENOLINK) resched = 0; usb_autopm_put_interface(dev->intf); fail_lowmem: if (resched) - tasklet_schedule (&dev->bh); + if (!usbnet_going_away(dev)) + queue_work(system_bh_wq, &dev->bh_work); } } if (test_bit (EVENT_LINK_RESET, &dev->flags)) { - struct driver_info *info = dev->driver_info; + const struct driver_info *info = dev->driver_info; int retval = 0; - clear_bit (EVENT_LINK_RESET, &dev->flags); + clear_bit(EVENT_LINK_RESET, &dev->flags); status = usb_autopm_get_interface(dev->intf); if (status < 0) goto skip_reset; @@ -1125,31 +1272,39 @@ skip_reset: __handle_link_change(dev); } - if (test_bit (EVENT_LINK_CHANGE, &dev->flags)) + if (test_bit(EVENT_LINK_CHANGE, &dev->flags)) __handle_link_change(dev); + if (test_bit(EVENT_SET_RX_MODE, &dev->flags)) + __handle_set_rx_mode(dev); + + if (dev->flags) netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); } /*-------------------------------------------------------------------------*/ -static void tx_complete (struct urb *urb) +static void tx_complete(struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct skb_data *entry = (struct skb_data *) skb->cb; struct usbnet *dev = entry->dev; if (urb->status == 0) { - if (!(dev->driver_info->flags & FLAG_MULTI_PACKET)) - dev->net->stats.tx_packets++; - dev->net->stats.tx_bytes += entry->length; + struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats); + unsigned long flags; + + flags = u64_stats_update_begin_irqsave(&stats64->syncp); + u64_stats_add(&stats64->tx_packets, entry->packets); + u64_stats_add(&stats64->tx_bytes, entry->length); + u64_stats_update_end_irqrestore(&stats64->syncp, flags); } else { dev->net->stats.tx_errors++; switch (urb->status) { case -EPIPE: - usbnet_defer_kevent (dev, EVENT_TX_HALT); + usbnet_defer_kevent(dev, EVENT_TX_HALT); break; /* software-driven interface shutdown */ @@ -1157,19 +1312,20 @@ static void tx_complete (struct urb *urb) case -ESHUTDOWN: // hardware gone break; - // like rx, tx gets controller i/o faults during khubd delays - // and so it uses the same throttling mechanism. + /* like rx, tx gets controller i/o faults during hub_wq + * delays and so it uses the same throttling mechanism. + */ case -EPROTO: case -ETIME: case -EILSEQ: usb_mark_last_busy(dev->udev); - if (!timer_pending (&dev->delay)) { - mod_timer (&dev->delay, - jiffies + THROTTLE_JIFFIES); + if (!timer_pending(&dev->delay)) { + mod_timer(&dev->delay, + jiffies + THROTTLE_JIFFIES); netif_dbg(dev, link, dev->net, "tx throttle %d\n", urb->status); } - netif_stop_queue (dev->net); + netif_stop_queue(dev->net); break; default: netif_dbg(dev, tx_err, dev->net, @@ -1184,27 +1340,63 @@ static void tx_complete (struct urb *urb) /*-------------------------------------------------------------------------*/ -void usbnet_tx_timeout (struct net_device *net) +void usbnet_tx_timeout(struct net_device *net, unsigned int txqueue) { struct usbnet *dev = netdev_priv(net); - unlink_urbs (dev, &dev->txq); - tasklet_schedule (&dev->bh); - - // FIXME: device recovery -- reset? 
+ unlink_urbs(dev, &dev->txq); + queue_work(system_bh_wq, &dev->bh_work); + /* this needs to be handled individually because the generic layer + * doesn't know what is sufficient and could not restore private + * information if a remedy of an unconditional reset were used. + */ + if (dev->driver_info->recover) + (dev->driver_info->recover)(dev); } EXPORT_SYMBOL_GPL(usbnet_tx_timeout); /*-------------------------------------------------------------------------*/ -netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, - struct net_device *net) +static int build_dma_sg(const struct sk_buff *skb, struct urb *urb) +{ + unsigned num_sgs, total_len = 0; + int i, s = 0; + + num_sgs = skb_shinfo(skb)->nr_frags + 1; + if (num_sgs == 1) + return 0; + + /* reserve one for zero packet */ + urb->sg = kmalloc_array(num_sgs + 1, sizeof(struct scatterlist), + GFP_ATOMIC); + if (!urb->sg) + return -ENOMEM; + + urb->num_sgs = num_sgs; + sg_init_table(urb->sg, urb->num_sgs + 1); + + sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb)); + total_len += skb_headlen(skb); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + + total_len += skb_frag_size(f); + sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f), + skb_frag_off(f)); + } + urb->transfer_buffer_length = total_len; + + return 1; +} + +netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, struct net_device *net) { struct usbnet *dev = netdev_priv(net); - int length; + unsigned int length; struct urb *urb = NULL; struct skb_data *entry; - struct driver_info *info = dev->driver_info; + const struct driver_info *info = dev->driver_info; unsigned long flags; int retval; @@ -1214,7 +1406,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, // some devices want funky USB-level framing, for // win32 driver (usually) and/or hardware quirks if (info->tx_fixup) { - skb = info->tx_fixup (dev, skb, GFP_ATOMIC); + skb = info->tx_fixup(dev, skb, GFP_ATOMIC); if (!skb) { /* packet collected; minidriver waiting for more */ if (info->flags & FLAG_MULTI_PACKET) @@ -1223,9 +1415,9 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, goto drop; } } - length = skb->len; - if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) { + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { netif_dbg(dev, tx_err, dev->net, "no urb\n"); goto drop; } @@ -1233,10 +1425,14 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, entry = (struct skb_data *) skb->cb; entry->urb = urb; entry->dev = dev; - entry->length = length; - usb_fill_bulk_urb (urb, dev->udev, dev->out, - skb->data, skb->len, tx_complete, skb); + usb_fill_bulk_urb(urb, dev->udev, dev->out, + skb->data, skb->len, tx_complete, skb); + if (dev->can_dma_sg) { + if (build_dma_sg(skb, urb) < 0) + goto drop; + } + length = urb->transfer_buffer_length; /* don't assume the hardware handles USB_ZERO_PACKET * NOTE: strictly conforming cdc-ether devices should expect @@ -1248,15 +1444,30 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, if (length % dev->maxpacket == 0) { if (!(info->flags & FLAG_SEND_ZLP)) { if (!(info->flags & FLAG_MULTI_PACKET)) { - urb->transfer_buffer_length++; - if (skb_tailroom(skb)) { + length++; + if (skb_tailroom(skb) && !urb->num_sgs) { skb->data[skb->len] = 0; __skb_put(skb, 1); - } + } else if (urb->num_sgs) + sg_set_buf(&urb->sg[urb->num_sgs++], + dev->padding_pkt, 1); } } else urb->transfer_flags |= URB_ZERO_PACKET; } + urb->transfer_buffer_length = length; + + if (info->flags & FLAG_MULTI_PACKET) { + /* Driver has set number of packets and a 
length delta. + * Calculate the complete length and ensure that it's + * positive. + */ + entry->length += length; + if (WARN_ON_ONCE(entry->length <= 0)) + entry->length = length; + } else { + usbnet_set_skb_tx_stats(skb, 1, length); + } spin_lock_irqsave(&dev->txq.lock, flags); retval = usb_autopm_get_interface_async(dev->intf); @@ -1264,6 +1475,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, spin_unlock_irqrestore(&dev->txq.lock, flags); goto drop; } + if (netif_queue_stopped(net)) { + usb_autopm_put_interface_async(dev->intf); + spin_unlock_irqrestore(&dev->txq.lock, flags); + goto drop; + } #ifdef CONFIG_PM /* if this triggers the device is still a sleep */ @@ -1281,8 +1497,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) { case -EPIPE: - netif_stop_queue (net); - usbnet_defer_kevent (dev, EVENT_TX_HALT); + netif_stop_queue(net); + usbnet_defer_kevent(dev, EVENT_TX_HALT); usb_autopm_put_interface_async(dev->intf); break; default: @@ -1291,12 +1507,13 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, "tx: submit urb err %d\n", retval); break; case 0: - net->trans_start = jiffies; + netif_trans_update(net); __usbnet_queue_skb(&dev->txq, skb, tx_start); + netdev_sent_queue(net, skb->len); if (dev->txq.qlen >= TX_QLEN (dev)) netif_stop_queue (net); } - spin_unlock_irqrestore (&dev->txq.lock, flags); + spin_unlock_irqrestore(&dev->txq.lock, flags); if (retval) { netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval); @@ -1304,11 +1521,14 @@ drop: dev->net->stats.tx_dropped++; not_drop: if (skb) - dev_kfree_skb_any (skb); - usb_free_urb (urb); + dev_kfree_skb_any(skb); + if (urb) { + kfree(urb->sg); + usb_free_urb(urb); + } } else netif_dbg(dev, tx_queued, dev->net, - "> tx, len %d, type 0x%x\n", length, skb->protocol); + "> tx, len %u, type 0x%x\n", length, skb->protocol); #ifdef CONFIG_PM deferred: #endif @@ -1338,13 +1558,22 @@ err: return ret; } +static inline void usb_free_skb(struct sk_buff *skb) +{ + struct skb_data *entry = (struct skb_data *)skb->cb; + + usb_free_urb(entry->urb); + dev_kfree_skb(skb); +} + /*-------------------------------------------------------------------------*/ -// tasklet (work deferred from completions, in_irq) or timer +// work (work deferred from completions, in_irq) or timer -static void usbnet_bh (unsigned long param) +static void usbnet_bh(struct timer_list *t) { - struct usbnet *dev = (struct usbnet *) param; + struct usbnet *dev = timer_container_of(dev, t, delay); + unsigned int bytes_compl = 0, pkts_compl = 0; struct sk_buff *skb; struct skb_data *entry; @@ -1352,34 +1581,44 @@ static void usbnet_bh (unsigned long param) entry = (struct skb_data *) skb->cb; switch (entry->state) { case rx_done: - entry->state = rx_cleanup; - rx_process (dev, skb); + if (rx_process(dev, skb)) + usb_free_skb(skb); continue; case tx_done: + bytes_compl += skb->len; + pkts_compl++; + kfree(entry->urb->sg); + fallthrough; case rx_cleanup: - usb_free_urb (entry->urb); - dev_kfree_skb (skb); + usb_free_skb(skb); continue; default: netdev_dbg(dev->net, "bogus skb state %d\n", entry->state); } } + spin_lock_bh(&dev->bql_spinlock); + netdev_completed_queue(dev->net, pkts_compl, bytes_compl); + spin_unlock_bh(&dev->bql_spinlock); + /* restart RX again after disabling due to high error rate */ clear_bit(EVENT_RX_KILL, &dev->flags); - // waiting for all pending urbs to complete? 
- if (dev->wait) { - if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) { - wake_up (dev->wait); - } + /* waiting for all pending urbs to complete? + * only then can we forgo submitting anew + */ + if (waitqueue_active(&dev->wait)) { + if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0) + wake_up_all(&dev->wait); // or are we maybe short a few urbs? } else if (netif_running (dev->net) && netif_device_present (dev->net) && netif_carrier_ok(dev->net) && - !timer_pending (&dev->delay) && - !test_bit (EVENT_RX_HALT, &dev->flags)) { + !usbnet_going_away(dev) && + !timer_pending(&dev->delay) && + !test_bit(EVENT_RX_PAUSED, &dev->flags) && + !test_bit(EVENT_RX_HALT, &dev->flags)) { int temp = dev->rxq.qlen; if (temp < RX_QLEN(dev)) { @@ -1390,13 +1629,20 @@ static void usbnet_bh (unsigned long param) "rxqlen %d --> %d\n", temp, dev->rxq.qlen); if (dev->rxq.qlen < RX_QLEN(dev)) - tasklet_schedule (&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); } if (dev->txq.qlen < TX_QLEN (dev)) - netif_wake_queue (dev->net); + netif_wake_queue(dev->net); } } +static void usbnet_bh_work(struct work_struct *work) +{ + struct usbnet *dev = from_work(dev, work, bh_work); + + usbnet_bh(&dev->delay); +} + /*------------------------------------------------------------------------- * @@ -1406,18 +1652,20 @@ static void usbnet_bh (unsigned long param) // precondition: never called in_interrupt -void usbnet_disconnect (struct usb_interface *intf) +void usbnet_disconnect(struct usb_interface *intf) { struct usbnet *dev; struct usb_device *xdev; struct net_device *net; + struct urb *urb; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (!dev) return; + usbnet_mark_going_away(dev); - xdev = interface_to_usbdev (intf); + xdev = interface_to_usbdev(intf); netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n", intf->dev.driver->name, @@ -1425,17 +1673,22 @@ void usbnet_disconnect (struct usb_interface *intf) dev->driver_info->description); net = dev->net; - unregister_netdev (net); + unregister_netdev(net); cancel_work_sync(&dev->kevent); - usb_scuttle_anchored_urbs(&dev->deferred); + while ((urb = usb_get_from_anchor(&dev->deferred))) { + dev_kfree_skb(urb->context); + kfree(urb->sg); + usb_free_urb(urb); + } if (dev->driver_info->unbind) - dev->driver_info->unbind (dev, intf); + dev->driver_info->unbind(dev, intf); usb_kill_urb(dev->interrupt); usb_free_urb(dev->interrupt); + kfree(dev->padding_pkt); free_netdev(net); } @@ -1446,6 +1699,7 @@ static const struct net_device_ops usbnet_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_set_rx_mode = usbnet_set_rx_mode, .ndo_change_mtu = usbnet_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -1455,21 +1709,21 @@ static const struct net_device_ops usbnet_netdev_ops = { // precondition: never called in_interrupt -static struct device_type wlan_type = { +static const struct device_type wlan_type = { .name = "wlan", }; -static struct device_type wwan_type = { +static const struct device_type wwan_type = { .name = "wwan", }; int -usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) +usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod) { struct usbnet *dev; struct net_device *net; struct usb_host_interface *interface; - struct driver_info *info; + const struct driver_info *info; struct usb_device *xdev; int status; const char *name; @@ -1485,12 +1739,12 @@ usbnet_probe (struct 
usb_interface *udev, const struct usb_device_id *prod) } name = udev->dev.driver->name; - info = (struct driver_info *) prod->driver_info; + info = (const struct driver_info *) prod->driver_info; if (!info) { dev_dbg (&udev->dev, "blacklisted by %s\n", name); return -ENODEV; } - xdev = interface_to_usbdev (udev); + xdev = interface_to_usbdev(udev); interface = udev->cur_altsetting; status = -ENOMEM; @@ -1508,62 +1762,59 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev->intf = udev; dev->driver_info = info; dev->driver_name = name; + dev->rx_speed = SPEED_UNSET; + dev->tx_speed = SPEED_UNSET; + dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK); + init_waitqueue_head(&dev->wait); skb_queue_head_init (&dev->rxq); skb_queue_head_init (&dev->txq); skb_queue_head_init (&dev->done); skb_queue_head_init(&dev->rxq_pause); - dev->bh.func = usbnet_bh; - dev->bh.data = (unsigned long) dev; - INIT_WORK (&dev->kevent, kevent); + spin_lock_init(&dev->bql_spinlock); + INIT_WORK(&dev->bh_work, usbnet_bh_work); + INIT_WORK(&dev->kevent, usbnet_deferred_kevent); init_usb_anchor(&dev->deferred); - dev->delay.function = usbnet_bh; - dev->delay.data = (unsigned long) dev; - init_timer (&dev->delay); - mutex_init (&dev->phy_mutex); + timer_setup(&dev->delay, usbnet_bh, 0); + mutex_init(&dev->phy_mutex); mutex_init(&dev->interrupt_mutex); dev->interrupt_count = 0; dev->net = net; - strcpy (net->name, "usb%d"); - memcpy (net->dev_addr, node_id, sizeof node_id); + strscpy(net->name, "usb%d", sizeof(net->name)); /* rx and tx sides can use different message sizes; * bind() should set rx_urb_size in that case. */ dev->hard_mtu = net->mtu + net->hard_header_len; -#if 0 -// dma_supported() is deeply broken on almost all architectures - // possible with some EHCI controllers - if (dma_supported (&udev->dev, DMA_BIT_MASK(64))) - net->features |= NETIF_F_HIGHDMA; -#endif + net->min_mtu = 0; + net->max_mtu = ETH_MAX_MTU; net->netdev_ops = &usbnet_netdev_ops; net->watchdog_timeo = TX_TIMEOUT_JIFFIES; net->ethtool_ops = &usbnet_ethtool_ops; + net->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; // allow device-specific bind/init procedures // NOTE net->name still not usable ... if (info->bind) { - status = info->bind (dev, udev); + status = info->bind(dev, udev); if (status < 0) goto out1; - // heuristic: "usb%d" for links we know are two-host, - // else "eth%d" when there's reasonable doubt. userspace - // can rename the link if it knows better. 
+ /* heuristic: rename to "eth%d" if we are not sure this link + * is two-host (these links keep "usb%d") + */ if ((dev->driver_info->flags & FLAG_ETHER) != 0 && - ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 || - (net->dev_addr [0] & 0x02) == 0)) - strcpy (net->name, "eth%d"); + !usbnet_needs_usb_name_format(dev, net)) + strscpy(net->name, "eth%d", sizeof(net->name)); /* WLAN devices should always be named "wlan%d" */ if ((dev->driver_info->flags & FLAG_WLAN) != 0) - strcpy(net->name, "wlan%d"); + strscpy(net->name, "wlan%d", sizeof(net->name)); /* WWAN devices should always be named "wwan%d" */ if ((dev->driver_info->flags & FLAG_WWAN) != 0) - strcpy(net->name, "wwan%d"); + strscpy(net->name, "wwan%d", sizeof(net->name)); /* devices that cannot do ARP */ if ((dev->driver_info->flags & FLAG_NOARP) != 0) @@ -1573,35 +1824,62 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) if (net->mtu > (dev->hard_mtu - net->hard_header_len)) net->mtu = dev->hard_mtu - net->hard_header_len; } else if (!info->in || !info->out) - status = usbnet_get_endpoints (dev, udev); + status = usbnet_get_endpoints(dev, udev); else { - dev->in = usb_rcvbulkpipe (xdev, info->in); - dev->out = usb_sndbulkpipe (xdev, info->out); + u8 ep_addrs[3] = { + info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0 + }; + + dev->in = usb_rcvbulkpipe(xdev, info->in); + dev->out = usb_sndbulkpipe(xdev, info->out); if (!(info->flags & FLAG_NO_SETINT)) - status = usb_set_interface (xdev, - interface->desc.bInterfaceNumber, - interface->desc.bAlternateSetting); + status = usb_set_interface(xdev, + interface->desc.bInterfaceNumber, + interface->desc.bAlternateSetting); else status = 0; + if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs)) + status = -EINVAL; } if (status >= 0 && dev->status) - status = init_status (dev, udev); + status = init_status(dev, udev); if (status < 0) goto out3; if (!dev->rx_urb_size) dev->rx_urb_size = dev->hard_mtu; - dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); + dev->maxpacket = usb_maxpacket(dev->udev, dev->out); + if (dev->maxpacket == 0) { + /* that is a broken device */ + status = -ENODEV; + goto out4; + } + + /* this flags the device for user space */ + if (!is_valid_ether_addr(net->dev_addr)) + eth_hw_addr_random(net); if ((dev->driver_info->flags & FLAG_WLAN) != 0) SET_NETDEV_DEVTYPE(net, &wlan_type); if ((dev->driver_info->flags & FLAG_WWAN) != 0) SET_NETDEV_DEVTYPE(net, &wwan_type); - status = register_netdev (net); + /* initialize max rx_qlen and tx_qlen */ + usbnet_update_max_qlen(dev); + + if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) && + !(info->flags & FLAG_MULTI_PACKET)) { + dev->padding_pkt = kzalloc(1, GFP_KERNEL); + if (!dev->padding_pkt) { + status = -ENOMEM; + goto out4; + } + } + + status = register_netdev(net); if (status) - goto out4; + goto out5; netif_info(dev, probe, dev->net, "register '%s' at usb-%s-%s, %s, %pM\n", udev->dev.driver->name, @@ -1610,21 +1888,31 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) net->dev_addr); // ok, it's ready to go. 
- usb_set_intfdata (udev, dev); + usb_set_intfdata(udev, dev); - netif_device_attach (net); + netif_device_attach(net); if (dev->driver_info->flags & FLAG_LINK_INTR) usbnet_link_change(dev, 0, 0); return 0; +out5: + kfree(dev->padding_pkt); out4: usb_free_urb(dev->interrupt); out3: if (info->unbind) - info->unbind (dev, udev); + info->unbind(dev, udev); out1: + /* subdrivers must undo all they did in bind() if they + * fail it, but we may fail later and a deferred kevent + * may trigger an error resubmitting itself and, worse, + * schedule a timer. So we kill it all just in case. + */ + usbnet_mark_going_away(dev); + cancel_work_sync(&dev->kevent); + timer_delete_sync(&dev->delay); free_netdev(net); out: return status; @@ -1638,7 +1926,7 @@ EXPORT_SYMBOL_GPL(usbnet_probe); * resume only when the last interface is resumed */ -int usbnet_suspend (struct usb_interface *intf, pm_message_t message) +int usbnet_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); @@ -1657,7 +1945,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message) * accelerate emptying of the rx and queues, to avoid * having everything error out. */ - netif_device_detach (dev->net); + netif_device_detach(dev->net); usbnet_terminate_urbs(dev); __usbnet_status_stop_force(dev); @@ -1665,13 +1953,13 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message) * reattach so runtime management can use and * wake the device */ - netif_device_attach (dev->net); + netif_device_attach(dev->net); } return 0; } EXPORT_SYMBOL_GPL(usbnet_suspend); -int usbnet_resume (struct usb_interface *intf) +int usbnet_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); struct sk_buff *skb; @@ -1689,10 +1977,11 @@ int usbnet_resume (struct usb_interface *intf) retval = usb_submit_urb(res, GFP_ATOMIC); if (retval < 0) { dev_kfree_skb_any(skb); + kfree(res->sg); usb_free_urb(res); usb_autopm_put_interface_async(dev->intf); } else { - dev->net->trans_start = jiffies; + netif_trans_update(dev->net); __skb_queue_tail(&dev->txq, skb); } } @@ -1702,16 +1991,17 @@ int usbnet_resume (struct usb_interface *intf) spin_unlock_irq(&dev->txq.lock); if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { - /* handle remote wakeup ASAP */ - if (!dev->wait && - netif_device_present(dev->net) && + /* handle remote wakeup ASAP + * we cannot race against stop + */ + if (netif_device_present(dev->net) && !timer_pending(&dev->delay) && !test_bit(EVENT_RX_HALT, &dev->flags)) rx_alloc_submit(dev, GFP_NOIO); if (!(dev->txq.qlen >= TX_QLEN(dev))) netif_tx_wake_all_queues(dev->net); - tasklet_schedule (&dev->bh); + queue_work(system_bh_wq, &dev->bh_work); } } @@ -1749,10 +2039,12 @@ EXPORT_SYMBOL(usbnet_manage_power); void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset) { /* update link after link is reseted */ - if (link && !need_reset) - netif_carrier_on(dev->net); - else + if (link && !need_reset) { + set_bit(EVENT_LINK_CARRIER_ON, &dev->flags); + } else { + clear_bit(EVENT_LINK_CARRIER_ON, &dev->flags); netif_carrier_off(dev->net); + } if (need_reset && link) usbnet_defer_kevent(dev, EVENT_LINK_RESET); @@ -1772,8 +2064,8 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, " value=0x%04x index=0x%04x size=%d\n", cmd, reqtype, value, index, size); - if (data) { - buf = kmalloc(size, GFP_KERNEL); + if (size) { + buf = kmalloc(size, GFP_NOIO); if (!buf) goto out; } @@ -1781,8 +2073,13 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 
cmd, u8 reqtype, err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), cmd, reqtype, value, index, buf, size, USB_CTRL_GET_TIMEOUT); - if (err > 0 && err <= size) - memcpy(data, buf, err); + if (err > 0 && err <= size) { + if (data) + memcpy(data, buf, err); + else + netdev_dbg(dev->net, + "Huh? Data requested but thrown away.\n"); + } kfree(buf); out: return err; @@ -1800,10 +2097,16 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, cmd, reqtype, value, index, size); if (data) { - buf = kmemdup(data, size, GFP_KERNEL); + buf = kmemdup(data, size, GFP_NOIO); if (!buf) goto out; - } + } else { + if (size) { + WARN_ON_ONCE(1); + err = -EINVAL; + goto out; + } + } err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), cmd, reqtype, value, index, buf, size, @@ -1895,7 +2198,7 @@ static void usbnet_async_cmd_cb(struct urb *urb) int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, u16 value, u16 index, const void *data, u16 size) { - struct usb_ctrlrequest *req = NULL; + struct usb_ctrlrequest *req; struct urb *urb; int err = -ENOMEM; void *buf = NULL; @@ -1905,18 +2208,15 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, cmd, reqtype, value, index, size); urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netdev_err(dev->net, "Error allocating URB in" - " %s!\n", __func__); + if (!urb) goto fail; - } if (data) { buf = kmemdup(data, size, GFP_ATOMIC); if (!buf) { netdev_err(dev->net, "Error allocating buffer" " in %s!\n", __func__); - goto fail_free; + goto fail_free_urb; } } @@ -1940,14 +2240,21 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, if (err < 0) { netdev_err(dev->net, "Error submitting the control" " message: status=%d\n", err); - goto fail_free; + goto fail_free_all; } return 0; +fail_free_all: + kfree(req); fail_free_buf: kfree(buf); -fail_free: - kfree(req); + /* + * avoid a double free + * needed because the flag can be set only + * after filling the URB + */ + urb->transfer_flags = 0; +fail_free_urb: usb_free_urb(urb); fail: return err; @@ -1960,9 +2267,8 @@ static int __init usbnet_init(void) { /* Compiler should optimize this out. */ BUILD_BUG_ON( - FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data)); + sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data)); - eth_random_addr(node_id); return 0; } module_init(usbnet_init); |
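
The central behavioural change in this patch is that RX_QLEN()/TX_QLEN() stop being speed-keyed macros and become per-device fields which usbnet_update_max_qlen() recomputes whenever hard_mtu or rx_urb_size may have changed (MTU change, reset(), link_reset(), link change). Below is a minimal standalone sketch of that arithmetic, using only the MAX_QUEUE_MEMORY constant and the 5x super-speed factor visible in the hunks above; the helper and main() are illustrative userspace code, not the kernel implementation.

```c
/* Standalone sketch of the queue-depth arithmetic behind
 * usbnet_update_max_qlen() in the patch above. MAX_QUEUE_MEMORY and the
 * 5x super-speed scaling mirror the diff; everything else is illustrative.
 */
#include <stdio.h>

#define MAX_QUEUE_MEMORY (60 * 1518)	/* memory budget: ~60 max-size frames */

static unsigned int queue_len(unsigned int urb_size, int super_speed)
{
	if (!urb_size)
		return 4;	/* mirrors the "insanity" fallback in the patch */
	return (super_speed ? 5 : 1) * MAX_QUEUE_MEMORY / urb_size;
}

int main(void)
{
	/* typical Ethernet framing: rx_urb_size == hard_mtu == 1518 bytes */
	printf("high speed : %u URBs\n", queue_len(1518, 0));	/* 60 */
	printf("super speed: %u URBs\n", queue_len(1518, 1));	/* 300 */
	return 0;
}
```

For standard 1518-byte framing this yields the same 60-URB depth the old high-speed macros produced, and five times that for super-speed controllers, while jumbo-sized rx URBs naturally shrink the queue, which is why the patch recomputes the lengths after every MTU, reset and link change.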
