Diffstat (limited to 'drivers/usb/host/xhci.c')
-rw-r--r-- | drivers/usb/host/xhci.c | 258 |
1 file changed, 169 insertions, 89 deletions
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index c057c42c36f4..1a90ebc8a30e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -8,6 +8,7 @@
  * Some code borrowed from the Linux EHCI driver.
  */
 
+#include <linux/jiffies.h>
 #include <linux/pci.h>
 #include <linux/iommu.h>
 #include <linux/iopoll.h>
@@ -16,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
+#include <linux/string_choices.h>
 #include <linux/dmi.h>
 #include <linux/dma-mapping.h>
 
@@ -40,15 +42,15 @@ MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
 
 static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
 {
-	struct xhci_segment *seg = ring->first_seg;
+	struct xhci_segment *seg;
 
 	if (!td || !td->start_seg)
 		return false;
-	do {
+
+	xhci_for_each_ring_seg(ring->first_seg, seg) {
 		if (seg == td->start_seg)
 			return true;
-		seg = seg->next;
-	} while (seg && seg != ring->first_seg);
+	}
 
 	return false;
 }
@@ -346,6 +348,23 @@ static int xhci_disable_interrupter(struct xhci_interrupter *ir)
 	return 0;
 }
 
+/* interrupt moderation interval imod_interval in nanoseconds */
+int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
+				    u32 imod_interval)
+{
+	u32 imod;
+
+	if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250)
+		return -EINVAL;
+
+	imod = readl(&ir->ir_set->irq_control);
+	imod &= ~ER_IRQ_INTERVAL_MASK;
+	imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
+	writel(imod, &ir->ir_set->irq_control);
+
+	return 0;
+}
+
 static void compliance_mode_recovery(struct timer_list *t)
 {
 	struct xhci_hcd *xhci;
@@ -456,14 +475,7 @@ static int xhci_init(struct usb_hcd *hcd)
 
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
 	spin_lock_init(&xhci->lock);
-	if (xhci->hci_version == 0x95 && link_quirk) {
-		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
-				"QUIRK: Not clearing Link TRB chain bits.");
-		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
-	} else {
-		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-				"xHCI doesn't need link TRB QUIRK");
-	}
+
 	retval = xhci_mem_init(xhci, GFP_KERNEL);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
 
@@ -528,7 +540,6 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
  */
 int xhci_run(struct usb_hcd *hcd)
 {
-	u32 temp;
 	u64 temp_64;
 	int ret;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -538,6 +549,9 @@ int xhci_run(struct usb_hcd *hcd)
 	 */
 	hcd->uses_new_polling = 1;
 
+	if (hcd->msi_enabled)
+		ir->ip_autoclear = true;
+
 	if (!usb_hcd_is_primary_hcd(hcd))
 		return xhci_run_finished(xhci);
 
@@ -548,12 +562,7 @@ int xhci_run(struct usb_hcd *hcd)
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
 
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Set the interrupt modulation register");
-	temp = readl(&ir->ir_set->irq_control);
-	temp &= ~ER_IRQ_INTERVAL_MASK;
-	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
-	writel(temp, &ir->ir_set->irq_control);
+	xhci_set_interrupter_moderation(ir, xhci->imod_interval);
 
 	if (xhci->quirks & XHCI_NEC_HOST) {
 		struct xhci_command *command;
@@ -771,28 +780,14 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
 	struct xhci_segment *seg;
 
 	ring = xhci->cmd_ring;
-	seg = ring->deq_seg;
-	do {
-		memset(seg->trbs, 0,
-			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
-		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
-			cpu_to_le32(~TRB_CYCLE);
-		seg = seg->next;
-	} while (seg != ring->deq_seg);
-
-	/* Reset the software enqueue and dequeue pointers */
-	ring->deq_seg = ring->first_seg;
-	ring->dequeue = ring->first_seg->trbs;
-	ring->enq_seg = ring->deq_seg;
-	ring->enqueue = ring->dequeue;
-
-	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
-	/*
-	 * Ring is now zeroed, so the HW should look for change of ownership
-	 * when the cycle bit is set to 1.
-	 */
-	ring->cycle_state = 1;
+	xhci_for_each_ring_seg(ring->first_seg, seg) {
+		/* erase all TRBs before the link */
+		memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+		/* clear link cycle bit */
+		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
+	}
+	xhci_initialize_ring_info(ring);
 	/*
 	 * Reset the hardware dequeue pointer.
 	 * Yes, this will need to be re-written after resume, but we're paranoid
@@ -1123,10 +1118,20 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
 			xhci_dbg(xhci, "Start the secondary HCD\n");
 			retval = xhci_run(xhci->shared_hcd);
 		}
-
+		if (retval)
+			return retval;
+		/*
+		 * Resume roothubs unconditionally as PORTSC change bits are not
+		 * immediately visible after xHC reset
+		 */
 		hcd->state = HC_STATE_SUSPENDED;
-		if (xhci->shared_hcd)
+
+		if (xhci->shared_hcd) {
 			xhci->shared_hcd->state = HC_STATE_SUSPENDED;
+			usb_hcd_resume_root_hub(xhci->shared_hcd);
+		}
+		usb_hcd_resume_root_hub(hcd);
+
 		goto done;
 	}
 
@@ -1150,7 +1155,6 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
 
 	xhci_dbc_resume(xhci);
 
- done:
 	if (retval == 0) {
 		/*
 		 * Resume roothubs only if there are pending events.
@@ -1176,6 +1180,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
 			usb_hcd_resume_root_hub(hcd);
 		}
 	}
+done:
 	/*
 	 * If system is subject to the Quirk, Compliance Mode Timer needs to
 	 * be re-initialized Always after a system resume. Ports are subject
@@ -1217,6 +1222,8 @@ static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
 
 	temp = kzalloc_node(buf_len, GFP_ATOMIC,
 			    dev_to_node(hcd->self.sysdev));
+	if (!temp)
+		return -ENOMEM;
 
 	if (usb_urb_dir_out(urb))
 		sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
@@ -1742,7 +1749,7 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 				urb->ep->desc.bEndpointAddress,
 				(unsigned long long) xhci_trb_virt_to_dma(
 						urb_priv->td[i].start_seg,
-						urb_priv->td[i].first_trb));
+						urb_priv->td[i].start_trb));
 
 		for (; i < urb_priv->num_tds; i++) {
 			td = &urb_priv->td[i];
@@ -1754,15 +1761,27 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		}
 	}
 
-	/* Queue a stop endpoint command, but only if this is
-	 * the first cancellation to be handled.
-	 */
-	if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
+	/* These completion handlers will sort out cancelled TDs for us */
+	if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) {
+		xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n",
+			 urb->dev->slot_id, ep_index, ep->ep_state);
+		goto done;
+	}
+
+	/* In this case no commands are pending but the endpoint is stopped */
+	if (ep->ep_state & EP_CLEARING_TT) {
+		/* and cancelled TDs can be given back right away */
+		xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n",
+			 urb->dev->slot_id, ep_index, ep->ep_state);
+		xhci_process_cancelled_tds(ep);
+	} else {
+		/* Otherwise, queue a new Stop Endpoint command */
 		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
 		if (!command) {
 			ret = -ENOMEM;
 			goto done;
 		}
+		ep->stop_time = jiffies;
 		ep->ep_state |= EP_STOP_CMD_PENDING;
 		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
 					 ep_index, 0);
@@ -2259,7 +2278,7 @@ static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
 	struct xhci_tt_bw_info *tt_info;
 
 	/* Find the bandwidth table for the root port this TT is attached to. */
-	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
+	bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table;
 	tt_info = virt_dev->tt_info;
 	/* If this TT already had active endpoints, the bandwidth for this TT
 	 * has already been added.  Removing all periodic endpoints (and thus
@@ -2377,7 +2396,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
 	if (virt_dev->tt_info) {
 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 				"Recalculating BW for rootport %u",
-				virt_dev->real_port);
+				virt_dev->rhub_port->hw_portnum + 1);
 		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
 			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
 					"newly activated TT.\n");
@@ -2390,7 +2409,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
 	} else {
 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 				"Recalculating BW for rootport %u",
-				virt_dev->real_port);
+				virt_dev->rhub_port->hw_portnum + 1);
 	}
 
 	/* Add in how much bandwidth will be used for interval zero, or the
@@ -2487,14 +2506,12 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
 	bw_used += overhead + packet_size;
 
 	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
-		unsigned int port_index = virt_dev->real_port - 1;
-
 		/* OK, we're manipulating a HS device attached to a
 		 * root port bandwidth domain.  Include the number of active TTs
 		 * in the bandwidth used.
 		 */
 		bw_used += TT_HS_OVERHEAD *
-			xhci->rh_bw[port_index].num_active_tts;
+			xhci->rh_bw[virt_dev->rhub_port->hw_portnum].num_active_tts;
 	}
 
 	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
@@ -2681,7 +2698,7 @@ void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
 	if (!virt_dev->tt_info)
 		return;
 
-	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
+	rh_bw_info = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum];
 	if (old_active_eps == 0 &&
 				virt_dev->tt_info->active_eps != 0) {
 		rh_bw_info->num_active_tts += 1;
@@ -2782,6 +2799,51 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
 	return -ENOMEM;
 }
 
+/*
+ * Synchronous XHCI stop endpoint helper. Issues the stop endpoint command and
+ * waits for the command completion before returning. This does not call
+ * xhci_handle_cmd_stop_ep(), which has additional handling for 'context error'
+ * cases, along with transfer ring cleanup.
+ *
+ * xhci_stop_endpoint_sync() is intended to be utilized by clients that manage
+ * their own transfer ring, such as offload situations.
+ */
+int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend,
+			    gfp_t gfp_flags)
+{
+	struct xhci_command *command;
+	unsigned long flags;
+	int ret;
+
+	command = xhci_alloc_command(xhci, true, gfp_flags);
+	if (!command)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id,
+				       ep->ep_index, suspend);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		goto out;
+	}
+
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	wait_for_completion(command->completion);
+
+	/* No handling for COMP_CONTEXT_STATE_ERROR done at command completion*/
+	if (command->status == COMP_COMMAND_ABORTED ||
+	    command->status == COMP_COMMAND_RING_STOPPED) {
+		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
+		ret = -ETIME;
+	}
+out:
+	xhci_free_command(xhci, command);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xhci_stop_endpoint_sync);
 
 /* Issue a configure endpoint command or evaluate context command
  * and wait for it to finish.
@@ -2825,7 +2887,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 				xhci->num_active_eps);
 		return -ENOMEM;
 	}
-	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
+	if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change &&
 	    xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
 		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
 			xhci_free_host_resources(xhci, ctrl_ctx);
@@ -3680,6 +3742,8 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
 				xhci->num_active_eps);
 }
 
+static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+
 /*
  * This submits a Reset Device Command, which will set the device state to 0,
  * set the device address to 0, and disable all the endpoints except the default
@@ -3750,6 +3814,23 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
 							SLOT_STATE_DISABLED)
 		return 0;
 
+	if (xhci->quirks & XHCI_ETRON_HOST) {
+		/*
+		 * Obtaining a new device slot to inform the xHCI host that
+		 * the USB device has been reset.
+		 */
+		ret = xhci_disable_slot(xhci, udev->slot_id);
+		xhci_free_virt_device(xhci, udev->slot_id);
+		if (!ret) {
+			ret = xhci_alloc_dev(hcd, udev);
+			if (ret == 1)
+				ret = 0;
+			else
+				ret = -EINVAL;
+		}
+		return ret;
+	}
+
 	trace_xhci_discover_or_reset_device(slot_ctx);
 
 	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
@@ -4188,8 +4269,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 		mutex_unlock(&xhci->mutex);
 		ret = xhci_disable_slot(xhci, udev->slot_id);
 		xhci_free_virt_device(xhci, udev->slot_id);
-		if (!ret)
-			xhci_alloc_dev(hcd, udev);
+		if (!ret) {
+			if (xhci_alloc_dev(hcd, udev) == 1)
+				xhci_setup_addressable_virt_dev(xhci, udev);
+		}
 		kfree(command->completion);
 		kfree(command);
 		return -EPROTO;
@@ -4445,7 +4528,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
 	hlpm_addr = ports[port_num]->addr + PORTHLPMC;
 
 	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
-			enable ? "enable" : "disable", port_num + 1);
+			str_enable_disable(enable), port_num + 1);
 
 	if (enable) {
 		/* Host supports BESL timeout instead of HIRD */
@@ -4505,35 +4588,27 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
 	return 0;
 }
 
-/* check if a usb2 port supports a given extened capability protocol
- * only USB2 ports extended protocol capability values are cached.
- * Return 1 if capability is supported
- */
-static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
-					   unsigned capability)
-{
-	u32 port_offset, port_count;
-	int i;
-
-	for (i = 0; i < xhci->num_ext_caps; i++) {
-		if (xhci->ext_caps[i] & capability) {
-			/* port offsets starts at 1 */
-			port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
-			port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
-			if (port >= port_offset &&
-			    port < port_offset + port_count)
-				return 1;
-		}
-	}
-	return 0;
-}
-
 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	int		portnum = udev->portnum - 1;
+	struct xhci_port *port;
+	u32 capability;
+
+	/* Check if USB3 device at root port is tunneled over USB4 */
+	if (hcd->speed >= HCD_USB3 && !udev->parent->parent) {
+		port = xhci->usb3_rhub.ports[udev->portnum - 1];
+
+		udev->tunnel_mode = xhci_port_is_tunneled(xhci, port);
+		if (udev->tunnel_mode == USB_LINK_UNKNOWN)
+			dev_dbg(&udev->dev, "link tunnel state unknown\n");
+		else if (udev->tunnel_mode == USB_LINK_TUNNELED)
+			dev_dbg(&udev->dev, "tunneled over USB4 link\n");
+		else if (udev->tunnel_mode == USB_LINK_NATIVE)
+			dev_dbg(&udev->dev, "native USB 3.x link\n");
+		return 0;
+	}
 
-	if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
+	if (hcd->speed >= HCD_USB3 || !udev->lpm_capable || !xhci->hw_lpm_support)
 		return 0;
 
 	/* we only support lpm for non-hub device connected to root hub yet */
@@ -4541,14 +4616,14 @@ static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
 			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
 		return 0;
 
-	if (xhci->hw_lpm_support == 1 &&
-			xhci_check_usb2_port_capability(
-				xhci, portnum, XHCI_HLC)) {
+	port = xhci->usb2_rhub.ports[udev->portnum - 1];
+	capability = port->port_cap->protocol_caps;
+
+	if (capability & XHCI_HLC) {
 		udev->usb2_hw_lpm_capable = 1;
 		udev->l1_params.timeout = XHCI_L1_TIMEOUT;
 		udev->l1_params.besl = XHCI_DEFAULT_BESL;
-		if (xhci_check_usb2_port_capability(xhci, portnum,
-					XHCI_BLC))
+		if (capability & XHCI_BLC)
 			udev->usb2_hw_lpm_besl_capable = 1;
 	}
 
@@ -5245,6 +5320,11 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 	if (xhci->hci_version > 0x96)
 		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
 
+	if (xhci->hci_version == 0x95 && link_quirk) {
+		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits");
+		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
+	}
+
 	/* Make sure the HC is halted. */
 	retval = xhci_halt(xhci);
 	if (retval)
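
A note on the interrupt moderation hunks above: xhci_run() no longer programs the IMODI field inline; the new xhci_set_interrupter_moderation() helper rejects intervals above U16_MAX * 250 ns and converts the nanosecond value into the register's 250 ns units. Below is a minimal, standalone sketch of just that conversion in plain C (not driver code); the 0xffff mask is an assumption mirroring the low 16 bits that ER_IRQ_INTERVAL_MASK covers.

/* Standalone illustration of the ns -> IMODI conversion; not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define IMODI_MASK 0xffffu	/* assumed equivalent of ER_IRQ_INTERVAL_MASK */

static int imodi_from_ns(uint32_t interval_ns, uint32_t *imodi)
{
	/* same bound as the driver: the field counts 250 ns units in 16 bits */
	if (interval_ns > (uint32_t)UINT16_MAX * 250)
		return -1;			/* analogous to -EINVAL */

	*imodi = (interval_ns / 250) & IMODI_MASK;
	return 0;
}

int main(void)
{
	uint32_t imodi;

	/* the driver's default 40000 ns interval becomes 160 (160 * 250 ns) */
	if (!imodi_from_ns(40000, &imodi))
		printf("IMODI = %u\n", imodi);
	return 0;
}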
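
Several hunks swap virt_dev->real_port - 1 for virt_dev->rhub_port->hw_portnum when indexing xhci->rh_bw[]: real_port is a 1-based root-hub port number while hw_portnum is a 0-based index, so both expressions select the same array slot. A trivial standalone check of that equivalence, with made-up values:

#include <assert.h>

int main(void)
{
	int real_port = 3;			/* 1-based, as in virt_dev->real_port */
	int hw_portnum = real_port - 1;		/* 0-based, as in rhub_port->hw_portnum */

	/* both forms index the same rh_bw[] entry (here, slot 2) */
	assert(real_port - 1 == hw_portnum);
	return 0;
}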
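
The newly exported xhci_stop_endpoint_sync() wraps command allocation, queuing, the doorbell ring and the completion wait. A hedged sketch of how an offload-style client might call it follows; my_offload_stop_ep() and the way the xhci_virt_ep pointer was obtained are hypothetical, and the prototype is assumed to be declared in xhci.h alongside this export. Only the helper's signature comes from the hunk above.

#include <linux/gfp.h>
#include "xhci.h"	/* assumed to declare xhci_stop_endpoint_sync() */

/* Hypothetical caller: requires a sleepable context and a valid endpoint. */
static int my_offload_stop_ep(struct xhci_hcd *xhci, struct xhci_virt_ep *ep)
{
	int ret;

	/* blocks until the Stop Endpoint command completes or is aborted */
	ret = xhci_stop_endpoint_sync(xhci, ep, 0, GFP_KERNEL);
	if (ret)
		xhci_warn(xhci, "offload: stop endpoint failed: %d\n", ret);

	return ret;
}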