Diffstat (limited to 'drivers/usb/host/xhci.c')
| -rw-r--r-- | drivers/usb/host/xhci.c | 1942 |
1 file changed, 1190 insertions, 752 deletions
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 005e65922608..02c9bfe21ae2 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -8,18 +8,22 @@ * Some code borrowed from the Linux EHCI driver. */ +#include <linux/jiffies.h> #include <linux/pci.h> +#include <linux/iommu.h> +#include <linux/iopoll.h> #include <linux/irq.h> #include <linux/log2.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/dmi.h> #include <linux/dma-mapping.h> +#include <linux/usb/xhci-sideband.h> #include "xhci.h" #include "xhci-trace.h" -#include "xhci-mtk.h" #include "xhci-debugfs.h" #include "xhci-dbgcap.h" @@ -37,22 +41,34 @@ static unsigned long long quirks; module_param(quirks, ullong, S_IRUGO); MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default"); +void xhci_portsc_writel(struct xhci_port *port, u32 val) +{ + trace_xhci_portsc_writel(port, val); + writel(val, &port->port_reg->portsc); +} +EXPORT_SYMBOL_GPL(xhci_portsc_writel); + +u32 xhci_portsc_readl(struct xhci_port *port) +{ + return readl(&port->port_reg->portsc); +} +EXPORT_SYMBOL_GPL(xhci_portsc_readl); + static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) { - struct xhci_segment *seg = ring->first_seg; + struct xhci_segment *seg; if (!td || !td->start_seg) return false; - do { + + xhci_for_each_ring_seg(ring->first_seg, seg) { if (seg == td->start_seg) return true; - seg = seg->next; - } while (seg && seg != ring->first_seg); + } return false; } -/* TODO: copied from ehci-hcd.c - can this be refactored? */ /* * xhci_handshake - spin reading hc until handshake completes or fails * @ptr: address of hc register to be read @@ -66,21 +82,19 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) * handshake done). There are two failure modes: "usec" have passed (major * hardware flakeout), or the register reads as all-ones (hardware removed). */ -int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec) +int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us) { u32 result; + int ret; - do { - result = readl(ptr); - if (result == ~(u32)0) /* card removed */ - return -ENODEV; - result &= mask; - if (result == done) - return 0; - udelay(1); - usec--; - } while (usec > 0); - return -ETIMEDOUT; + ret = readl_poll_timeout_atomic(ptr, result, + (result & mask) == done || + result == U32_MAX, + 1, timeout_us); + if (result == U32_MAX) /* card removed */ + return -ENODEV; + + return ret; } /* @@ -113,17 +127,21 @@ void xhci_quiesce(struct xhci_hcd *xhci) int xhci_halt(struct xhci_hcd *xhci) { int ret; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC"); xhci_quiesce(xhci); ret = xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); if (ret) { - xhci_warn(xhci, "Host halt failed, %d\n", ret); + if (!(xhci->xhc_state & XHCI_STATE_DYING)) + xhci_warn(xhci, "Host halt failed, %d\n", ret); return ret; } + xhci->xhc_state |= XHCI_STATE_HALTED; xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; + return ret; } @@ -151,9 +169,11 @@ int xhci_start(struct xhci_hcd *xhci) xhci_err(xhci, "Host took too long to start, " "waited %u microseconds.\n", XHCI_MAX_HALT_USEC); - if (!ret) + if (!ret) { /* clear state flags. 
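The rewritten xhci_handshake() above delegates the open-coded poll loop to readl_poll_timeout_atomic() from linux/iopoll.h, which re-reads the register on a 1 us period until the condition holds or timeout_us elapses, leaving the last value read in result. That is why the all-ones "card removed" check is still valid even after the poll returns -ETIMEDOUT. A minimal user-space sketch of the same pattern follows; mock_readl() and the one-iteration-per-microsecond pacing are illustrative stand-ins, not driver code:

#include <errno.h>
#include <stdint.h>

static uint32_t mock_readl(const volatile uint32_t *ptr)
{
	return *ptr;			/* stand-in for the MMIO accessor */
}

static int handshake_sketch(const volatile uint32_t *ptr, uint32_t mask,
			    uint32_t done, uint64_t timeout_us)
{
	uint32_t result = 0;
	uint64_t waited_us = 0;

	/* poll until the masked bits match, the device reads as gone,
	 * or the timeout budget is spent; result keeps the last read */
	do {
		result = mock_readl(ptr);
		if ((result & mask) == done || result == UINT32_MAX)
			break;
	} while (waited_us++ < timeout_us);

	if (result == UINT32_MAX)	/* card removed */
		return -ENODEV;

	return (result & mask) == done ? 0 : -ETIMEDOUT;
}
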
Including dying, halted or removing */ xhci->xhc_state = 0; + xhci->run_graceperiod = jiffies + msecs_to_jiffies(500); + } return ret; } @@ -165,7 +185,7 @@ int xhci_start(struct xhci_hcd *xhci) * Transactions will be terminated immediately, and operational registers * will be set to their defaults. */ -int xhci_reset(struct xhci_hcd *xhci) +int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) { u32 command; u32 state; @@ -174,7 +194,8 @@ int xhci_reset(struct xhci_hcd *xhci) state = readl(&xhci->op_regs->status); if (state == ~(u32)0) { - xhci_warn(xhci, "Host not accessible, reset failed.\n"); + if (!(xhci->xhc_state & XHCI_STATE_DYING)) + xhci_warn(xhci, "Host not accessible, reset failed.\n"); return -ENODEV; } @@ -198,8 +219,7 @@ int xhci_reset(struct xhci_hcd *xhci) if (xhci->quirks & XHCI_INTEL_HOST) udelay(1000); - ret = xhci_handshake(&xhci->op_regs->command, - CMD_RESET, 0, 10 * 1000 * 1000); + ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us); if (ret) return ret; @@ -212,8 +232,7 @@ int xhci_reset(struct xhci_hcd *xhci) * xHCI cannot write to any doorbells or operational registers other * than status until the "Controller Not Ready" flag is cleared. */ - ret = xhci_handshake(&xhci->op_regs->status, - STS_CNR, 0, 10 * 1000 * 1000); + ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us); xhci->usb2_rhub.bus_state.port_c_suspend = 0; xhci->usb2_rhub.bus_state.suspended_ports = 0; @@ -228,6 +247,7 @@ int xhci_reset(struct xhci_hcd *xhci) static void xhci_zero_64b_regs(struct xhci_hcd *xhci) { struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + struct iommu_domain *domain; int err, i; u64 val; @@ -245,7 +265,9 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci) * an iommu. Doing anything when there is no iommu is definitely * unsafe... */ - if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev)) + domain = iommu_get_domain_for_dev(dev); + if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain || + domain->type == IOMMU_DOMAIN_IDENTITY) return; xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n"); @@ -268,7 +290,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci) if (upper_32_bits(val)) xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); - for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) { + for (i = 0; i < xhci->max_interrupters; i++) { struct xhci_intr_reg __iomem *ir; ir = &xhci->run_regs->ir_set[i]; @@ -288,192 +310,62 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci) xhci_info(xhci, "Fault detected\n"); } -#ifdef CONFIG_USB_PCI -/* - * Set up MSI - */ -static int xhci_setup_msi(struct xhci_hcd *xhci) +int xhci_enable_interrupter(struct xhci_interrupter *ir) { - int ret; - /* - * TODO:Check with MSI Soc for sysdev - */ - struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); - - ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); - if (ret < 0) { - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "failed to allocate MSI entry"); - return ret; - } + u32 iman; - ret = request_irq(pdev->irq, xhci_msi_irq, - 0, "xhci_hcd", xhci_to_hcd(xhci)); - if (ret) { - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "disable MSI interrupt"); - pci_free_irq_vectors(pdev); - } - - return ret; -} - -/* - * Set up MSI-X - */ -static int xhci_setup_msix(struct xhci_hcd *xhci) -{ - int i, ret = 0; - struct usb_hcd *hcd = xhci_to_hcd(xhci); - struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - - /* - * calculate number of msi-x vectors supported. 
- * - HCS_MAX_INTRS: the max number of interrupts the host can handle, - * with max number of interrupters based on the xhci HCSPARAMS1. - * - num_online_cpus: maximum msi-x vectors per CPUs core. - * Add additional 1 vector to ensure always available interrupt. - */ - xhci->msix_count = min(num_online_cpus() + 1, - HCS_MAX_INTRS(xhci->hcs_params1)); - - ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count, - PCI_IRQ_MSIX); - if (ret < 0) { - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Failed to enable MSI-X"); - return ret; - } - - for (i = 0; i < xhci->msix_count; i++) { - ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0, - "xhci_hcd", xhci_to_hcd(xhci)); - if (ret) - goto disable_msix; - } + if (!ir || !ir->ir_set) + return -EINVAL; - hcd->msix_enabled = 1; - return ret; + iman = readl(&ir->ir_set->iman); + iman &= ~IMAN_IP; + iman |= IMAN_IE; + writel(iman, &ir->ir_set->iman); -disable_msix: - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt"); - while (--i >= 0) - free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci)); - pci_free_irq_vectors(pdev); - return ret; + /* Read operation to guarantee the write has been flushed from posted buffers */ + readl(&ir->ir_set->iman); + return 0; } -/* Free any IRQs and disable MSI-X */ -static void xhci_cleanup_msix(struct xhci_hcd *xhci) +int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) { - struct usb_hcd *hcd = xhci_to_hcd(xhci); - struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + u32 iman; - if (xhci->quirks & XHCI_PLAT) - return; - - /* return if using legacy interrupt */ - if (hcd->irq > 0) - return; - - if (hcd->msix_enabled) { - int i; - - for (i = 0; i < xhci->msix_count; i++) - free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci)); - } else { - free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci)); - } - - pci_free_irq_vectors(pdev); - hcd->msix_enabled = 0; -} + if (!ir || !ir->ir_set) + return -EINVAL; -static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci) -{ - struct usb_hcd *hcd = xhci_to_hcd(xhci); + iman = readl(&ir->ir_set->iman); + iman &= ~IMAN_IP; + iman &= ~IMAN_IE; + writel(iman, &ir->ir_set->iman); - if (hcd->msix_enabled) { - struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - int i; + iman = readl(&ir->ir_set->iman); + if (iman & IMAN_IP) + xhci_dbg(xhci, "%s: Interrupt pending\n", __func__); - for (i = 0; i < xhci->msix_count; i++) - synchronize_irq(pci_irq_vector(pdev, i)); - } + return 0; } -static int xhci_try_enable_msi(struct usb_hcd *hcd) +/* interrupt moderation interval imod_interval in nanoseconds */ +int xhci_set_interrupter_moderation(struct xhci_interrupter *ir, + u32 imod_interval) { - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct pci_dev *pdev; - int ret; - - /* The xhci platform device has set up IRQs through usb_add_hcd. */ - if (xhci->quirks & XHCI_PLAT) - return 0; + u32 imod; - pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); - /* - * Some Fresco Logic host controllers advertise MSI, but fail to - * generate interrupts. Don't even try to enable MSI. 
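In xhci_enable_interrupter() and xhci_disable_interrupter() above, IMAN_IP is masked out of the value written back because interrupt-pending is a write-1-to-clear bit: writing 0 leaves a latched interrupt untouched, while writing 1 would acknowledge it as a side effect of toggling IE. A small sketch of that read-modify-write, assuming the xHCI 5.5.2.1 bit layout (IP in bit 0, IE in bit 1):

#include <stdint.h>

#define SKETCH_IMAN_IP	(1u << 0)	/* interrupt pending, RW1C */
#define SKETCH_IMAN_IE	(1u << 1)	/* interrupt enable, RW */

static uint32_t iman_set_enable(uint32_t iman, int enable)
{
	iman &= ~SKETCH_IMAN_IP;	/* never ack IP as a side effect */
	if (enable)
		iman |= SKETCH_IMAN_IE;
	else
		iman &= ~SKETCH_IMAN_IE;
	return iman;			/* value to write back to IMAN */
}
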
- */ - if (xhci->quirks & XHCI_BROKEN_MSI) - goto legacy_irq; - - /* unregister the legacy interrupt */ - if (hcd->irq) - free_irq(hcd->irq, hcd); - hcd->irq = 0; - - ret = xhci_setup_msix(xhci); - if (ret) - /* fall back to msi*/ - ret = xhci_setup_msi(xhci); - - if (!ret) { - hcd->msi_enabled = 1; - return 0; - } - - if (!pdev->irq) { - xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); + if (!ir || !ir->ir_set) return -EINVAL; - } - - legacy_irq: - if (!strlen(hcd->irq_descr)) - snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d", - hcd->driver->description, hcd->self.busnum); - /* fall back to legacy interrupt*/ - ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, - hcd->irq_descr, hcd); - if (ret) { - xhci_err(xhci, "request interrupt %d failed\n", - pdev->irq); - return ret; - } - hcd->irq = pdev->irq; - return 0; -} + /* IMODI value in IMOD register is in 250ns increments */ + imod_interval = umin(imod_interval / 250, IMODI_MASK); -#else + imod = readl(&ir->ir_set->imod); + imod &= ~IMODI_MASK; + imod |= imod_interval; + writel(imod, &ir->ir_set->imod); -static inline int xhci_try_enable_msi(struct usb_hcd *hcd) -{ return 0; } -static inline void xhci_cleanup_msix(struct xhci_hcd *xhci) -{ -} - -static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci) -{ -} - -#endif - static void compliance_mode_recovery(struct timer_list *t) { struct xhci_hcd *xhci; @@ -482,11 +374,15 @@ static void compliance_mode_recovery(struct timer_list *t) u32 temp; int i; - xhci = from_timer(xhci, t, comp_mode_recovery_timer); + xhci = timer_container_of(xhci, t, comp_mode_recovery_timer); rhub = &xhci->usb3_rhub; + hcd = rhub->hcd; + + if (!hcd) + return; for (i = 0; i < rhub->num_ports; i++) { - temp = readl(rhub->ports[i]->addr); + temp = xhci_portsc_readl(rhub->ports[i]); if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) { /* * Compliance Mode Detected. 
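xhci_set_interrupter_moderation() above converts a nanosecond interval into the IMODI field, which counts 250 ns units; umin() clamps the result to IMODI_MASK, so over-large requests saturate instead of wrapping. For example, a 40000 ns (40 us) interval programs 40000 / 250 = 160. A sketch of the conversion, assuming a 16-bit IMODI field as in the xHCI register layout:

#include <stdint.h>

#define SKETCH_IMODI_MASK	0xffffu		/* assumed 16-bit IMODI field */

static uint32_t imodi_from_ns(uint32_t interval_ns)
{
	uint32_t units = interval_ns / 250;	/* IMODI is in 250 ns steps */

	return units < SKETCH_IMODI_MASK ? units : SKETCH_IMODI_MASK;
}
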
Letting USB Core @@ -497,7 +393,6 @@ static void compliance_mode_recovery(struct timer_list *t) i + 1); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "Attempting compliance mode recovery"); - hcd = xhci->shared_hcd; if (hcd->state == HC_STATE_SUSPENDED) usb_hcd_resume_root_hub(hcd); @@ -566,6 +461,80 @@ static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1)); } +static void xhci_hcd_page_size(struct xhci_hcd *xhci) +{ + u32 page_size; + + page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK; + if (!is_power_of_2(page_size)) { + xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size); + /* Fallback to 4K page size, since that's common */ + page_size = 1; + } + + xhci->page_size = page_size << 12; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK", + xhci->page_size >> 10); +} + +static void xhci_enable_max_dev_slots(struct xhci_hcd *xhci) +{ + u32 config_reg; + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHC can handle at most %d device slots", + xhci->max_slots); + + config_reg = readl(&xhci->op_regs->config_reg); + config_reg &= ~HCS_SLOTS_MASK; + config_reg |= xhci->max_slots; + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting Max device slots reg = 0x%x", + config_reg); + writel(config_reg, &xhci->op_regs->config_reg); +} + +static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) +{ + dma_addr_t deq_dma; + u64 crcr; + + deq_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue); + deq_dma &= CMD_RING_PTR_MASK; + + crcr = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + crcr &= ~CMD_RING_PTR_MASK; + crcr |= deq_dma; + + crcr &= ~CMD_RING_CYCLE; + crcr |= xhci->cmd_ring->cycle_state; + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting command ring address to 0x%llx", crcr); + xhci_write_64(xhci, crcr, &xhci->op_regs->cmd_ring); +} + +static void xhci_set_doorbell_ptr(struct xhci_hcd *xhci) +{ + u32 offset; + + offset = readl(&xhci->cap_regs->db_off) & DBOFF_MASK; + xhci->dba = (void __iomem *)xhci->cap_regs + offset; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Doorbell array is located at offset 0x%x from cap regs base addr", offset); +} + +/* + * Enable USB 3.0 device notifications for function remote wake, which is necessary + * for allowing USB 3.0 devices to do remote wakeup from U3 (device suspend). + */ +static void xhci_set_dev_notifications(struct xhci_hcd *xhci) +{ + u32 dev_notf; + + dev_notf = readl(&xhci->op_regs->dev_notification); + dev_notf &= ~DEV_NOTE_MASK; + dev_notf |= DEV_NOTE_FWAKE; + writel(dev_notf, &xhci->op_regs->dev_notification); +} /* * Initialize memory for HCD and xHC (one-time init). 
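xhci_hcd_page_size() above relies on the PAGE_SIZE capability encoding: the register holds a single set bit n, and the page size is 2^(n+12) bytes, so the fallback value of 1 corresponds to the common 4K case. A worked decode, assuming the 16-bit field selected by XHCI_PAGE_SIZE_MASK:

#include <stdint.h>

static uint32_t page_bytes_from_reg(uint32_t page_size_reg)
{
	uint32_t bits = page_size_reg & 0xffffu;	/* XHCI_PAGE_SIZE_MASK */

	/* zero or multiple bits set is invalid: fall back to 4K (bit 0) */
	if (!bits || (bits & (bits - 1)))
		bits = 1;

	return bits << 12;	/* bit 0 -> 4096, bit 3 -> 32768, ... */
}

So a register value of 0x0001 decodes to 4096 bytes, matching the "HCD page size set to 4K" trace output.
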
@@ -577,20 +546,39 @@ static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) static int xhci_init(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); - int retval = 0; + int retval; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Starting %s", __func__); spin_lock_init(&xhci->lock); - if (xhci->hci_version == 0x95 && link_quirk) { - xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, - "QUIRK: Not clearing Link TRB chain bits."); - xhci->quirks |= XHCI_LINK_TRB_QUIRK; - } else { - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "xHCI doesn't need link TRB QUIRK"); - } + + INIT_LIST_HEAD(&xhci->cmd_list); + INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); + init_completion(&xhci->cmd_ring_stop_completion); + xhci_hcd_page_size(xhci); + memset(xhci->devs, 0, MAX_HC_SLOTS * sizeof(*xhci->devs)); + retval = xhci_mem_init(xhci, GFP_KERNEL); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); + if (retval) + return retval; + + /* Set the Number of Device Slots Enabled to the maximum supported value */ + xhci_enable_max_dev_slots(xhci); + + /* Set the address in the Command Ring Control register */ + xhci_set_cmd_ring_deq(xhci); + + /* Set Device Context Base Address Array pointer */ + xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr); + + /* Set Doorbell array pointer */ + xhci_set_doorbell_ptr(xhci); + + /* Set USB 3.0 device notifications for function remote wake */ + xhci_set_dev_notifications(xhci); + + /* Initialize the Primary interrupter */ + xhci_add_interrupter(xhci, 0); + xhci->interrupters[0]->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX; /* Initializing Compliance Mode Recovery Data If Needed */ if (xhci_compliance_mode_recovery_timer_quirk_check()) { @@ -598,26 +586,45 @@ static int xhci_init(struct usb_hcd *hcd) compliance_mode_recovery_timer_init(xhci); } - return retval; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished %s", __func__); + return 0; } /*-------------------------------------------------------------------------*/ - static int xhci_run_finished(struct xhci_hcd *xhci) { + struct xhci_interrupter *ir = xhci->interrupters[0]; + unsigned long flags; + u32 temp; + + /* + * Enable interrupts before starting the host (xhci 4.2 and 5.5.2). + * Protect the short window before host is running with a lock + */ + spin_lock_irqsave(&xhci->lock, flags); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts"); + temp = readl(&xhci->op_regs->command); + temp |= (CMD_EIE); + writel(temp, &xhci->op_regs->command); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter"); + xhci_enable_interrupter(ir); + if (xhci_start(xhci)) { xhci_halt(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); return -ENODEV; } - xhci->shared_hcd->state = HC_STATE_RUNNING; + xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; if (xhci->quirks & XHCI_NEC_HOST) xhci_ring_cmd_db(xhci); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Finished xhci_run for USB3 roothub"); + spin_unlock_irqrestore(&xhci->lock, flags); + return 0; } @@ -635,49 +642,29 @@ static int xhci_run_finished(struct xhci_hcd *xhci) */ int xhci_run(struct usb_hcd *hcd) { - u32 temp; u64 temp_64; int ret; struct xhci_hcd *xhci = hcd_to_xhci(hcd); - + struct xhci_interrupter *ir = xhci->interrupters[0]; /* Start the xHCI host controller running only after the USB 2.0 roothub * is setup. 
*/ hcd->uses_new_polling = 1; + if (hcd->msi_enabled) + ir->ip_autoclear = true; + if (!usb_hcd_is_primary_hcd(hcd)) return xhci_run_finished(xhci); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run"); - ret = xhci_try_enable_msi(hcd); - if (ret) - return ret; - - temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); - temp_64 &= ~ERST_PTR_MASK; + temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); + temp_64 &= ERST_PTR_MASK; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "ERST deq = 64'h%0lx", (long unsigned int) temp_64); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Set the interrupt modulation register"); - temp = readl(&xhci->ir_set->irq_control); - temp &= ~ER_IRQ_INTERVAL_MASK; - temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK; - writel(temp, &xhci->ir_set->irq_control); - - /* Set the HCD state before we enable the irqs */ - temp = readl(&xhci->op_regs->command); - temp |= (CMD_EIE); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Enable interrupts, cmd = 0x%x.", temp); - writel(temp, &xhci->op_regs->command); - - temp = readl(&xhci->ir_set->irq_pending); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Enabling event ring interrupter %p by writing 0x%x to irq_pending", - xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); - writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); + xhci_set_interrupter_moderation(ir, xhci->imod_interval); if (xhci->quirks & XHCI_NEC_HOST) { struct xhci_command *command; @@ -692,12 +679,17 @@ int xhci_run(struct usb_hcd *hcd) xhci_free_command(xhci, command); } xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Finished xhci_run for USB2 roothub"); + "Finished %s for main hcd", __func__); - xhci_dbc_init(xhci); + xhci_create_dbc_dev(xhci); xhci_debugfs_init(xhci); + if (xhci_has_one_roothub(xhci)) + return xhci_run_finished(xhci); + + set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags); + return 0; } EXPORT_SYMBOL_GPL(xhci_run); @@ -711,10 +703,11 @@ EXPORT_SYMBOL_GPL(xhci_run); * Disable device contexts, disable IRQs, and quiesce the HC. * Reset the HC, finish any completed transactions, and cleanup memory. 
*/ -static void xhci_stop(struct usb_hcd *hcd) +void xhci_stop(struct usb_hcd *hcd) { u32 temp; struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_interrupter *ir = xhci->interrupters[0]; mutex_lock(&xhci->mutex); @@ -724,21 +717,19 @@ static void xhci_stop(struct usb_hcd *hcd) return; } - xhci_dbc_exit(xhci); + xhci_remove_dbc_dev(xhci); spin_lock_irq(&xhci->lock); xhci->xhc_state |= XHCI_STATE_HALTED; xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; xhci_halt(xhci); - xhci_reset(xhci); + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); spin_unlock_irq(&xhci->lock); - xhci_cleanup_msix(xhci); - /* Deleting Compliance Mode Recovery Timer */ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && (!(xhci_all_ports_seen_u0(xhci)))) { - del_timer_sync(&xhci->comp_mode_recovery_timer); + timer_delete_sync(&xhci->comp_mode_recovery_timer); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "%s: compliance mode recovery timer deleted", __func__); @@ -751,8 +742,7 @@ static void xhci_stop(struct usb_hcd *hcd) "// Disabling event ring interrupts"); temp = readl(&xhci->op_regs->status); writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); - temp = readl(&xhci->ir_set->irq_pending); - writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); + xhci_disable_interrupter(xhci, ir); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); xhci_mem_cleanup(xhci); @@ -762,6 +752,7 @@ static void xhci_stop(struct usb_hcd *hcd) readl(&xhci->op_regs->status)); mutex_unlock(&xhci->mutex); } +EXPORT_SYMBOL_GPL(xhci_stop); /* * Shutdown HC (not bus-specific) @@ -772,73 +763,91 @@ static void xhci_stop(struct usb_hcd *hcd) * * This will only ever be called with the main usb_hcd (the USB3 roothub). */ -static void xhci_shutdown(struct usb_hcd *hcd) +void xhci_shutdown(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); if (xhci->quirks & XHCI_SPURIOUS_REBOOT) usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev)); + /* Don't poll the roothubs after shutdown. 
*/ + xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", + __func__, hcd->self.busnum); + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); + timer_delete_sync(&hcd->rh_timer); + + if (xhci->shared_hcd) { + clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); + timer_delete_sync(&xhci->shared_hcd->rh_timer); + } + spin_lock_irq(&xhci->lock); xhci_halt(xhci); - /* Workaround for spurious wakeups at shutdown with HSW */ - if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) - xhci_reset(xhci); - spin_unlock_irq(&xhci->lock); - xhci_cleanup_msix(xhci); + /* + * Workaround for spurious wakeps at shutdown with HSW, and for boot + * firmware delay in ADL-P PCH if port are left in U3 at shutdown + */ + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP || + xhci->quirks & XHCI_RESET_TO_DEFAULT) + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); + + spin_unlock_irq(&xhci->lock); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_shutdown completed - status = %x", readl(&xhci->op_regs->status)); - - /* Yet another workaround for spurious wakeups at shutdown with HSW */ - if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) - pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot); } +EXPORT_SYMBOL_GPL(xhci_shutdown); #ifdef CONFIG_PM static void xhci_save_registers(struct xhci_hcd *xhci) { + struct xhci_interrupter *ir; + unsigned int i; + xhci->s3.command = readl(&xhci->op_regs->command); xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); - xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); - xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base); - xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); - xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); - xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); + + /* save both primary and all secondary interrupters */ + /* fixme, shold we lock to prevent race with remove secondary interrupter? 
*/ + for (i = 0; i < xhci->max_interrupters; i++) { + ir = xhci->interrupters[i]; + if (!ir) + continue; + + ir->s3_erst_size = readl(&ir->ir_set->erst_size); + ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); + ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); + ir->s3_iman = readl(&ir->ir_set->iman); + ir->s3_imod = readl(&ir->ir_set->imod); + } } static void xhci_restore_registers(struct xhci_hcd *xhci) { + struct xhci_interrupter *ir; + unsigned int i; + writel(xhci->s3.command, &xhci->op_regs->command); writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); - writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); - xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); - xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); - writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); - writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); -} -static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) -{ - u64 val_64; - - /* step 2: initialize command ring buffer */ - val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); - val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | - (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, - xhci->cmd_ring->dequeue) & - (u64) ~CMD_RING_RSVD_BITS) | - xhci->cmd_ring->cycle_state; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Setting command ring address to 0x%llx", - (long unsigned long) val_64); - xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); + /* FIXME should we lock to protect against freeing of interrupters */ + for (i = 0; i < xhci->max_interrupters; i++) { + ir = xhci->interrupters[i]; + if (!ir) + continue; + + writel(ir->s3_erst_size, &ir->ir_set->erst_size); + xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base); + xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue); + writel(ir->s3_iman, &ir->ir_set->iman); + writel(ir->s3_imod, &ir->ir_set->imod); + } } /* @@ -856,28 +865,14 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) struct xhci_segment *seg; ring = xhci->cmd_ring; - seg = ring->deq_seg; - do { - memset(seg->trbs, 0, - sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1)); - seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= - cpu_to_le32(~TRB_CYCLE); - seg = seg->next; - } while (seg != ring->deq_seg); - - /* Reset the software enqueue and dequeue pointers */ - ring->deq_seg = ring->first_seg; - ring->dequeue = ring->first_seg->trbs; - ring->enq_seg = ring->deq_seg; - ring->enqueue = ring->dequeue; - - ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1; - /* - * Ring is now zeroed, so the HW should look for change of ownership - * when the cycle bit is set to 1. - */ - ring->cycle_state = 1; + xhci_for_each_ring_seg(ring->first_seg, seg) { + /* erase all TRBs before the link */ + memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1)); + /* clear link cycle bit */ + seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE); + } + xhci_initialize_ring_info(ring); /* * Reset the hardware dequeue pointer. * Yes, this will need to be re-written after resume, but we're paranoid @@ -888,37 +883,44 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) xhci_set_cmd_ring_deq(xhci); } -static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) +/* + * Disable port wake bits if do_wakeup is not set. 
+ * + * Also clear a possible internal port wake state left hanging for ports that + * detected termination but never successfully enumerated (trained to 0U). + * Internal wake causes immediate xHCI wake after suspend. PORT_CSC write done + * at enumeration clears this wake, force one here as well for unconnected ports + */ + +static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci, + struct xhci_hub *rhub, + bool do_wakeup) { - struct xhci_port **ports; - int port_index; unsigned long flags; - u32 t1, t2; + u32 t1, t2, portsc; + int i; spin_lock_irqsave(&xhci->lock, flags); - /* disable usb3 ports Wake bits */ - port_index = xhci->usb3_rhub.num_ports; - ports = xhci->usb3_rhub.ports; - while (port_index--) { - t1 = readl(ports[port_index]->addr); - t1 = xhci_port_state_to_neutral(t1); - t2 = t1 & ~PORT_WAKE_BITS; - if (t1 != t2) - writel(t2, ports[port_index]->addr); - } - - /* disable usb2 ports Wake bits */ - port_index = xhci->usb2_rhub.num_ports; - ports = xhci->usb2_rhub.ports; - while (port_index--) { - t1 = readl(ports[port_index]->addr); - t1 = xhci_port_state_to_neutral(t1); - t2 = t1 & ~PORT_WAKE_BITS; - if (t1 != t2) - writel(t2, ports[port_index]->addr); + for (i = 0; i < rhub->num_ports; i++) { + portsc = xhci_portsc_readl(rhub->ports[i]); + t1 = xhci_port_state_to_neutral(portsc); + t2 = t1; + + /* clear wake bits if do_wake is not set */ + if (!do_wakeup) + t2 &= ~PORT_WAKE_BITS; + + /* Don't touch csc bit if connected or connect change is set */ + if (!(portsc & (PORT_CSC | PORT_CONNECT))) + t2 |= PORT_CSC; + + if (t1 != t2) { + xhci_portsc_writel(rhub->ports[i], t2); + xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n", + rhub->hcd->self.busnum, i + 1, portsc, t2); + } } - spin_unlock_irqrestore(&xhci->lock, flags); } @@ -941,7 +943,7 @@ static bool xhci_pending_portevent(struct xhci_hcd *xhci) port_index = xhci->usb2_rhub.num_ports; ports = xhci->usb2_rhub.ports; while (port_index--) { - portsc = readl(ports[port_index]->addr); + portsc = xhci_portsc_readl(ports[port_index]); if (portsc & PORT_CHANGE_MASK || (portsc & PORT_PLS_MASK) == XDEV_RESUME) return true; @@ -949,8 +951,8 @@ static bool xhci_pending_portevent(struct xhci_hcd *xhci) port_index = xhci->usb3_rhub.num_ports; ports = xhci->usb3_rhub.ports; while (port_index--) { - portsc = readl(ports[port_index]->addr); - if (portsc & PORT_CHANGE_MASK || + portsc = xhci_portsc_readl(ports[port_index]); + if (portsc & (PORT_CHANGE_MASK | PORT_CAS) || (portsc & PORT_PLS_MASK) == XDEV_RESUME) return true; } @@ -966,7 +968,7 @@ static bool xhci_pending_portevent(struct xhci_hcd *xhci) int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) { int rc = 0; - unsigned int delay = XHCI_MAX_HALT_USEC; + unsigned int delay = XHCI_MAX_HALT_USEC * 2; struct usb_hcd *hcd = xhci_to_hcd(xhci); u32 command; u32 res; @@ -975,28 +977,35 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) return 0; if (hcd->state != HC_STATE_SUSPENDED || - xhci->shared_hcd->state != HC_STATE_SUSPENDED) + (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED)) return -EINVAL; - xhci_dbc_suspend(xhci); - /* Clear root port wake on bits if wakeup not allowed. */ - if (!do_wakeup) - xhci_disable_port_wake_on_bits(xhci); + xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup); + xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup); + + if (!HCD_HW_ACCESSIBLE(hcd)) + return 0; + + xhci_dbc_suspend(xhci); /* Don't poll the roothubs on bus suspend. 
*/ - xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); + xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", + __func__, hcd->self.busnum); clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); - del_timer_sync(&hcd->rh_timer); - clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); - del_timer_sync(&xhci->shared_hcd->rh_timer); + timer_delete_sync(&hcd->rh_timer); + if (xhci->shared_hcd) { + clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); + timer_delete_sync(&xhci->shared_hcd->rh_timer); + } if (xhci->quirks & XHCI_SUSPEND_DELAY) usleep_range(1000, 1500); spin_lock_irq(&xhci->lock); clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); - clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); + if (xhci->shared_hcd) + clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); /* step 1: stop endpoint */ /* skipped assuming that port suspend has done */ @@ -1025,7 +1034,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) writel(command, &xhci->op_regs->command); xhci->broken_suspend = 0; if (xhci_handshake(&xhci->op_regs->status, - STS_SAVE, 0, 10 * 1000)) { + STS_SAVE, 0, 20 * 1000)) { /* * AMD SNPS xHC 3.0 occasionally does not clear the * SSS bit of USBSTS and when driver tries to poll @@ -1054,16 +1063,12 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) */ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && (!(xhci_all_ports_seen_u0(xhci)))) { - del_timer_sync(&xhci->comp_mode_recovery_timer); + timer_delete_sync(&xhci->comp_mode_recovery_timer); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "%s: compliance mode recovery timer deleted", __func__); } - /* step 5: remove core well power */ - /* synchronize irq when using MSI-X */ - xhci_msix_sync_irqs(xhci); - return rc; } EXPORT_SYMBOL_GPL(xhci_suspend); @@ -1074,13 +1079,14 @@ EXPORT_SYMBOL_GPL(xhci_suspend); * This is called when the machine transition from S3/S4 mode. * */ -int xhci_resume(struct xhci_hcd *xhci, bool hibernated) +int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume) { u32 command, temp = 0; struct usb_hcd *hcd = xhci_to_hcd(xhci); - struct usb_hcd *secondary_hcd; int retval = 0; bool comp_timer_running = false; + bool pending_portevent = false; + bool suspended_usb3_devs = false; if (!hcd->state) return 0; @@ -1094,13 +1100,27 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) msleep(100); set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); - set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); + if (xhci->shared_hcd) + set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); spin_lock_irq(&xhci->lock); - if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend) - hibernated = true; - if (!hibernated) { + if (xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) + power_lost = true; + + if (!power_lost) { + /* + * Some controllers might lose power during suspend, so wait + * for controller not ready bit to clear, just as in xHC init. 
+ */ + retval = xhci_handshake(&xhci->op_regs->status, + STS_CNR, 0, 10 * 1000 * 1000); + if (retval) { + xhci_warn(xhci, "Controller not ready at resume %d\n", + retval); + spin_unlock_irq(&xhci->lock); + return retval; + } /* step 1: restore register */ xhci_restore_registers(xhci); /* step 2: initialize command ring buffer */ @@ -1121,35 +1141,46 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; } - temp = readl(&xhci->op_regs->status); } - /* If restore operation fails, re-initialize the HC during resume */ - if ((temp & STS_SRE) || hibernated) { + temp = readl(&xhci->op_regs->status); + /* re-initialize the HC on Restore Error, or Host Controller Error */ + if ((temp & (STS_SRE | STS_HCE)) && + !(xhci->xhc_state & XHCI_STATE_REMOVING)) { + if (!power_lost) + xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); + power_lost = true; + } + + if (power_lost) { if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !(xhci_all_ports_seen_u0(xhci))) { - del_timer_sync(&xhci->comp_mode_recovery_timer); + timer_delete_sync(&xhci->comp_mode_recovery_timer); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "Compliance Mode Recovery Timer deleted!"); } /* Let the USB core know _both_ roothubs lost power. */ usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); - usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); + if (xhci->shared_hcd) + usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); xhci_dbg(xhci, "Stop HCD\n"); xhci_halt(xhci); xhci_zero_64b_regs(xhci); - xhci_reset(xhci); + if (xhci->xhc_state & XHCI_STATE_REMOVING) + retval = -ENODEV; + else + retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); spin_unlock_irq(&xhci->lock); - xhci_cleanup_msix(xhci); + if (retval) + return retval; xhci_dbg(xhci, "// Disabling event ring interrupts\n"); temp = readl(&xhci->op_regs->status); writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); - temp = readl(&xhci->ir_set->irq_pending); - writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); + xhci_disable_interrupter(xhci, xhci->interrupters[0]); xhci_dbg(xhci, "cleaning up memory\n"); xhci_mem_cleanup(xhci); @@ -1161,25 +1192,32 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) * first with the primary HCD, and then with the secondary HCD. * If we don't do the same, the host will never be started. */ - if (!usb_hcd_is_primary_hcd(hcd)) - secondary_hcd = hcd; - else - secondary_hcd = xhci->shared_hcd; - xhci_dbg(xhci, "Initialize the xhci_hcd\n"); - retval = xhci_init(hcd->primary_hcd); + retval = xhci_init(hcd); if (retval) return retval; comp_timer_running = true; xhci_dbg(xhci, "Start the primary HCD\n"); - retval = xhci_run(hcd->primary_hcd); - if (!retval) { + retval = xhci_run(hcd); + if (!retval && xhci->shared_hcd) { xhci_dbg(xhci, "Start the secondary HCD\n"); - retval = xhci_run(secondary_hcd); + retval = xhci_run(xhci->shared_hcd); } + if (retval) + return retval; + /* + * Resume roothubs unconditionally as PORTSC change bits are not + * immediately visible after xHC reset + */ hcd->state = HC_STATE_SUSPENDED; - xhci->shared_hcd->state = HC_STATE_SUSPENDED; + + if (xhci->shared_hcd) { + xhci->shared_hcd->state = HC_STATE_SUSPENDED; + usb_hcd_resume_root_hub(xhci->shared_hcd); + } + usb_hcd_resume_root_hub(hcd); + goto done; } @@ -1203,15 +1241,31 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) xhci_dbc_resume(xhci); - done: if (retval == 0) { - /* Resume root hubs only when have pending events. 
*/ - if (xhci_pending_portevent(xhci)) { - usb_hcd_resume_root_hub(xhci->shared_hcd); + /* + * Resume roothubs only if there are pending events. + * USB 3 devices resend U3 LFPS wake after a 100ms delay if + * the first wake signalling failed, give it that chance if + * there are suspended USB 3 devices. + */ + if (xhci->usb3_rhub.bus_state.suspended_ports || + xhci->usb3_rhub.bus_state.bus_suspended) + suspended_usb3_devs = true; + + pending_portevent = xhci_pending_portevent(xhci); + + if (suspended_usb3_devs && !pending_portevent && is_auto_resume) { + msleep(120); + pending_portevent = xhci_pending_portevent(xhci); + } + + if (pending_portevent) { + if (xhci->shared_hcd) + usb_hcd_resume_root_hub(xhci->shared_hcd); usb_hcd_resume_root_hub(hcd); } } - +done: /* * If system is subject to the Quirk, Compliance Mode Timer needs to * be re-initialized Always after a system resume. Ports are subject @@ -1225,9 +1279,12 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller)); /* Re-enable port polling. */ - xhci_dbg(xhci, "%s: starting port polling.\n", __func__); - set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); - usb_hcd_poll_rh_status(xhci->shared_hcd); + xhci_dbg(xhci, "%s: starting usb%d port polling.\n", + __func__, hcd->self.busnum); + if (xhci->shared_hcd) { + set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); + usb_hcd_poll_rh_status(xhci->shared_hcd); + } set_bit(HCD_FLAG_POLL_RH, &hcd->flags); usb_hcd_poll_rh_status(hcd); @@ -1238,10 +1295,159 @@ EXPORT_SYMBOL_GPL(xhci_resume); /*-------------------------------------------------------------------------*/ +static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb) +{ + void *temp; + int ret = 0; + unsigned int buf_len; + enum dma_data_direction dir; + + dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + buf_len = urb->transfer_buffer_length; + + temp = kzalloc_node(buf_len, GFP_ATOMIC, + dev_to_node(hcd->self.sysdev)); + if (!temp) + return -ENOMEM; + + if (usb_urb_dir_out(urb)) + sg_pcopy_to_buffer(urb->sg, urb->num_sgs, + temp, buf_len, 0); + + urb->transfer_buffer = temp; + urb->transfer_dma = dma_map_single(hcd->self.sysdev, + urb->transfer_buffer, + urb->transfer_buffer_length, + dir); + + if (dma_mapping_error(hcd->self.sysdev, + urb->transfer_dma)) { + ret = -EAGAIN; + kfree(temp); + } else { + urb->transfer_flags |= URB_DMA_MAP_SINGLE; + } + + return ret; +} + +static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd, + struct urb *urb) +{ + bool ret = false; + unsigned int i; + unsigned int len = 0; + unsigned int trb_size; + unsigned int max_pkt; + struct scatterlist *sg; + struct scatterlist *tail_sg; + + tail_sg = urb->sg; + max_pkt = xhci_usb_endpoint_maxp(urb->dev, urb->ep); + + if (!urb->num_sgs) + return ret; + + if (urb->dev->speed >= USB_SPEED_SUPER) + trb_size = TRB_CACHE_SIZE_SS; + else + trb_size = TRB_CACHE_SIZE_HS; + + if (urb->transfer_buffer_length != 0 && + !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) { + for_each_sg(urb->sg, sg, urb->num_sgs, i) { + len = len + sg->length; + if (i > trb_size - 2) { + len = len - tail_sg->length; + if (len < max_pkt) { + ret = true; + break; + } + + tail_sg = sg_next(tail_sg); + } + } + } + return ret; +} + +static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb) +{ + unsigned int len; + unsigned int buf_len; + enum dma_data_direction dir; + + dir = usb_urb_dir_in(urb) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; + + buf_len = urb->transfer_buffer_length; + + if (IS_ENABLED(CONFIG_HAS_DMA) && + (urb->transfer_flags & URB_DMA_MAP_SINGLE)) + dma_unmap_single(hcd->self.sysdev, + urb->transfer_dma, + urb->transfer_buffer_length, + dir); + + if (usb_urb_dir_in(urb)) { + len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, + urb->transfer_buffer, + buf_len, + 0); + if (len != buf_len) { + xhci_dbg(hcd_to_xhci(hcd), + "Copy from tmp buf to urb sg list failed\n"); + urb->actual_length = len; + } + } + urb->transfer_flags &= ~URB_DMA_MAP_SINGLE; + kfree(urb->transfer_buffer); + urb->transfer_buffer = NULL; +} + +/* + * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT), + * we'll copy the actual data into the TRB address register. This is limited to + * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize + * >= 8 bytes. If suitable for IDT only one Transfer TRB per TD is allowed. + */ +static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, + gfp_t mem_flags) +{ + struct xhci_hcd *xhci; + + xhci = hcd_to_xhci(hcd); + + if (xhci_urb_suitable_for_idt(urb)) + return 0; + + if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) { + if (xhci_urb_temp_buffer_required(hcd, urb)) + return xhci_map_temp_buffer(hcd, urb); + } + return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); +} + +static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) +{ + struct xhci_hcd *xhci; + bool unmap_temp_buf = false; + + xhci = hcd_to_xhci(hcd); + + if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE)) + unmap_temp_buf = true; + + if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf) + xhci_unmap_temp_buf(hcd, urb); + else + usb_hcd_unmap_urb_for_dma(hcd, urb); +} + /** * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and * HCDs. Find the index for an endpoint given its descriptor. Use the return * value to right shift 1 for the bitmask. + * @desc: USB endpoint descriptor to determine index for * * Index = (epnum * 2) + direction - 1, * where direction = 0 for OUT, 1 for IN. @@ -1258,11 +1464,12 @@ unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc) (usb_endpoint_dir_in(desc) ? 1 : 0) - 1; return index; } +EXPORT_SYMBOL_GPL(xhci_get_endpoint_index); /* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint * address from the XHCI endpoint index. */ -unsigned int xhci_get_endpoint_address(unsigned int ep_index) +static unsigned int xhci_get_endpoint_address(unsigned int ep_index) { unsigned int number = DIV_ROUND_UP(ep_index, 2); unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN; @@ -1278,15 +1485,6 @@ static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc) return 1 << (xhci_get_endpoint_index(desc) + 1); } -/* Find the flag for this endpoint (for use in the control context). Use the - * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is - * bit 1, etc. - */ -static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index) -{ - return 1 << (ep_index + 1); -} - /* Compute the last valid endpoint context index. Basically, this is the * endpoint index plus one. For slot contexts with more than valid endpoint, * we find the most significant bit set in the added contexts flags. @@ -1348,10 +1546,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, * descriptor. 
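The index formula documented above maps each non-control endpoint to a slot in the device context: index = epnum * 2 + direction - 1, with direction 0 for OUT and 1 for IN, and xhci_get_endpoint_flag() then turns the index into a control-context bitmask where bit 0 is the slot context and endpoint 0 is bit 1. A minimal sketch of both mappings:

#include <stdint.h>

static unsigned int ep_index_sketch(unsigned int epnum, int is_in)
{
	/* e.g. EP1 OUT -> 1, EP1 IN -> 2, EP15 IN -> 30 */
	return epnum * 2 + (is_in ? 1 : 0) - 1;
}

static uint32_t ep_ctx_flag_sketch(unsigned int ep_index)
{
	/* slot context is bit 0, endpoint 0 is bit 1, and so on */
	return 1u << (ep_index + 1);
}
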
If the usb_device's max packet size changes after that point, * we need to issue an evaluate context command and wait on it. */ -static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, - unsigned int ep_index, struct urb *urb) +static int xhci_check_ep0_maxpacket(struct xhci_hcd *xhci, struct xhci_virt_device *vdev) { - struct xhci_container_ctx *out_ctx; struct xhci_input_control_ctx *ctrl_ctx; struct xhci_ep_ctx *ep_ctx; struct xhci_command *command; @@ -1359,11 +1555,15 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, int hw_max_packet_size; int ret = 0; - out_ctx = xhci->devs[slot_id]->out_ctx; - ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); + ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, 0); hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); - max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc); - if (hw_max_packet_size != max_packet_size) { + max_packet_size = usb_endpoint_maxp(&vdev->udev->ep0.desc); + + if (hw_max_packet_size == max_packet_size) + return 0; + + switch (max_packet_size) { + case 8: case 16: case 32: case 64: case 9: xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, "Max Packet Size for ep 0 changed."); xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, @@ -1375,45 +1575,43 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, "Issuing evaluate context command."); - /* Set up the input context flags for the command */ - /* FIXME: This won't work if a non-default control endpoint - * changes max packet sizes. - */ - command = xhci_alloc_command(xhci, true, GFP_KERNEL); if (!command) return -ENOMEM; - command->in_ctx = xhci->devs[slot_id]->in_ctx; + command->in_ctx = vdev->in_ctx; ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); if (!ctrl_ctx) { xhci_warn(xhci, "%s: Could not get input context, bad type.\n", __func__); ret = -ENOMEM; - goto command_cleanup; + break; } /* Set up the modified control endpoint 0 */ - xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, - xhci->devs[slot_id]->out_ctx, ep_index); + xhci_endpoint_copy(xhci, vdev->in_ctx, vdev->out_ctx, 0); - ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); + ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, 0); + ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */ ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG); ctrl_ctx->drop_flags = 0; - ret = xhci_configure_endpoint(xhci, urb->dev, command, - true, false); - - /* Clean up the input context for later use by bandwidth - * functions. 
- */ + ret = xhci_configure_endpoint(xhci, vdev->udev, command, + true, false); + /* Clean up the input context for later use by bandwidth functions */ ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG); -command_cleanup: - kfree(command->completion); - kfree(command); + break; + default: + dev_dbg(&vdev->udev->dev, "incorrect max packet size %d for ep0\n", + max_packet_size); + return -EINVAL; } + + kfree(command->completion); + kfree(command); + return ret; } @@ -1431,19 +1629,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag struct urb_priv *urb_priv; int num_tds; - if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, - true, true, __func__) <= 0) - return -EINVAL; - - slot_id = urb->dev->slot_id; ep_index = xhci_get_endpoint_index(&urb->ep->desc); - ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state; - - if (!HCD_HW_ACCESSIBLE(hcd)) { - if (!in_interrupt()) - xhci_dbg(xhci, "urb submitted during PCI suspend\n"); - return -ESHUTDOWN; - } if (usb_endpoint_xfer_isoc(&urb->ep->desc)) num_tds = urb->number_of_packets; @@ -1455,8 +1641,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag else num_tds = 1; - urb_priv = kzalloc(sizeof(struct urb_priv) + - num_tds * sizeof(struct xhci_td), mem_flags); + urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags); if (!urb_priv) return -ENOMEM; @@ -1466,22 +1651,27 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag trace_xhci_urb_enqueue(urb); - if (usb_endpoint_xfer_control(&urb->ep->desc)) { - /* Check to see if the max packet size for the default control - * endpoint changed during FS device enumeration - */ - if (urb->dev->speed == USB_SPEED_FULL) { - ret = xhci_check_maxpacket(xhci, slot_id, - ep_index, urb); - if (ret < 0) { - xhci_urb_free_priv(urb_priv); - urb->hcpriv = NULL; - return ret; - } - } + spin_lock_irqsave(&xhci->lock, flags); + + ret = xhci_check_args(hcd, urb->dev, urb->ep, + true, true, __func__); + if (ret <= 0) { + ret = ret ? 
ret : -EINVAL; + goto free_priv; } - spin_lock_irqsave(&xhci->lock, flags); + slot_id = urb->dev->slot_id; + + if (!HCD_HW_ACCESSIBLE(hcd)) { + ret = -ESHUTDOWN; + goto free_priv; + } + + if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) { + xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n"); + ret = -ENODEV; + goto free_priv; + } if (xhci->xhc_state & XHCI_STATE_DYING) { xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n", @@ -1489,6 +1679,9 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag ret = -ESHUTDOWN; goto free_priv; } + + ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state; + if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) { xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n", *ep_state); @@ -1642,26 +1835,40 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) urb->ep->desc.bEndpointAddress, (unsigned long long) xhci_trb_virt_to_dma( urb_priv->td[i].start_seg, - urb_priv->td[i].first_trb)); + urb_priv->td[i].start_trb)); for (; i < urb_priv->num_tds; i++) { td = &urb_priv->td[i]; - list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); + /* TD can already be on cancelled list if ep halted on it */ + if (list_empty(&td->cancelled_td_list)) { + td->cancel_status = TD_DIRTY; + list_add_tail(&td->cancelled_td_list, + &ep->cancelled_td_list); + } } - /* Queue a stop endpoint command, but only if this is - * the first cancellation to be handled. - */ - if (!(ep->ep_state & EP_STOP_CMD_PENDING)) { + /* These completion handlers will sort out cancelled TDs for us */ + if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) { + xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n", + urb->dev->slot_id, ep_index, ep->ep_state); + goto done; + } + + /* In this case no commands are pending but the endpoint is stopped */ + if (ep->ep_state & EP_CLEARING_TT) { + /* and cancelled TDs can be given back right away */ + xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n", + urb->dev->slot_id, ep_index, ep->ep_state); + xhci_process_cancelled_tds(ep); + } else { + /* Otherwise, queue a new Stop Endpoint command */ command = xhci_alloc_command(xhci, false, GFP_ATOMIC); if (!command) { ret = -ENOMEM; goto done; } + ep->stop_time = jiffies; ep->ep_state |= EP_STOP_CMD_PENDING; - ep->stop_cmd_timer.expires = jiffies + - XHCI_STOP_EP_CMD_TIMEOUT * HZ; - add_timer(&ep->stop_cmd_timer); xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, ep_index, 0); xhci_ring_cmd_db(xhci); @@ -1692,8 +1899,8 @@ err_giveback: * disabled, so there's no need for mutual exclusion to protect * the xhci->devs[slot_id] structure. 
*/ -static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, - struct usb_host_endpoint *ep) +int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep) { struct xhci_hcd *xhci; struct xhci_container_ctx *in_ctx, *out_ctx; @@ -1753,9 +1960,6 @@ static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); - if (xhci->quirks & XHCI_MTK_HOST) - xhci_mtk_drop_ep_quirk(hcd, udev, ep); - xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", (unsigned int) ep->desc.bEndpointAddress, udev->slot_id, @@ -1763,6 +1967,7 @@ static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, (unsigned int) new_add_flags); return 0; } +EXPORT_SYMBOL_GPL(xhci_drop_endpoint); /* Add an endpoint to a new possible bandwidth configuration for this device. * Only one call to this function is allowed per endpoint before @@ -1777,13 +1982,14 @@ static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, * configuration or alt setting is installed in the device, so there's no need * for mutual exclusion to protect the xhci->devs[slot_id] structure. */ -static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, - struct usb_host_endpoint *ep) +int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep) { struct xhci_hcd *xhci; struct xhci_container_ctx *in_ctx; unsigned int ep_index; struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_ep_ctx *ep_ctx; u32 added_ctxs; u32 new_add_flags, new_drop_flags; struct xhci_virt_device *virt_dev; @@ -1851,15 +2057,6 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, return -ENOMEM; } - if (xhci->quirks & XHCI_MTK_HOST) { - ret = xhci_mtk_add_ep_quirk(hcd, udev, ep); - if (ret < 0) { - xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring); - virt_dev->eps[ep_index].new_ring = NULL; - return ret; - } - } - ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); @@ -1874,7 +2071,8 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, /* Store the usb_device pointer for later use */ ep->hcpriv = udev; - xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index); + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); + trace_xhci_add_endpoint(ep_ctx); xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", (unsigned int) ep->desc.bEndpointAddress, @@ -1883,6 +2081,7 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, (unsigned int) new_add_flags); return 0; } +EXPORT_SYMBOL_GPL(xhci_add_endpoint); static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) { @@ -2137,7 +2336,6 @@ static unsigned int xhci_get_block_size(struct usb_device *udev) case USB_SPEED_SUPER_PLUS: return SS_BLOCK; case USB_SPEED_UNKNOWN: - case USB_SPEED_WIRELESS: default: /* Should never happen */ return 1; @@ -2166,7 +2364,7 @@ static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, struct xhci_tt_bw_info *tt_info; /* Find the bandwidth table for the root port this TT is attached to. */ - bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; + bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table; tt_info = virt_dev->tt_info; /* If this TT already had active endpoints, the bandwidth for this TT * has already been added. 
Removing all periodic endpoints (and thus @@ -2284,7 +2482,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci, if (virt_dev->tt_info) { xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "Recalculating BW for rootport %u", - virt_dev->real_port); + virt_dev->rhub_port->hw_portnum + 1); if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { xhci_warn(xhci, "Not enough bandwidth on HS bus for " "newly activated TT.\n"); @@ -2297,7 +2495,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci, } else { xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "Recalculating BW for rootport %u", - virt_dev->real_port); + virt_dev->rhub_port->hw_portnum + 1); } /* Add in how much bandwidth will be used for interval zero, or the @@ -2394,14 +2592,12 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci, bw_used += overhead + packet_size; if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { - unsigned int port_index = virt_dev->real_port - 1; - /* OK, we're manipulating a HS device attached to a * root port bandwidth domain. Include the number of active TTs * in the bandwidth used. */ bw_used += TT_HS_OVERHEAD * - xhci->rh_bw[port_index].num_active_tts; + xhci->rh_bw[virt_dev->rhub_port->hw_portnum].num_active_tts; } xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, @@ -2498,10 +2694,7 @@ static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, case USB_SPEED_HIGH: interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; break; - case USB_SPEED_SUPER: - case USB_SPEED_SUPER_PLUS: - case USB_SPEED_UNKNOWN: - case USB_SPEED_WIRELESS: + default: /* Should never happen because only LS/FS/HS endpoints will get * added to the endpoint list. */ @@ -2558,10 +2751,7 @@ static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, case USB_SPEED_HIGH: interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; break; - case USB_SPEED_SUPER: - case USB_SPEED_SUPER_PLUS: - case USB_SPEED_UNKNOWN: - case USB_SPEED_WIRELESS: + default: /* Should never happen because only LS/FS/HS endpoints will get * added to the endpoint list. */ @@ -2594,7 +2784,7 @@ void xhci_update_tt_active_eps(struct xhci_hcd *xhci, if (!virt_dev->tt_info) return; - rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; + rh_bw_info = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum]; if (old_active_eps == 0 && virt_dev->tt_info->active_eps != 0) { rh_bw_info->num_active_tts += 1; @@ -2695,6 +2885,65 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, return -ENOMEM; } +/* + * Synchronous XHCI stop endpoint helper. Issues the stop endpoint command and + * waits for the command completion before returning. This does not call + * xhci_handle_cmd_stop_ep(), which has additional handling for 'context error' + * cases, along with transfer ring cleanup. + * + * xhci_stop_endpoint_sync() is intended to be utilized by clients that manage + * their own transfer ring, such as offload situations. 
+ */ +int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend, + gfp_t gfp_flags) +{ + struct xhci_command *command; + unsigned long flags; + int ret; + + command = xhci_alloc_command(xhci, true, gfp_flags); + if (!command) + return -ENOMEM; + + spin_lock_irqsave(&xhci->lock, flags); + ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id, + ep->ep_index, suspend); + if (ret < 0) { + spin_unlock_irqrestore(&xhci->lock, flags); + goto out; + } + + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + wait_for_completion(command->completion); + + /* No handling for COMP_CONTEXT_STATE_ERROR done at command completion*/ + if (command->status == COMP_COMMAND_ABORTED || + command->status == COMP_COMMAND_RING_STOPPED) { + xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); + ret = -ETIME; + } +out: + xhci_free_command(xhci, command); + + return ret; +} +EXPORT_SYMBOL_GPL(xhci_stop_endpoint_sync); + +/* + * xhci_usb_endpoint_maxp - get endpoint max packet size + * @host_ep: USB host endpoint to be checked + * + * Returns max packet from the correct descriptor + */ +int xhci_usb_endpoint_maxp(struct usb_device *udev, + struct usb_host_endpoint *host_ep) +{ + if (usb_endpoint_is_hs_isoc_double(udev, host_ep)) + return le16_to_cpu(host_ep->eusb2_isoc_ep_comp.wMaxPacketSize); + return usb_endpoint_maxp(&host_ep->desc); +} /* Issue a configure endpoint command or evaluate context command * and wait for it to finish. @@ -2738,7 +2987,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, xhci->num_active_eps); return -ENOMEM; } - if ((xhci->quirks & XHCI_SW_BW_CHECKING) && + if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change && xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) xhci_free_host_resources(xhci, ctrl_ctx); @@ -2748,6 +2997,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, } slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); + + trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx); trace_xhci_configure_endpoint(slot_ctx); if (!ctx_change) @@ -2817,7 +3068,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, * else should be touching the xhci->devs[slot_id] structure, so we * don't need to take the xhci->lock for manipulating that. 
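
The bandwidth bookkeeping this path exercises is keyed by the xHCI interval exponent: periodic traffic is scheduled every 2^i microframes. A runnable reminder of how a high-speed or SuperSpeed bInterval maps onto that exponent (FS/LS interrupt endpoints take a different conversion path in the driver's interval-parsing helpers):

#include <stdio.h>

/* HS/SS periodic endpoints are serviced every 2^(bInterval - 1)
 * microframes of 125 us each; the clamp mirrors the 0..15 exponent
 * range the interval tables index by. Illustrative only. */
static unsigned interval_uframes(unsigned bInterval)
{
	if (bInterval < 1)
		bInterval = 1;
	if (bInterval > 16)
		bInterval = 16;
	return 1u << (bInterval - 1);
}

int main(void)
{
	for (unsigned b = 1; b <= 4; b++)
		printf("bInterval %u -> every %u uframes (%.3f ms)\n",
		       b, interval_uframes(b), interval_uframes(b) * 0.125);
	return 0;
}
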
*/ -static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) { int i; int ret = 0; @@ -2906,6 +3157,7 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; virt_dev->eps[i].new_ring = NULL; + xhci_debugfs_create_endpoint(xhci, virt_dev, i); } command_cleanup: kfree(command->completion); @@ -2913,8 +3165,9 @@ command_cleanup: return ret; } +EXPORT_SYMBOL_GPL(xhci_check_bandwidth); -static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci; struct xhci_virt_device *virt_dev; @@ -2937,6 +3190,43 @@ static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) } xhci_zero_in_ctx(xhci, virt_dev); } +EXPORT_SYMBOL_GPL(xhci_reset_bandwidth); + +/* Get the available bandwidth of the ports under the xhci roothub */ +int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, + u8 dev_speed) +{ + struct xhci_command *cmd; + unsigned long flags; + int ret; + + if (!ctx || !xhci) + return -EINVAL; + + cmd = xhci_alloc_command(xhci, true, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->in_ctx = ctx; + + /* get xhci port bandwidth, refer to xhci rev1_2 protocol 4.6.15 */ + spin_lock_irqsave(&xhci->lock, flags); + + ret = xhci_queue_get_port_bw(xhci, cmd, ctx->dma, dev_speed, 0); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + goto err_out; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + wait_for_completion(cmd->completion); +err_out: + kfree(cmd->completion); + kfree(cmd); + + return ret; +} static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, struct xhci_container_ctx *in_ctx, @@ -2950,82 +3240,44 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); } -static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, - unsigned int slot_id, unsigned int ep_index, - struct xhci_dequeue_state *deq_state) +static void xhci_endpoint_disable(struct usb_hcd *hcd, + struct usb_host_endpoint *host_ep) { - struct xhci_input_control_ctx *ctrl_ctx; - struct xhci_container_ctx *in_ctx; - struct xhci_ep_ctx *ep_ctx; - u32 added_ctxs; - dma_addr_t addr; - - in_ctx = xhci->devs[slot_id]->in_ctx; - ctrl_ctx = xhci_get_input_control_ctx(in_ctx); - if (!ctrl_ctx) { - xhci_warn(xhci, "%s: Could not get input context, bad type.\n", - __func__); - return; - } - - xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, - xhci->devs[slot_id]->out_ctx, ep_index); - ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); - addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, - deq_state->new_deq_ptr); - if (addr == 0) { - xhci_warn(xhci, "WARN Cannot submit config ep after " - "reset ep command\n"); - xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", - deq_state->new_deq_seg, - deq_state->new_deq_ptr); - return; - } - ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); + struct xhci_hcd *xhci; + struct xhci_virt_device *vdev; + struct xhci_virt_ep *ep; + struct usb_device *udev; + unsigned long flags; + unsigned int ep_index; - added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); - xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, - xhci->devs[slot_id]->out_ctx, ctrl_ctx, - added_ctxs, 
added_ctxs); -} + xhci = hcd_to_xhci(hcd); +rescan: + spin_lock_irqsave(&xhci->lock, flags); -void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, - unsigned int stream_id, struct xhci_td *td) -{ - struct xhci_dequeue_state deq_state; - struct usb_device *udev = td->urb->dev; + udev = (struct usb_device *)host_ep->hcpriv; + if (!udev || !udev->slot_id) + goto done; - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, - "Cleaning up stalled endpoint ring"); - /* We need to move the HW's dequeue pointer past this TD, - * or it will attempt to resend it on the next doorbell ring. - */ - xhci_find_new_dequeue_state(xhci, udev->slot_id, - ep_index, stream_id, td, &deq_state); + vdev = xhci->devs[udev->slot_id]; + if (!vdev) + goto done; - if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg) - return; + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = &vdev->eps[ep_index]; - /* HW with the reset endpoint quirk will use the saved dequeue state to - * issue a configure endpoint command later. - */ - if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, - "Queueing new dequeue state"); - xhci_queue_new_dequeue_state(xhci, udev->slot_id, - ep_index, &deq_state); - } else { - /* Better hope no one uses the input context between now and the - * reset endpoint completion! - * XXX: No idea how this hardware will react when stream rings - * are enabled. - */ - xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, - "Setting up input context for " - "configure endpoint command"); - xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, - ep_index, &deq_state); + /* wait for hub_tt_work to finish clearing hub TT */ + if (ep->ep_state & EP_CLEARING_TT) { + spin_unlock_irqrestore(&xhci->lock, flags); + schedule_timeout_uninterruptible(1); + goto rescan; } + + if (ep->ep_state) + xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n", + ep->ep_state); +done: + host_ep->hcpriv = NULL; + spin_unlock_irqrestore(&xhci->lock, flags); } /* @@ -3038,6 +3290,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, * of an endpoint that isn't in the halted state this function will issue a * configure endpoint command with the Drop and Add bits set for the target * endpoint. Refer to the additional note in xhci spcification section 4.6.8. + * + * vdev may be lost due to xHC restore error and re-initialization during S3/S4 + * resume. A new vdev will be allocated later by xhci_discover_or_reset_device() */ static void xhci_endpoint_reset(struct usb_hcd *hcd, @@ -3052,20 +3307,50 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, unsigned int ep_index; unsigned long flags; u32 ep_flag; + int err; xhci = hcd_to_xhci(hcd); + ep_index = xhci_get_endpoint_index(&host_ep->desc); + + /* + * Usb core assumes a max packet value for ep0 on FS devices until the + * real value is read from the descriptor. Core resets Ep0 if values + * mismatch. 
Reconfigure the xhci ep0 endpoint context here in that case + */ + if (usb_endpoint_xfer_control(&host_ep->desc) && ep_index == 0) { + + udev = container_of(host_ep, struct usb_device, ep0); + if (udev->speed != USB_SPEED_FULL || !udev->slot_id) + return; + + vdev = xhci->devs[udev->slot_id]; + if (!vdev || vdev->udev != udev) + return; + + xhci_check_ep0_maxpacket(xhci, vdev); + + /* Nothing else should be done here for ep0 during ep reset */ + return; + } + if (!host_ep->hcpriv) return; udev = (struct usb_device *) host_ep->hcpriv; vdev = xhci->devs[udev->slot_id]; - ep_index = xhci_get_endpoint_index(&host_ep->desc); + + if (!udev->slot_id || !vdev) + return; + ep = &vdev->eps[ep_index]; /* Bail out if toggle is already being cleared by a endpoint reset */ + spin_lock_irqsave(&xhci->lock, flags); if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) { ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE; + spin_unlock_irqrestore(&xhci->lock, flags); return; } + spin_unlock_irqrestore(&xhci->lock, flags); /* Only interrupt and bulk ep's use data toggle, USB2 spec 5.5.4-> */ if (usb_endpoint_xfer_control(&host_ep->desc) || usb_endpoint_xfer_isoc(&host_ep->desc)) @@ -3101,7 +3386,17 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, xhci_free_command(xhci, cfg_cmd); goto cleanup; } - xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0); + + err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, + ep_index, 0); + if (err < 0) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, cfg_cmd); + xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ", + __func__, err); + goto cleanup; + } + xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); @@ -3111,21 +3406,40 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, /* config ep command clears toggle if add and drop ep flags are set */ ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx); + if (!ctrl_ctx) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, cfg_cmd); + xhci_warn(xhci, "%s: Could not get input context, bad type.\n", + __func__); + goto cleanup; + } + xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ctrl_ctx, ep_flag, ep_flag); xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index); - xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, + err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, udev->slot_id, false); + if (err < 0) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, cfg_cmd); + xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ", + __func__, err); + goto cleanup; + } + xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); wait_for_completion(cfg_cmd->completion); - ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; xhci_free_command(xhci, cfg_cmd); cleanup: xhci_free_command(xhci, stop_cmd); + spin_lock_irqsave(&xhci->lock, flags); + if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE) + ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; + spin_unlock_irqrestore(&xhci->lock, flags); } static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, @@ -3140,7 +3454,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, return -EINVAL; ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); if (ret <= 0) - return -EINVAL; + return ret ? 
ret : -EINVAL; if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" " descriptor for ep 0x%x does not support streams\n", @@ -3420,6 +3734,10 @@ static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, xhci_free_command(xhci, config_cmd); spin_unlock_irqrestore(&xhci->lock, flags); + for (i = 0; i < num_eps; i++) { + ep_index = xhci_get_endpoint_index(&eps[i]->desc); + xhci_debugfs_create_stream_files(xhci, vdev, ep_index); + } /* Subtract 1 for stream 0, which drivers can't use */ return num_streams - 1; @@ -3560,6 +3878,8 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, xhci->num_active_eps); } +static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev); + /* * This submits a Reset Device Command, which will set the device state to 0, * set the device address to 0, and disable all the endpoints except the default @@ -3630,6 +3950,22 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, SLOT_STATE_DISABLED) return 0; + if (xhci->quirks & XHCI_ETRON_HOST) { + /* + * Obtaining a new device slot to inform the xHCI host that + * the USB device has been reset. + */ + ret = xhci_disable_and_free_slot(xhci, udev->slot_id); + if (!ret) { + ret = xhci_alloc_dev(hcd, udev); + if (ret == 1) + ret = 0; + else + ret = -EINVAL; + } + return ret; + } + trace_xhci_discover_or_reset_device(slot_ctx); xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); @@ -3678,6 +4014,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, xhci_get_slot_state(xhci, virt_dev->out_ctx)); xhci_dbg(xhci, "Not freeing device rings.\n"); /* Don't treat this as an error. May change my mind later. */ + virt_dev->flags = 0; ret = 0; goto command_cleanup; case COMP_SUCCESS: @@ -3713,6 +4050,8 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, } if (ep->ring) { + if (ep->sideband) + xhci_sideband_notify_ep_ring_free(ep->sideband, i); xhci_debugfs_remove_endpoint(xhci, virt_dev, i); xhci_free_endpoint_ring(xhci, virt_dev, i); } @@ -3727,6 +4066,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, } /* If necessary, update the number of active TTs on this root port */ xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); + virt_dev->flags = 0; ret = 0; command_cleanup: @@ -3744,9 +4084,9 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_virt_device *virt_dev; struct xhci_slot_ctx *slot_ctx; + unsigned long flags; int i, ret; -#ifndef CONFIG_USB_DEFAULT_PERSIST /* * We called pm_runtime_get_noresume when the device was attached. 
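
On success xhci_alloc_streams() returns num_streams - 1 because stream 0 is reserved; usb core routes usb_alloc_streams() here. A rough sketch of a consumer in the style of a UAS-type class driver (the my_* name is invented; assumes <linux/usb.h>):

/* Sketch: request 256 stream IDs on a set of bulk endpoints. */
static int my_setup_streams(struct usb_interface *intf,
			    struct usb_host_endpoint **eps,
			    unsigned int num_eps)
{
	int streams;

	streams = usb_alloc_streams(intf, eps, num_eps, 256, GFP_NOIO);
	if (streams < 0)
		return streams;

	/* Usable stream IDs are 1..streams; stream 0 can't be used. */
	return streams;
}
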
* Decrement the counter here to allow controller to runtime suspend @@ -3754,7 +4094,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) */ if (xhci->quirks & XHCI_RESET_ON_RESUME) pm_runtime_put_noidle(hcd->self.controller); -#endif ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); /* If the host is halted due to driver unload, we still need to free the @@ -3768,15 +4107,15 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) trace_xhci_free_dev(slot_ctx); /* Stop any wayward timer functions (which may grab the lock) */ - for (i = 0; i < 31; i++) { + for (i = 0; i < 31; i++) virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; - del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); - } - xhci_debugfs_remove_slot(xhci, udev->slot_id); virt_dev->udev = NULL; - ret = xhci_disable_slot(xhci, udev->slot_id); - if (ret) - xhci_free_virt_device(xhci, udev->slot_id); + xhci_disable_slot(xhci, udev->slot_id); + + spin_lock_irqsave(&xhci->lock, flags); + xhci_free_virt_device(xhci, virt_dev, udev->slot_id); + spin_unlock_irqrestore(&xhci->lock, flags); + } int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) @@ -3784,12 +4123,14 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) struct xhci_command *command; unsigned long flags; u32 state; - int ret = 0; + int ret; - command = xhci_alloc_command(xhci, false, GFP_KERNEL); + command = xhci_alloc_command(xhci, true, GFP_KERNEL); if (!command) return -ENOMEM; + xhci_debugfs_remove_slot(xhci, slot_id); + spin_lock_irqsave(&xhci->lock, flags); /* Don't disable the slot if the host controller is dead. */ state = readl(&xhci->op_regs->status); @@ -3809,6 +4150,25 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) } xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); + + wait_for_completion(command->completion); + + if (command->status != COMP_SUCCESS) + xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n", + slot_id, command->status); + + xhci_free_command(xhci, command); + + return 0; +} + +int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id) +{ + struct xhci_virt_device *vdev = xhci->devs[slot_id]; + int ret; + + ret = xhci_disable_slot(xhci, slot_id); + xhci_free_virt_device(xhci, vdev, slot_id); return ret; } @@ -3867,10 +4227,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) slot_id = command->slot_id; if (!slot_id || command->status != COMP_SUCCESS) { - xhci_err(xhci, "Error while assigning device slot ID\n"); + xhci_err(xhci, "Error while assigning device slot ID: %s\n", + xhci_trb_comp_code_string(command->status)); xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", - HCS_MAX_SLOTS( - readl(&xhci->cap_regs->hcs_params1))); + xhci->max_slots); xhci_free_command(xhci, command); return 0; } @@ -3905,33 +4265,35 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) xhci_debugfs_create_slot(xhci, slot_id); -#ifndef CONFIG_USB_DEFAULT_PERSIST /* * If resetting upon resume, we can't put the controller into runtime * suspend if there is a device attached. */ if (xhci->quirks & XHCI_RESET_ON_RESUME) pm_runtime_get_noresume(hcd->self.controller); -#endif /* Is this a LS or FS device under a HS hub? */ /* Hub or peripherial? 
*/ return 1; disable_slot: - ret = xhci_disable_slot(xhci, udev->slot_id); - if (ret) - xhci_free_virt_device(xhci, udev->slot_id); + xhci_disable_and_free_slot(xhci, udev->slot_id); return 0; } -/* - * Issue an Address Device command and optionally send a corresponding - * SetAddress request to the device. +/** + * xhci_setup_device - issues an Address Device command to assign a unique + * USB bus address. + * @hcd: USB host controller data structure. + * @udev: USB dev structure representing the connected device. + * @setup: Enum specifying setup mode: address only or with context. + * @timeout_ms: Max wait time (ms) for the command operation to complete. + * + * Return: 0 if successful; otherwise, negative error code. */ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, - enum xhci_setup_dev setup) + enum xhci_setup_dev setup, unsigned int timeout_ms) { const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address"; unsigned long flags; @@ -3988,6 +4350,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, } command->in_ctx = virt_dev->in_ctx; + command->timeout_ms = timeout_ms; slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); @@ -4010,9 +4373,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); ctrl_ctx->drop_flags = 0; - trace_xhci_address_ctx(xhci, virt_dev->in_ctx, - le32_to_cpu(slot_ctx->dev_info) >> 27); + trace_xhci_address_ctx(xhci, virt_dev->in_ctx); + trace_xhci_address_ctrl_ctx(ctrl_ctx); spin_lock_irqsave(&xhci->lock, flags); trace_xhci_setup_device(virt_dev); ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, @@ -4049,9 +4412,11 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); mutex_unlock(&xhci->mutex); - ret = xhci_disable_slot(xhci, udev->slot_id); - if (!ret) - xhci_alloc_dev(hcd, udev); + ret = xhci_disable_and_free_slot(xhci, udev->slot_id); + if (!ret) { + if (xhci_alloc_dev(hcd, udev) == 1) + xhci_setup_addressable_virt_dev(xhci, udev); + } kfree(command->completion); kfree(command); return -EPROTO; @@ -4068,7 +4433,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, xhci_err(xhci, "ERROR: unexpected setup %s command completion code 0x%x.\n", act, command->status); - trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); + trace_xhci_address_ctx(xhci, virt_dev->out_ctx); ret = -EINVAL; break; } @@ -4086,17 +4451,17 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Output Context DMA address = %#08llx", (unsigned long long)virt_dev->out_ctx->dma); - trace_xhci_address_ctx(xhci, virt_dev->in_ctx, - le32_to_cpu(slot_ctx->dev_info) >> 27); + trace_xhci_address_ctx(xhci, virt_dev->in_ctx); /* * USB core uses address 1 for the roothubs, so we add one to the * address given back to us by the HC. 
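
The slot context handed to the Address Device command also carries the route string that usb core built during enumeration. How those nibbles are packed, in a standalone sketch (the helper is ours; compare the route computation in hub.c):

#include <stdio.h>
#include <stdint.h>

/* One nibble per hub tier below the root port: the port number on the
 * hub plugged into the root port occupies bits 3:0, and port numbers
 * above 15 saturate to 15. Illustrative only. */
static uint32_t route_string(const unsigned *ports, int depth)
{
	uint32_t route = 0;

	for (int tier = 0; tier < depth && tier < 5; tier++) {
		unsigned p = ports[tier] > 15 ? 15 : ports[tier];
		route |= p << (4 * tier);
	}
	return route;
}

int main(void)
{
	/* port 2 on the hub at the root port, then port 4 one tier down */
	unsigned path[] = { 2, 4 };

	printf("route string: %#x\n", route_string(path, 2));
	return 0;
}
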
*/ - trace_xhci_address_ctx(xhci, virt_dev->out_ctx, - le32_to_cpu(slot_ctx->dev_info) >> 27); + trace_xhci_address_ctx(xhci, virt_dev->out_ctx); /* Zero the input context control for later use */ ctrl_ctx->add_flags = 0; ctrl_ctx->drop_flags = 0; + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); + udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Internal device address = %d", @@ -4110,14 +4475,16 @@ out: return ret; } -static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) +static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev, + unsigned int timeout_ms) { - return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS); + return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms); } static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) { - return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY); + return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY, + XHCI_CMD_DEFAULT_TIMEOUT); } /* @@ -4148,6 +4515,10 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, unsigned long flags; int ret; + command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL); + if (!command) + return -ENOMEM; + spin_lock_irqsave(&xhci->lock, flags); virt_dev = xhci->devs[udev->slot_id]; @@ -4160,14 +4531,15 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, if (!virt_dev || max_exit_latency == virt_dev->current_mel) { spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, command); return 0; } /* Attempt to issue an Evaluate Context command to change the MEL. */ - command = xhci->lpm_command; ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); if (!ctrl_ctx) { spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, command); xhci_warn(xhci, "%s: Could not get input context, bad type.\n", __func__); return -ENOMEM; @@ -4194,6 +4566,9 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, virt_dev->current_mel = max_exit_latency; spin_unlock_irqrestore(&xhci->lock, flags); } + + xhci_free_command(xhci, command); + return ret; } @@ -4265,13 +4640,16 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, { struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_port **ports; - __le32 __iomem *pm_addr, *hlpm_addr; + struct xhci_port_regs __iomem *port_reg; u32 pm_val, hlpm_val, field; unsigned int port_num; unsigned long flags; int hird, exit_latency; int ret; + if (xhci->quirks & XHCI_HW_LPM_DISABLE) + return -EPERM; + if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || !udev->lpm_capable) return -EPERM; @@ -4287,21 +4665,20 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, ports = xhci->usb2_rhub.ports; port_num = udev->portnum - 1; - pm_addr = ports[port_num]->addr + PORTPMSC; - pm_val = readl(pm_addr); - hlpm_addr = ports[port_num]->addr + PORTHLPMC; - field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); + port_reg = ports[port_num]->port_reg; + pm_val = readl(&port_reg->portpmsc); xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", - enable ? "enable" : "disable", port_num + 1); + str_enable_disable(enable), port_num + 1); - if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) { + if (enable) { /* Host supports BESL timeout instead of HIRD */ if (udev->usb2_hw_lpm_besl_capable) { /* if device doesn't have a preferred BESL value use a * default one which works with mixed HIRD and BESL * systems. 
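
The new udev->devaddr assignment pulls the hardware-assigned address straight out of the output slot context. The relevant dword layout (xHCI spec 6.2.2), decoded standalone; the masks mirror DEV_ADDR_MASK and GET_SLOT_STATE():

#include <stdio.h>
#include <stdint.h>

static unsigned dev_addr(uint32_t dev_state)	/* bits 7:0 */
{
	return dev_state & 0xff;
}

static unsigned slot_state(uint32_t dev_state)	/* bits 31:27 */
{
	return (dev_state >> 27) & 0x1f;
}

int main(void)
{
	/* illustrative value: slot in Addressed state, address 7 */
	uint32_t dev_state = (2u << 27) | 7;

	printf("address %u, slot state %u\n",
	       dev_addr(dev_state), slot_state(dev_state));
	return 0;
}
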
See XHCI_DEFAULT_BESL definition in xhci.h */ + field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); if ((field & USB_BESL_SUPPORT) && (field & USB_BESL_BASELINE_VALID)) hird = USB_GET_BESL_BASELINE(field); @@ -4311,48 +4688,39 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, exit_latency = xhci_besl_encoding[hird]; spin_unlock_irqrestore(&xhci->lock, flags); - /* USB 3.0 code dedicate one xhci->lpm_command->in_ctx - * input context for link powermanagement evaluate - * context commands. It is protected by hcd->bandwidth - * mutex and is shared by all devices. We need to set - * the max ext latency in USB 2 BESL LPM as well, so - * use the same mutex and xhci_change_max_exit_latency() - */ - mutex_lock(hcd->bandwidth_mutex); ret = xhci_change_max_exit_latency(xhci, udev, exit_latency); - mutex_unlock(hcd->bandwidth_mutex); - if (ret < 0) return ret; spin_lock_irqsave(&xhci->lock, flags); hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev); - writel(hlpm_val, hlpm_addr); + writel(hlpm_val, &port_reg->porthlmpc); /* flush write */ - readl(hlpm_addr); + readl(&port_reg->porthlmpc); } else { hird = xhci_calculate_hird_besl(xhci, udev); } pm_val &= ~PORT_HIRD_MASK; pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id); - writel(pm_val, pm_addr); - pm_val = readl(pm_addr); + writel(pm_val, &port_reg->portpmsc); + pm_val = readl(&port_reg->portpmsc); pm_val |= PORT_HLE; - writel(pm_val, pm_addr); + writel(pm_val, &port_reg->portpmsc); /* flush write */ - readl(pm_addr); + readl(&port_reg->portpmsc); } else { pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK); - writel(pm_val, pm_addr); + writel(pm_val, &port_reg->portpmsc); /* flush write */ - readl(pm_addr); + readl(&port_reg->portpmsc); if (udev->usb2_hw_lpm_besl_capable) { spin_unlock_irqrestore(&xhci->lock, flags); - mutex_lock(hcd->bandwidth_mutex); xhci_change_max_exit_latency(xhci, udev, 0); - mutex_unlock(hcd->bandwidth_mutex); + readl_poll_timeout(&ports[port_num]->port_reg->portsc, pm_val, + (pm_val & PORT_PLS_MASK) == XDEV_U0, + 100, 10000); return 0; } } @@ -4361,35 +4729,27 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, return 0; } -/* check if a usb2 port supports a given extened capability protocol - * only USB2 ports extended protocol capability values are cached. 
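
The BESL index chosen in this path selects a worst-case L1 exit latency that also feeds xhci_change_max_exit_latency(). A standalone lookup using the values the driver tabulates in xhci_besl_encoding[] (copied from memory of the driver source; double-check them against your tree):

#include <stdio.h>

/* BESL index -> deep L1 exit latency in microseconds. */
static const unsigned besl_us[16] = {
	125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000,
};

int main(void)
{
	unsigned besl = 4;	/* XHCI_DEFAULT_BESL */

	printf("BESL %u -> %u us max exit latency\n", besl, besl_us[besl]);
	return 0;
}
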
- * Return 1 if capability is supported - */ -static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, - unsigned capability) -{ - u32 port_offset, port_count; - int i; - - for (i = 0; i < xhci->num_ext_caps; i++) { - if (xhci->ext_caps[i] & capability) { - /* port offsets starts at 1 */ - port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; - port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); - if (port >= port_offset && - port < port_offset + port_count) - return 1; - } - } - return 0; -} - static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); - int portnum = udev->portnum - 1; + struct xhci_port *port; + u32 capability; + + /* Check if USB3 device at root port is tunneled over USB4 */ + if (hcd->speed >= HCD_USB3 && !udev->parent->parent) { + port = xhci->usb3_rhub.ports[udev->portnum - 1]; + + udev->tunnel_mode = xhci_port_is_tunneled(xhci, port); + if (udev->tunnel_mode == USB_LINK_UNKNOWN) + dev_dbg(&udev->dev, "link tunnel state unknown\n"); + else if (udev->tunnel_mode == USB_LINK_TUNNELED) + dev_dbg(&udev->dev, "tunneled over USB4 link\n"); + else if (udev->tunnel_mode == USB_LINK_NATIVE) + dev_dbg(&udev->dev, "native USB 3.x link\n"); + return 0; + } - if (hcd->speed >= HCD_USB3 || !udev->lpm_capable) + if (hcd->speed >= HCD_USB3 || !udev->lpm_capable || !xhci->hw_lpm_support) return 0; /* we only support lpm for non-hub device connected to root hub yet */ @@ -4397,14 +4757,14 @@ static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) udev->descriptor.bDeviceClass == USB_CLASS_HUB) return 0; - if (xhci->hw_lpm_support == 1 && - xhci_check_usb2_port_capability( - xhci, portnum, XHCI_HLC)) { + port = xhci->usb2_rhub.ports[udev->portnum - 1]; + capability = port->port_cap->protocol_caps; + + if (capability & XHCI_HLC) { udev->usb2_hw_lpm_capable = 1; udev->l1_params.timeout = XHCI_L1_TIMEOUT; udev->l1_params.besl = XHCI_DEFAULT_BESL; - if (xhci_check_usb2_port_capability(xhci, portnum, - XHCI_BLC)) + if (capability & XHCI_BLC) udev->usb2_hw_lpm_besl_capable = 1; } @@ -4493,7 +4853,7 @@ static unsigned long long xhci_calculate_intel_u1_timeout( break; } /* Otherwise the calculation is the same as isoc eps */ - /* fall through */ + fallthrough; case USB_ENDPOINT_XFER_ISOC: timeout_ns = xhci_service_interval_to_ns(desc); timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); @@ -4522,7 +4882,7 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, } } - if (xhci->quirks & XHCI_INTEL_HOST) + if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST)) timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); else timeout_ns = udev->u1_params.sel; @@ -4540,8 +4900,8 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, */ if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) return timeout_ns; - dev_dbg(&udev->dev, "Hub-initiated U1 disabled " - "due to long timeout %llu ms\n", timeout_ns); + dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %lluus\n", + timeout_ns); return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); } @@ -4586,7 +4946,7 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, } } - if (xhci->quirks & XHCI_INTEL_HOST) + if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST)) timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); else timeout_ns = udev->u2_params.sel; @@ -4598,8 +4958,8 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, */ if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) return timeout_ns; - 
dev_dbg(&udev->dev, "Hub-initiated U2 disabled " - "due to long timeout %llu ms\n", timeout_ns); + dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %lluus\n", + timeout_ns * 256); return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); } @@ -4628,12 +4988,12 @@ static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, desc, state, timeout); - /* If we found we can't enable hub-initiated LPM, or + /* If we found we can't enable hub-initiated LPM, and * the U1 or U2 exit latency was too high to allow - * device-initiated LPM as well, just stop searching. + * device-initiated LPM as well, then we will disable LPM + * for this device, so stop searching any further. */ - if (alt_timeout == USB3_LPM_DISABLED || - alt_timeout == USB3_LPM_DEVICE_INITIATED) { + if (alt_timeout == USB3_LPM_DISABLED) { *timeout = alt_timeout; return -E2BIG; } @@ -4654,45 +5014,34 @@ static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, if (xhci_update_timeout_for_endpoint(xhci, udev, &alt->endpoint[j].desc, state, timeout)) return -E2BIG; - continue; } return 0; } -static int xhci_check_intel_tier_policy(struct usb_device *udev, +static int xhci_check_tier_policy(struct xhci_hcd *xhci, + struct usb_device *udev, enum usb3_link_state state) { - struct usb_device *parent; - unsigned int num_hubs; + struct usb_device *parent = udev->parent; + int tier = 1; /* roothub is tier1 */ - if (state == USB3_LPM_U2) - return 0; + while (parent) { + parent = parent->parent; + tier++; + } - /* Don't enable U1 if the device is on a 2nd tier hub or lower. */ - for (parent = udev->parent, num_hubs = 0; parent->parent; - parent = parent->parent) - num_hubs++; + if (xhci->quirks & XHCI_INTEL_HOST && tier > 3) + goto fail; + if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2) + goto fail; - if (num_hubs < 2) - return 0; - - dev_dbg(&udev->dev, "Disabling U1 link state for device" - " below second-tier hub.\n"); - dev_dbg(&udev->dev, "Plug device into first-tier hub " - "to decrease power consumption.\n"); + return 0; +fail: + dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n", + tier); return -E2BIG; } -static int xhci_check_tier_policy(struct xhci_hcd *xhci, - struct usb_device *udev, - enum usb3_link_state state) -{ - if (xhci->quirks & XHCI_INTEL_HOST) - return xhci_check_intel_tier_policy(udev, state); - else - return 0; -} - /* Returns the U1 or U2 timeout that should be enabled. * If the tier check or timeout setting functions return with a non-zero exit * code, that means the timeout value has been finalized and we shouldn't look @@ -4717,9 +5066,6 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, return timeout; } - if (xhci_check_tier_policy(xhci, udev, state) < 0) - return timeout; - /* Gather some information about the currently installed configuration * and alternate interface settings. 
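
The corrected debug message above reflects the field's units: the U2 timeout is programmed in 256 us ticks, while U1 uses 1 us ticks. The rounding the driver applies before its range check, standalone:

#include <stdio.h>

/* Exit latency arrives in ns; the U2 timeout field counts 256 us
 * units, so round up: DIV_ROUND_UP(timeout_ns, 256 * 1000). */
static unsigned u2_timeout_units(unsigned long long timeout_ns)
{
	return (unsigned)((timeout_ns + 256 * 1000 - 1) / (256 * 1000));
}

int main(void)
{
	unsigned units = u2_timeout_units(2000000);	/* 2 ms */

	printf("2 ms -> %u units = %u us\n", units, units * 256);
	return 0;
}
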
*/ @@ -4744,10 +5090,12 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, if (intf->dev.driver) { driver = to_usb_driver(intf->dev.driver); if (driver && driver->disable_hub_initiated_lpm) { - dev_dbg(&udev->dev, "Hub-initiated %s disabled " - "at request of driver %s\n", - state_name, driver->name); - return xhci_get_timeout_no_hub_lpm(udev, state); + dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n", + state_name, driver->name); + timeout = xhci_get_timeout_no_hub_lpm(udev, + state); + if (timeout == USB3_LPM_DISABLED) + return timeout; } } @@ -4795,10 +5143,8 @@ static int calculate_max_exit_latency(struct usb_device *udev, enabling_u2) u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); - if (u1_mel_us > u2_mel_us) - mel_us = u1_mel_us; - else - mel_us = u2_mel_us; + mel_us = max(u1_mel_us, u2_mel_us); + /* xHCI host controller max exit latency field is only 16 bits wide. */ if (mel_us > MAX_EXIT) { dev_warn(&udev->dev, "Link PM max exit latency of %lluus " @@ -4813,6 +5159,7 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, struct usb_device *udev, enum usb3_link_state state) { struct xhci_hcd *xhci; + struct xhci_port *port; u16 hub_encoded_timeout; int mel; int ret; @@ -4826,6 +5173,16 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, !xhci->devs[udev->slot_id]) return USB3_LPM_DISABLED; + if (xhci_check_tier_policy(xhci, udev, state) < 0) + return USB3_LPM_DISABLED; + + /* If connected to root port then check port can handle lpm */ + if (udev->parent && !udev->parent->parent) { + port = xhci->usb3_rhub.ports[udev->portnum - 1]; + if (port->lpm_incapable) + return USB3_LPM_DISABLED; + } + hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); if (mel < 0) { @@ -4885,7 +5242,7 @@ static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, /* Once a hub descriptor is fetched for a device, we need to update the xHC's * internal data structures for the device. */ -static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, +int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); @@ -4985,6 +5342,7 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, xhci_free_command(xhci, config_cmd); return ret; } +EXPORT_SYMBOL_GPL(xhci_update_hub_device); static int xhci_get_frame(struct usb_hcd *hcd) { @@ -4993,6 +5351,57 @@ static int xhci_get_frame(struct usb_hcd *hcd) return readl(&xhci->run_regs->microframe_index) >> 3; } +static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd) +{ + xhci->usb2_rhub.hcd = hcd; + hcd->speed = HCD_USB2; + hcd->self.root_hub->speed = USB_SPEED_HIGH; + /* + * USB 2.0 roothub under xHCI has an integrated TT, + * (rate matching hub) as opposed to having an OHCI/UHCI + * companion controller. + */ + hcd->has_tt = 1; +} + +static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd) +{ + unsigned int minor_rev; + + /* + * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts + * should return 0x31 for sbrn, or that the minor revision + * is a two digit BCD containig minor and sub-minor numbers. + * This was later clarified in xHCI 1.2. + * + * Some USB 3.1 capable hosts therefore have sbrn 0x30, and + * minor revision set to 0x1 instead of 0x10. 
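
calculate_max_exit_latency(), reworked above to use max(), is otherwise a unit conversion plus a 16-bit range check against the slot context MEL field. The same shape as a standalone function (illustrative restatement, assuming usb core keeps the exit latencies in ns):

#include <errno.h>
#include <stdio.h>

#define MAX_EXIT 0xffff		/* slot context MEL field is 16 bits */

/* Worst of the U1/U2 exit latencies, rounded up to microseconds. */
static int max_exit_latency_us(unsigned long long u1_mel_ns,
			       unsigned long long u2_mel_ns)
{
	unsigned long long u1 = (u1_mel_ns + 999) / 1000;
	unsigned long long u2 = (u2_mel_ns + 999) / 1000;
	unsigned long long mel = u1 > u2 ? u1 : u2;

	return mel > MAX_EXIT ? -E2BIG : (int)mel;
}

int main(void)
{
	printf("MEL = %d us\n", max_exit_latency_us(1800, 120000));
	return 0;
}
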
+ */ + if (xhci->usb3_rhub.min_rev == 0x1) + minor_rev = 1; + else + minor_rev = xhci->usb3_rhub.min_rev / 0x10; + + switch (minor_rev) { + case 2: + hcd->speed = HCD_USB32; + hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; + hcd->self.root_hub->rx_lanes = 2; + hcd->self.root_hub->tx_lanes = 2; + hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2; + break; + case 1: + hcd->speed = HCD_USB31; + hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; + hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1; + break; + } + xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n", + minor_rev, minor_rev ? "Enhanced " : ""); + + xhci->usb3_rhub.hcd = hcd; +} + int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) { struct xhci_hcd *xhci; @@ -5001,8 +5410,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) * quirks */ struct device *dev = hcd->self.sysdev; - unsigned int minor_rev; int retval; + u32 hcs_params1; /* Accept arbitrarily long scatter-gather lists */ hcd->self.sg_tablesize = ~0; @@ -5015,60 +5424,39 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) xhci = hcd_to_xhci(hcd); - if (usb_hcd_is_primary_hcd(hcd)) { - xhci->main_hcd = hcd; - xhci->usb2_rhub.hcd = hcd; - /* Mark the first roothub as being USB 2.0. - * The xHCI driver will register the USB 3.0 roothub. - */ - hcd->speed = HCD_USB2; - hcd->self.root_hub->speed = USB_SPEED_HIGH; - /* - * USB 2.0 roothub under xHCI has an integrated TT, - * (rate matching hub) as opposed to having an OHCI/UHCI - * companion controller. - */ - hcd->has_tt = 1; - } else { - /* - * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol - * minor revision instead of sbrn - */ - minor_rev = xhci->usb3_rhub.min_rev; - if (minor_rev) { - hcd->speed = HCD_USB31; - hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; - } - xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n", - minor_rev, - minor_rev ? "Enhanced" : ""); - - xhci->usb3_rhub.hcd = hcd; - /* xHCI private pointer was set in xhci_pci_probe for the second - * registered roothub. 
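
The minor-revision decode above can be exercised in isolation; the quirk is that pre-xHCI-1.2 hosts report a bare 0x1 where later hosts report BCD 0x10:

#include <stdio.h>

/* Mirrors the decode above: 0x1 is a legacy spelling of USB 3.1,
 * otherwise the high BCD digit is the minor revision. */
static unsigned usb3_minor(unsigned min_rev)
{
	return min_rev == 0x1 ? 1 : min_rev / 0x10;
}

int main(void)
{
	printf("0x01 -> USB 3.%u\n", usb3_minor(0x01));
	printf("0x10 -> USB 3.%u\n", usb3_minor(0x10));
	printf("0x20 -> USB 3.%u\n", usb3_minor(0x20));
	return 0;
}
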
- */ + if (!usb_hcd_is_primary_hcd(hcd)) { + xhci_hcd_init_usb3_data(xhci, hcd); return 0; } mutex_init(&xhci->mutex); + xhci->main_hcd = hcd; xhci->cap_regs = hcd->regs; xhci->op_regs = hcd->regs + HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); xhci->run_regs = hcd->regs + (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); /* Cache read-only capability registers */ - xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); + hcs_params1 = readl(&xhci->cap_regs->hcs_params1); xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); - xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase); - xhci->hci_version = HC_VERSION(xhci->hcc_params); + xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase)); xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); if (xhci->hci_version > 0x100) xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); + xhci->max_slots = HCS_MAX_SLOTS(hcs_params1); + xhci->max_ports = min(HCS_MAX_PORTS(hcs_params1), MAX_HC_PORTS); + /* xhci-plat or xhci-pci might have set max_interrupters already */ + if (!xhci->max_interrupters) + xhci->max_interrupters = min(HCS_MAX_INTRS(hcs_params1), MAX_HC_INTRS); + else if (xhci->max_interrupters > HCS_MAX_INTRS(hcs_params1)) + xhci->max_interrupters = HCS_MAX_INTRS(hcs_params1); + xhci->quirks |= quirks; - get_quirks(dev, xhci); + if (get_quirks) + get_quirks(dev, xhci); /* In xhci controllers which follow xhci 1.0 spec gives a spurious * success event after a short transfer. This quirk will ignore such @@ -5077,6 +5465,11 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) if (xhci->hci_version > 0x96) xhci->quirks |= XHCI_SPURIOUS_SUCCESS; + if (xhci->hci_version == 0x95 && link_quirk) { + xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits"); + xhci->quirks |= XHCI_LINK_TRB_QUIRK; + } + /* Make sure the HC is halted. */ retval = xhci_halt(xhci); if (retval) @@ -5086,7 +5479,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) xhci_dbg(xhci, "Resetting HCD\n"); /* Reset the internal HC memory state and registers. 
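
The interrupter negotiation above clamps in both directions: an unset value takes the hardware maximum, an oversized request is capped by HCSPARAMS1. The same logic standalone (MAX_HC_INTRS matches the 1024-entry runtime register array asserted at the end of this diff):

#include <stdio.h>

#define MAX_HC_INTRS 1024

static unsigned clamp_interrupters(unsigned requested, unsigned hcs_max)
{
	if (!requested)
		return hcs_max < MAX_HC_INTRS ? hcs_max : MAX_HC_INTRS;
	return requested > hcs_max ? hcs_max : requested;
}

int main(void)
{
	printf("%u %u %u\n",
	       clamp_interrupters(0, 8),		/* take the hardware value */
	       clamp_interrupters(4, 8),		/* honour a smaller request */
	       clamp_interrupters(16, 8));	/* cap at the hardware max */
	return 0;
}
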
*/ - retval = xhci_reset(xhci); + retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); if (retval) return retval; xhci_dbg(xhci, "Reset complete\n"); @@ -5103,7 +5496,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) /* Set dma_mask and coherent_dma_mask to 64-bits, * if xHC supports 64-bit addressing */ - if (HCC_64BIT_ADDR(xhci->hcc_params) && + if ((xhci->hcc_params & HCC_64BIT_ADDR) && !dma_set_mask(dev, DMA_BIT_MASK(64))) { xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); @@ -5126,6 +5519,11 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) return retval; xhci_dbg(xhci, "Called HCD init\n"); + if (xhci_hcd_is_usb3(hcd)) + xhci_hcd_init_usb3_data(xhci, hcd); + else + xhci_hcd_init_usb2_data(xhci, hcd); + xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n", xhci->hcc_params, xhci->hci_version, xhci->quirks); @@ -5133,6 +5531,27 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) } EXPORT_SYMBOL_GPL(xhci_gen_setup); +static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ + struct xhci_hcd *xhci; + struct usb_device *udev; + unsigned int slot_id; + unsigned int ep_index; + unsigned long flags; + + xhci = hcd_to_xhci(hcd); + + spin_lock_irqsave(&xhci->lock, flags); + udev = (struct usb_device *)ep->hcpriv; + slot_id = udev->slot_id; + ep_index = xhci_get_endpoint_index(&ep->desc); + + xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT; + xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + spin_unlock_irqrestore(&xhci->lock, flags); +} + static const struct hc_driver xhci_hc_driver = { .description = "xhci-hcd", .product_desc = "xHCI Host Controller", @@ -5142,7 +5561,8 @@ static const struct hc_driver xhci_hc_driver = { * generic hardware linkage */ .irq = xhci_irq, - .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED, + .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED | + HCD_BH, /* * basic lifecycle operations @@ -5155,6 +5575,8 @@ static const struct hc_driver xhci_hc_driver = { /* * managing i/o requests and associated device resources */ + .map_urb_for_dma = xhci_map_urb_for_dma, + .unmap_urb_for_dma = xhci_unmap_urb_for_dma, .urb_enqueue = xhci_urb_enqueue, .urb_dequeue = xhci_urb_dequeue, .alloc_dev = xhci_alloc_dev, @@ -5163,6 +5585,7 @@ static const struct hc_driver xhci_hc_driver = { .free_streams = xhci_free_streams, .add_endpoint = xhci_add_endpoint, .drop_endpoint = xhci_drop_endpoint, + .endpoint_disable = xhci_endpoint_disable, .endpoint_reset = xhci_endpoint_reset, .check_bandwidth = xhci_check_bandwidth, .reset_bandwidth = xhci_reset_bandwidth, @@ -5193,6 +5616,7 @@ static const struct hc_driver xhci_hc_driver = { .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, .find_raw_port_number = xhci_find_raw_port_number, + .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete, }; void xhci_init_driver(struct hc_driver *drv, @@ -5209,6 +5633,18 @@ void xhci_init_driver(struct hc_driver *drv, drv->reset = over->reset; if (over->start) drv->start = over->start; + if (over->add_endpoint) + drv->add_endpoint = over->add_endpoint; + if (over->drop_endpoint) + drv->drop_endpoint = over->drop_endpoint; + if (over->check_bandwidth) + drv->check_bandwidth = over->check_bandwidth; + if (over->reset_bandwidth) + drv->reset_bandwidth = over->reset_bandwidth; + if (over->update_hub_device) + drv->update_hub_device = 
over->update_hub_device;
+	if (over->hub_control)
+		drv->hub_control = over->hub_control;
 	}
 }
 EXPORT_SYMBOL_GPL(xhci_init_driver);
 
@@ -5234,13 +5670,14 @@ static int __init xhci_hcd_init(void)
 	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
 	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
 	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
-	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
-	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+	/* xhci_run_regs has eight fields and embeds 1024 xhci_intr_regs */
+	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*1024)*32/8);
 
 	if (usb_disabled())
 		return -ENODEV;
 
 	xhci_debugfs_create_root();
+	xhci_dbc_init();
 
 	return 0;
 }
 
@@ -5252,6 +5689,7 @@ static int __init xhci_hcd_init(void)
 static void __exit xhci_hcd_fini(void)
 {
 	xhci_debugfs_remove_root();
+	xhci_dbc_exit();
 }
 
 module_init(xhci_hcd_init);
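
With the extra override hooks added above, vendor glue can swap out endpoint and hub management entry points wholesale. A hypothetical consumer in the style of xhci-plat; every my_* name and the priv struct are invented, only xhci_init_driver() and xhci_gen_setup() are real:

#include <linux/init.h>
#include <linux/module.h>
#include "xhci.h"

struct my_priv { void __iomem *wrapper_regs; };	/* assumption */

static int my_xhci_reset(struct usb_hcd *hcd)
{
	/* generic setup; passing a NULL get_quirks is valid after this change */
	return xhci_gen_setup(hcd, NULL);
}

static const struct xhci_driver_overrides my_overrides __initconst = {
	.extra_priv_size = sizeof(struct my_priv),
	.reset = my_xhci_reset,
};

static struct hc_driver my_xhci_hc_driver;

static int __init my_glue_init(void)
{
	xhci_init_driver(&my_xhci_hc_driver, &my_overrides);
	/* then register a platform/PCI driver that uses my_xhci_hc_driver */
	return 0;
}
module_init(my_glue_init);
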
