Diffstat (limited to 'drivers/usb/host')
39 files changed, 1436 insertions, 625 deletions
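Most of the host-side churn below is one mechanical conversion: timer callbacks move from from_timer() to timer_container_of(), and del_timer()/del_timer_sync() callers become timer_delete()/timer_delete_sync(). A minimal sketch of the resulting pattern, using hypothetical my_* names rather than code from any one patch:

	#include <linux/timer.h>

	struct my_hcd_priv {			/* hypothetical driver state */
		struct timer_list watchdog;
		bool port_active;
	};

	static void my_watchdog_func(struct timer_list *t)
	{
		/* recover the enclosing structure from the embedded timer */
		struct my_hcd_priv *priv = timer_container_of(priv, t, watchdog);

		priv->port_active = false;
	}

	static void my_hcd_stop(struct my_hcd_priv *priv)
	{
		/* wait out any concurrently running callback before teardown */
		timer_delete_sync(&priv->watchdog);
	}

timer_container_of() is the same container_of()-style lookup that from_timer() performed, and timer_delete_sync() is the renamed del_timer_sync(), so these conversions are behaviour-preserving.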
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index d011d6c753ed..109100cc77a3 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -104,6 +104,15 @@ config USB_XHCI_RZV2M Say 'Y' to enable the support for the xHCI host controller found in Renesas RZ/V2M SoC. +config USB_XHCI_SIDEBAND + bool "xHCI support for sideband" + help + Say 'Y' to enable the support for the xHCI sideband capability. + Provide a mechanism for a sideband datapath for payload associated + with audio class endpoints. This allows for an audio DSP to use + xHCI USB endpoints directly, allowing CPU to sleep while playing + audio. + config USB_XHCI_TEGRA tristate "xHCI support for NVIDIA Tegra SoCs" depends on PHY_TEGRA_XUSB @@ -225,7 +234,7 @@ config USB_EHCI_HCD_OMAP tristate "EHCI support for OMAP3 and later chips" depends on ARCH_OMAP || COMPILE_TEST depends on NOP_USB_XCEIV - default y + default ARCH_OMAP help Enables support for the on-chip EHCI controller on OMAP3 and later chips. diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index be4e5245c52f..4df946c05ba0 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -32,6 +32,10 @@ endif xhci-rcar-hcd-y += xhci-rcar.o xhci-rcar-hcd-$(CONFIG_USB_XHCI_RZV2M) += xhci-rzv2m.o +ifneq ($(CONFIG_USB_XHCI_SIDEBAND),) + xhci-hcd-y += xhci-sideband.o +endif + obj-$(CONFIG_USB_PCI) += pci-quirks.o obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 26f13278d4d6..6ed2fa5418a4 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -410,15 +410,13 @@ static int ehci_fsl_setup(struct usb_hcd *hcd) return retval; } -struct ehci_fsl { - struct ehci_hcd ehci; - -#ifdef CONFIG_PM +struct ehci_fsl_priv { /* Saved USB PHY settings, need to restore after deep sleep. */ u32 usb_ctrl; -#endif }; +#define hcd_to_ehci_fsl_priv(h) ((struct ehci_fsl_priv *) hcd_to_ehci(h)->priv) + #ifdef CONFIG_PM #ifdef CONFIG_PPC_MPC512x @@ -566,17 +564,10 @@ static inline int ehci_fsl_mpc512x_drv_resume(struct device *dev) } #endif /* CONFIG_PPC_MPC512x */ -static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd) -{ - struct ehci_hcd *ehci = hcd_to_ehci(hcd); - - return container_of(ehci, struct ehci_fsl, ehci); -} - static int ehci_fsl_drv_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); - struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); + struct ehci_fsl_priv *priv = hcd_to_ehci_fsl_priv(hcd); void __iomem *non_ehci = hcd->regs; if (of_device_is_compatible(dev->parent->of_node, @@ -589,14 +580,14 @@ static int ehci_fsl_drv_suspend(struct device *dev) if (!fsl_deep_sleep()) return 0; - ehci_fsl->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL); + priv->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL); return 0; } static int ehci_fsl_drv_resume(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); - struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); + struct ehci_fsl_priv *priv = hcd_to_ehci_fsl_priv(hcd); struct ehci_hcd *ehci = hcd_to_ehci(hcd); void __iomem *non_ehci = hcd->regs; @@ -612,7 +603,7 @@ static int ehci_fsl_drv_resume(struct device *dev) usb_root_hub_lost_power(hcd->self.root_hub); /* Restore USB PHY settings and enable the controller. 
*/ - iowrite32be(ehci_fsl->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL); + iowrite32be(priv->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL); ehci_reset(ehci); ehci_fsl_reinit(ehci); @@ -671,7 +662,7 @@ static int ehci_start_port_reset(struct usb_hcd *hcd, unsigned port) #endif /* CONFIG_USB_OTG */ static const struct ehci_driver_overrides ehci_fsl_overrides __initconst = { - .extra_priv_size = sizeof(struct ehci_fsl), + .extra_priv_size = sizeof(struct ehci_fsl_priv), .reset = ehci_fsl_setup, }; diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c index cdf41886e8ca..6aab45c8525c 100644 --- a/drivers/usb/host/ehci-platform.c +++ b/drivers/usb/host/ehci-platform.c @@ -198,7 +198,8 @@ static void quirk_poll_work(struct work_struct *work) static void quirk_poll_timer(struct timer_list *t) { - struct ehci_platform_priv *priv = from_timer(priv, t, poll_timer); + struct ehci_platform_priv *priv = timer_container_of(priv, t, + poll_timer); struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd, priv); @@ -224,7 +225,7 @@ static void quirk_poll_init(struct ehci_platform_priv *priv) static void quirk_poll_end(struct ehci_platform_priv *priv) { - del_timer_sync(&priv->poll_timer); + timer_delete_sync(&priv->poll_timer); cancel_delayed_work(&priv->poll_work); } diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c index 8f75cb7b197c..5e6b545c30e6 100644 --- a/drivers/usb/host/ehci-sysfs.c +++ b/drivers/usb/host/ehci-sysfs.c @@ -12,21 +12,17 @@ static ssize_t companion_show(struct device *dev, char *buf) { struct ehci_hcd *ehci; - int nports, index, n; - int count = PAGE_SIZE; - char *ptr = buf; + int nports, index; + int len = 0; ehci = hcd_to_ehci(dev_get_drvdata(dev)); nports = HCS_N_PORTS(ehci->hcs_params); for (index = 0; index < nports; ++index) { - if (test_bit(index, &ehci->companion_ports)) { - n = scnprintf(ptr, count, "%d\n", index + 1); - ptr += n; - count -= n; - } + if (test_bit(index, &ehci->companion_ports)) + len += sysfs_emit_at(buf, len, "%d\n", index + 1); } - return ptr - buf; + return len; } /* @@ -70,11 +66,9 @@ static ssize_t uframe_periodic_max_show(struct device *dev, char *buf) { struct ehci_hcd *ehci; - int n; ehci = hcd_to_ehci(dev_get_drvdata(dev)); - n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max); - return n; + return sysfs_emit(buf, "%d\n", ehci->uframe_periodic_max); } diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c index 4e67b9471986..edfb63543029 100644 --- a/drivers/usb/host/fsl-mph-dr-of.c +++ b/drivers/usb/host/fsl-mph-dr-of.c @@ -327,8 +327,7 @@ static void fsl_usb2_mpc5121_exit(struct platform_device *pdev) pdata->regs = NULL; - if (pdata->clk) - clk_disable_unprepare(pdata->clk); + clk_disable_unprepare(pdata->clk); } static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = { diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index 2d3a082cb52f..954fc5ad565b 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -2357,7 +2357,7 @@ static void isp1362_hc_stop(struct usb_hcd *hcd) pr_debug("%s:\n", __func__); - del_timer_sync(&hcd->rh_timer); + timer_delete_sync(&hcd->rh_timer); spin_lock_irqsave(&isp1362_hcd->lock, flags); diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index 0881fdd1823e..dcf31a592f5d 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -1946,6 +1946,12 @@ max3421_remove(struct spi_device *spi) usb_put_hcd(hcd); } +static const 
struct spi_device_id max3421_spi_ids[] = { + { "max3421" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, max3421_spi_ids); + static const struct of_device_id max3421_of_match_table[] = { { .compatible = "maxim,max3421", }, {}, @@ -1955,6 +1961,7 @@ MODULE_DEVICE_TABLE(of, max3421_of_match_table); static struct spi_driver max3421_driver = { .probe = max3421_probe, .remove = max3421_remove, + .id_table = max3421_spi_ids, .driver = { .name = "max3421-hcd", .of_match_table = max3421_of_match_table, diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 5df793dcb25d..12fdb18934cf 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -193,7 +193,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, if (irq < 0) return irq; - hcd = usb_create_hcd(driver, dev, "at91"); + hcd = usb_create_hcd(driver, dev, dev_name(dev)); if (!hcd) return -ENOMEM; ohci_at91 = hcd_to_ohci_at91_priv(hcd); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 9b24181fee60..9c7f3008646e 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -746,7 +746,8 @@ static int ohci_start(struct usb_hcd *hcd) */ static void io_watchdog_func(struct timer_list *t) { - struct ohci_hcd *ohci = from_timer(ohci, t, io_watchdog); + struct ohci_hcd *ohci = timer_container_of(ohci, t, + io_watchdog); bool takeback_all_pending = false; u32 status; u32 head; @@ -1003,7 +1004,7 @@ static void ohci_stop (struct usb_hcd *hcd) if (quirk_nec(ohci)) flush_work(&ohci->nec_work); - del_timer_sync(&ohci->io_watchdog); + timer_delete_sync(&ohci->io_watchdog); ohci->prev_frame_no = IO_WATCHDOG_OFF; ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index 90cee192e96d..b3d734ab6201 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c @@ -315,7 +315,7 @@ static int ohci_bus_suspend (struct usb_hcd *hcd) spin_unlock_irq (&ohci->lock); if (rc == 0) { - del_timer_sync(&ohci->io_watchdog); + timer_delete_sync(&ohci->io_watchdog); ohci->prev_frame_no = IO_WATCHDOG_OFF; } return rc; diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 900ea0d368e0..9f0a6b27e47c 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -165,6 +165,25 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) return 0; } +static int ohci_quirk_loongson(struct usb_hcd *hcd) +{ + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + + /* + * Loongson's LS7A OHCI controller (rev 0x02) has a + * flaw. MMIO register with offset 0x60/64 is treated + * as legacy PS2-compatible keyboard/mouse interface. + * Since OHCI only use 4KB BAR resource, LS7A OHCI's + * 32KB BAR is wrapped around (the 2nd 4KB BAR space + * is the same as the 1st 4KB internally). So add 4KB + * offset (0x1000) to the OHCI registers as a quirk. 
+ */ + if (pdev->revision == 0x2) + hcd->regs += SZ_4K; /* SZ_4K = 0x1000 */ + + return 0; +} + static int ohci_quirk_qemu(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); @@ -225,6 +244,10 @@ static const struct pci_device_id ohci_pci_quirks[] = { .driver_data = (unsigned long)ohci_quirk_amd700, }, { + PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, 0x7a24), + .driver_data = (unsigned long)ohci_quirk_loongson, + }, + { .vendor = PCI_VENDOR_ID_APPLE, .device = 0x003f, .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET, diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c index d7131e5a4477..6843d7cb3f9f 100644 --- a/drivers/usb/host/ohci-spear.c +++ b/drivers/usb/host/ohci-spear.c @@ -103,8 +103,7 @@ static void spear_ohci_hcd_drv_remove(struct platform_device *pdev) struct spear_ohci *sohci_p = to_spear_ohci(hcd); usb_remove_hcd(hcd); - if (sohci_p->clk) - clk_disable_unprepare(sohci_p->clk); + clk_disable_unprepare(sohci_p->clk); usb_put_hcd(hcd); } diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index fce800ba4c61..6b7c73eff081 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -1127,7 +1127,7 @@ static void ehci_mem_cleanup(struct oxu_hcd *oxu) qh_put(oxu->async); oxu->async = NULL; - del_timer(&oxu->urb_timer); + timer_delete(&oxu->urb_timer); oxu->periodic = NULL; @@ -2955,7 +2955,7 @@ static irqreturn_t oxu_irq(struct usb_hcd *hcd) static void oxu_watchdog(struct timer_list *t) { - struct oxu_hcd *oxu = from_timer(oxu, t, watchdog); + struct oxu_hcd *oxu = timer_container_of(oxu, t, watchdog); unsigned long flags; spin_lock_irqsave(&oxu->lock, flags); @@ -3154,7 +3154,7 @@ static void oxu_stop(struct usb_hcd *hcd) ehci_port_power(oxu, 0); /* no more interrupts ... 
*/ - del_timer_sync(&oxu->watchdog); + timer_delete_sync(&oxu->watchdog); spin_lock_irq(&oxu->lock); if (HC_IS_RUNNING(hcd->state)) @@ -3887,7 +3887,7 @@ static int oxu_bus_suspend(struct usb_hcd *hcd) spin_unlock_irq(&oxu->lock); /* turn off now-idle HC */ - del_timer_sync(&oxu->watchdog); + timer_delete_sync(&oxu->watchdog); spin_lock_irq(&oxu->lock); ehci_halt(oxu); hcd->state = HC_STATE_SUSPENDED; diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index a44992e2561b..d21a03cf5c17 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -1720,7 +1720,8 @@ static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port) static void r8a66597_interval_timer(struct timer_list *t) { - struct r8a66597_timers *timers = from_timer(timers, t, interval); + struct r8a66597_timers *timers = timer_container_of(timers, t, + interval); struct r8a66597 *r8a66597 = timers->r8a66597; unsigned long flags; u16 pipenum; @@ -1744,7 +1745,7 @@ static void r8a66597_interval_timer(struct timer_list *t) static void r8a66597_td_timer(struct timer_list *t) { - struct r8a66597_timers *timers = from_timer(timers, t, td); + struct r8a66597_timers *timers = timer_container_of(timers, t, td); struct r8a66597 *r8a66597 = timers->r8a66597; unsigned long flags; u16 pipenum; @@ -1798,7 +1799,7 @@ static void r8a66597_td_timer(struct timer_list *t) static void r8a66597_timer(struct timer_list *t) { - struct r8a66597 *r8a66597 = from_timer(r8a66597, t, rh_timer); + struct r8a66597 *r8a66597 = timer_container_of(r8a66597, t, rh_timer); unsigned long flags; int port; @@ -2384,7 +2385,7 @@ static void r8a66597_remove(struct platform_device *pdev) struct r8a66597 *r8a66597 = platform_get_drvdata(pdev); struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597); - del_timer_sync(&r8a66597->rh_timer); + timer_delete_sync(&r8a66597->rh_timer); usb_remove_hcd(hcd); iounmap(r8a66597->reg); if (r8a66597->pdata->on_chip) diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index fa2e4badd288..ea3cab99c5d4 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -1124,7 +1124,7 @@ sl811h_hub_descriptor ( static void sl811h_timer(struct timer_list *t) { - struct sl811 *sl811 = from_timer(sl811, t, timer); + struct sl811 *sl811 = timer_container_of(sl811, t, timer); unsigned long flags; u8 irqstat; u8 signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE; @@ -1515,7 +1515,7 @@ sl811h_stop(struct usb_hcd *hcd) struct sl811 *sl811 = hcd_to_sl811(hcd); unsigned long flags; - del_timer_sync(&hcd->rh_timer); + timer_delete_sync(&hcd->rh_timer); spin_lock_irqsave(&sl811->lock, flags); port_power(sl811, 0); diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index fd2408b553cf..14e6dfef16c6 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -716,7 +716,7 @@ static void uhci_stop(struct usb_hcd *hcd) spin_unlock_irq(&uhci->lock); synchronize_irq(hcd->irq); - del_timer_sync(&uhci->fsbr_timer); + timer_delete_sync(&uhci->fsbr_timer); release_uhci(uhci); } diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c index a7c934404ebc..62318291f566 100644 --- a/drivers/usb/host/uhci-platform.c +++ b/drivers/usb/host/uhci-platform.c @@ -121,7 +121,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev) } /* Get and enable clock if any specified */ - uhci->clk = devm_clk_get(&pdev->dev, NULL); + uhci->clk = devm_clk_get_optional(&pdev->dev, NULL); if (IS_ERR(uhci->clk)) { ret = 
PTR_ERR(uhci->clk); goto err_rmr; diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index 35fcb826152c..9480d4ff0111 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c @@ -84,14 +84,14 @@ static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp) uhci_fsbr_on(uhci); else if (uhci->fsbr_expiring) { uhci->fsbr_expiring = 0; - del_timer(&uhci->fsbr_timer); + timer_delete(&uhci->fsbr_timer); } } } static void uhci_fsbr_timeout(struct timer_list *t) { - struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer); + struct uhci_hcd *uhci = timer_container_of(uhci, t, fsbr_timer); unsigned long flags; spin_lock_irqsave(&uhci->lock, flags); diff --git a/drivers/usb/host/xen-hcd.c b/drivers/usb/host/xen-hcd.c index 46fdab940092..1c2a95fe41e5 100644 --- a/drivers/usb/host/xen-hcd.c +++ b/drivers/usb/host/xen-hcd.c @@ -327,7 +327,7 @@ static int xenhcd_bus_suspend(struct usb_hcd *hcd) } spin_unlock_irq(&info->lock); - del_timer_sync(&info->watchdog); + timer_delete_sync(&info->watchdog); return ret; } @@ -1258,7 +1258,7 @@ static void xenhcd_disconnect(struct xenbus_device *dev) static void xenhcd_watchdog(struct timer_list *timer) { - struct xenhcd_info *info = from_timer(info, timer, watchdog); + struct xenhcd_info *info = timer_container_of(info, timer, watchdog); unsigned long flags; spin_lock_irqsave(&info->lock, flags); @@ -1307,7 +1307,7 @@ static void xenhcd_stop(struct usb_hcd *hcd) { struct xenhcd_info *info = xenhcd_hcd_to_info(hcd); - del_timer_sync(&info->watchdog); + timer_delete_sync(&info->watchdog); spin_lock_irq(&info->lock); /* cancel all urbs */ hcd->state = HC_STATE_HALT; diff --git a/drivers/usb/host/xhci-caps.h b/drivers/usb/host/xhci-caps.h index f6b9a00a0ab9..4b8ff4815644 100644 --- a/drivers/usb/host/xhci-caps.h +++ b/drivers/usb/host/xhci-caps.h @@ -62,8 +62,8 @@ #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 
64 : 32) -/* db_off bitmask - bits 0:1 reserved */ -#define DBOFF_MASK (~0x3) +/* db_off bitmask - bits 31:2 Doorbell Array Offset */ +#define DBOFF_MASK (0xfffffffc) /* run_regs_off bitmask - bits 0:4 reserved */ #define RTSOFF_MASK (~0x1f) diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index fd7895b24367..06a2edb9e86e 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -652,6 +652,10 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc) case DS_DISABLED: return; case DS_CONFIGURED: + spin_lock(&dbc->lock); + xhci_dbc_flush_requests(dbc); + spin_unlock(&dbc->lock); + if (dbc->driver->disconnect) dbc->driver->disconnect(dbc); break; @@ -823,6 +827,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) { dma_addr_t deq; union xhci_trb *evt; + enum evtreturn ret = EVT_DONE; u32 ctrl, portsc; bool update_erdp = false; @@ -909,6 +914,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) break; case TRB_TYPE(TRB_TRANSFER): dbc_handle_xfer_event(dbc, evt); + ret = EVT_XFER_DONE; break; default: break; @@ -927,7 +933,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) lo_hi_writeq(deq, &dbc->regs->erdp); } - return EVT_DONE; + return ret; } static void xhci_dbc_handle_events(struct work_struct *work) @@ -936,6 +942,7 @@ static void xhci_dbc_handle_events(struct work_struct *work) struct xhci_dbc *dbc; unsigned long flags; unsigned int poll_interval; + unsigned long busypoll_timelimit; dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work); poll_interval = dbc->poll_interval; @@ -954,11 +961,21 @@ static void xhci_dbc_handle_events(struct work_struct *work) dbc->driver->disconnect(dbc); break; case EVT_DONE: - /* set fast poll rate if there are pending data transfers */ + /* + * Set fast poll rate if there are pending out transfers, or + * a transfer was recently processed + */ + busypoll_timelimit = dbc->xfer_timestamp + + msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT); + if (!list_empty(&dbc->eps[BULK_OUT].list_pending) || - !list_empty(&dbc->eps[BULK_IN].list_pending)) + time_is_after_jiffies(busypoll_timelimit)) poll_interval = 0; break; + case EVT_XFER_DONE: + dbc->xfer_timestamp = jiffies; + poll_interval = 0; + break; default: dev_info(dbc->dev, "stop handling dbc events\n"); return; diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h index 9dc8f4d8077c..47ac72c2286d 100644 --- a/drivers/usb/host/xhci-dbgcap.h +++ b/drivers/usb/host/xhci-dbgcap.h @@ -96,6 +96,7 @@ struct dbc_ep { #define DBC_WRITE_BUF_SIZE 8192 #define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */ #define DBC_POLL_INTERVAL_MAX 5000 /* milliseconds */ +#define DBC_XFER_INACTIVITY_TIMEOUT 10 /* milliseconds */ /* * Private structure for DbC hardware state: */ @@ -142,6 +143,7 @@ struct xhci_dbc { enum dbc_state state; struct delayed_work event_work; unsigned int poll_interval; /* ms */ + unsigned long xfer_timestamp; unsigned resume_required:1; struct dbc_ep eps[2]; @@ -187,6 +189,7 @@ struct dbc_request { enum evtreturn { EVT_ERR = -1, EVT_DONE, + EVT_XFER_DONE, EVT_GSER, EVT_DISC, }; diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c index 60ed753c85bb..d894081d8d15 100644 --- a/drivers/usb/host/xhci-dbgtty.c +++ b/drivers/usb/host/xhci-dbgtty.c @@ -617,6 +617,7 @@ int dbc_tty_init(void) dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL; dbc_tty_driver->init_termios = tty_std_termios; + 
dbc_tty_driver->init_termios.c_lflag &= ~ECHO; dbc_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; dbc_tty_driver->init_termios.c_ispeed = 9600; diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index 1f5ef174abea..c6d44977193f 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -631,6 +631,112 @@ static void xhci_debugfs_create_ports(struct xhci_hcd *xhci, } } +static int xhci_port_bw_show(struct xhci_hcd *xhci, u8 dev_speed, + struct seq_file *s) +{ + unsigned int num_ports; + unsigned int i; + int ret; + struct xhci_container_ctx *ctx; + struct usb_hcd *hcd = xhci_to_hcd(xhci); + struct device *dev = hcd->self.controller; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + return ret; + + num_ports = HCS_MAX_PORTS(xhci->hcs_params1); + + ctx = xhci_alloc_port_bw_ctx(xhci, 0); + if (!ctx) { + pm_runtime_put_sync(dev); + return -ENOMEM; + } + + /* get roothub port bandwidth */ + ret = xhci_get_port_bandwidth(xhci, ctx, dev_speed); + if (ret) + goto err_out; + + /* print all roothub ports available bandwidth + * refer to xhci rev1_2 protocol 6.2.6 , byte 0 is reserved + */ + for (i = 1; i < num_ports+1; i++) + seq_printf(s, "port[%d] available bw: %d%%.\n", i, + ctx->bytes[i]); +err_out: + pm_runtime_put_sync(dev); + xhci_free_port_bw_ctx(xhci, ctx); + return ret; +} + +static int xhci_ss_bw_show(struct seq_file *s, void *unused) +{ + int ret; + struct xhci_hcd *xhci = (struct xhci_hcd *)s->private; + + ret = xhci_port_bw_show(xhci, USB_SPEED_SUPER, s); + return ret; +} + +static int xhci_hs_bw_show(struct seq_file *s, void *unused) +{ + int ret; + struct xhci_hcd *xhci = (struct xhci_hcd *)s->private; + + ret = xhci_port_bw_show(xhci, USB_SPEED_HIGH, s); + return ret; +} + +static int xhci_fs_bw_show(struct seq_file *s, void *unused) +{ + int ret; + struct xhci_hcd *xhci = (struct xhci_hcd *)s->private; + + ret = xhci_port_bw_show(xhci, USB_SPEED_FULL, s); + return ret; +} + +static struct xhci_file_map bw_context_files[] = { + {"SS_BW", xhci_ss_bw_show, }, + {"HS_BW", xhci_hs_bw_show, }, + {"FS_BW", xhci_fs_bw_show, }, +}; + +static int bw_context_open(struct inode *inode, struct file *file) +{ + int i; + struct xhci_file_map *f_map; + const char *file_name = file_dentry(file)->d_iname; + + for (i = 0; i < ARRAY_SIZE(bw_context_files); i++) { + f_map = &bw_context_files[i]; + + if (strcmp(f_map->name, file_name) == 0) + break; + } + + return single_open(file, f_map->show, inode->i_private); +} + +static const struct file_operations bw_fops = { + .open = bw_context_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void xhci_debugfs_create_bandwidth(struct xhci_hcd *xhci, + struct dentry *parent) +{ + parent = debugfs_create_dir("port_bandwidth", parent); + + xhci_debugfs_create_files(xhci, bw_context_files, + ARRAY_SIZE(bw_context_files), + xhci, + parent, &bw_fops); +} + void xhci_debugfs_init(struct xhci_hcd *xhci) { struct device *dev = xhci_to_hcd(xhci)->self.controller; @@ -681,6 +787,8 @@ void xhci_debugfs_init(struct xhci_hcd *xhci) xhci->debugfs_slots = debugfs_create_dir("devices", xhci->debugfs_root); xhci_debugfs_create_ports(xhci, xhci->debugfs_root); + + xhci_debugfs_create_bandwidth(xhci, xhci->debugfs_root); } void xhci_debugfs_exit(struct xhci_hcd *xhci) diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c index 8a7d46dae62c..02396c8721dc 100644 --- a/drivers/usb/host/xhci-histb.c +++ b/drivers/usb/host/xhci-histb.c @@ 
-355,7 +355,7 @@ static int __maybe_unused xhci_histb_resume(struct device *dev) if (!device_may_wakeup(dev)) xhci_histb_host_enable(histb); - return xhci_resume(xhci, PMSG_RESUME); + return xhci_resume(xhci, false, false); } static const struct dev_pm_ops xhci_histb_pm_ops = { diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 69c278b64084..92bb84f8132a 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -926,7 +926,7 @@ static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) { xhci->port_status_u0 |= 1 << wIndex; if (xhci->port_status_u0 == all_ports_seen_u0) { - del_timer_sync(&xhci->comp_mode_recovery_timer); + timer_delete_sync(&xhci->comp_mode_recovery_timer); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "All USB3 ports have entered U0 already!"); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, @@ -1878,9 +1878,10 @@ int xhci_bus_resume(struct usb_hcd *hcd) int max_ports, port_index; int sret; u32 next_state; - u32 temp, portsc; + u32 portsc; struct xhci_hub *rhub; struct xhci_port **ports; + bool disabled_irq = false; rhub = xhci_get_rhub(hcd); ports = rhub->ports; @@ -1896,17 +1897,20 @@ int xhci_bus_resume(struct usb_hcd *hcd) return -ESHUTDOWN; } - /* delay the irqs */ - temp = readl(&xhci->op_regs->command); - temp &= ~CMD_EIE; - writel(temp, &xhci->op_regs->command); - /* bus specific resume for ports we suspended at bus_suspend */ - if (hcd->speed >= HCD_USB3) + if (hcd->speed >= HCD_USB3) { next_state = XDEV_U0; - else + } else { next_state = XDEV_RESUME; - + if (bus_state->bus_suspended) { + /* + * prevent port event interrupts from interfering + * with usb2 port resume process + */ + xhci_disable_interrupter(xhci, xhci->interrupters[0]); + disabled_irq = true; + } + } port_index = max_ports; while (port_index--) { portsc = readl(ports[port_index]->addr); @@ -1974,11 +1978,9 @@ int xhci_bus_resume(struct usb_hcd *hcd) (void) readl(&xhci->op_regs->command); bus_state->next_statechange = jiffies + msecs_to_jiffies(5); - /* re-enable irqs */ - temp = readl(&xhci->op_regs->command); - temp |= CMD_EIE; - writel(temp, &xhci->op_regs->command); - temp = readl(&xhci->op_regs->command); + /* re-enable interrupter */ + if (disabled_irq) + xhci_enable_interrupter(xhci->interrupters[0]); spin_unlock_irqrestore(&xhci->lock, flags); return 0; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index fdf0c1008225..07289333a1e8 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -484,6 +484,35 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci, kfree(ctx); } +struct xhci_container_ctx *xhci_alloc_port_bw_ctx(struct xhci_hcd *xhci, + gfp_t flags) +{ + struct xhci_container_ctx *ctx; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + + ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev)); + if (!ctx) + return NULL; + + ctx->size = GET_PORT_BW_ARRAY_SIZE; + + ctx->bytes = dma_pool_zalloc(xhci->port_bw_pool, flags, &ctx->dma); + if (!ctx->bytes) { + kfree(ctx); + return NULL; + } + return ctx; +} + +void xhci_free_port_bw_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx) +{ + if (!ctx) + return; + dma_pool_free(xhci->port_bw_pool, ctx->bytes, ctx->dma); + kfree(ctx); +} + struct xhci_input_control_ctx *xhci_get_input_control_ctx( struct xhci_container_ctx *ctx) { @@ -1166,6 +1195,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud ep0_ctx->deq = 
cpu_to_le64(dev->eps[0].ring->first_seg->dma | dev->eps[0].ring->cycle_state); + ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(8)); + trace_xhci_setup_addressable_virt_device(dev); /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ @@ -1420,6 +1451,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Periodic endpoint bInterval limit quirk */ if (usb_endpoint_xfer_int(&ep->desc) || usb_endpoint_xfer_isoc(&ep->desc)) { + if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_9) && + interval >= 9) { + interval = 8; + } if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) && udev->speed >= USB_SPEED_HIGH && interval >= 7) { @@ -1802,10 +1837,10 @@ xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) */ if (ir->ir_set) { tmp = readl(&ir->ir_set->erst_size); - tmp &= ERST_SIZE_MASK; + tmp &= ~ERST_SIZE_MASK; writel(tmp, &ir->ir_set->erst_size); - xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue); + xhci_update_erst_dequeue(xhci, ir, true); } } @@ -1848,6 +1883,11 @@ void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrup return; } + /* + * Cleanup secondary interrupter to ensure there are no pending events. + * This also updates event ring dequeue pointer back to the start. + */ + xhci_skip_sec_intr_events(xhci, ir->event_ring, ir); intr_num = ir->intr_num; xhci_remove_interrupter(xhci, ir); @@ -1907,6 +1947,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed small stream array pool"); + dma_pool_destroy(xhci->port_bw_pool); + xhci->port_bw_pool = NULL; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Freed xhci port bw array pool"); + dma_pool_destroy(xhci->medium_streams_pool); xhci->medium_streams_pool = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, @@ -1953,7 +1998,6 @@ no_bw: xhci->interrupters = NULL; xhci->page_size = 0; - xhci->page_shift = 0; xhci->usb2_rhub.bus_state.bus_suspended = 0; xhci->usb3_rhub.bus_state.bus_suspended = 0; } @@ -2283,37 +2327,25 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags) return ir; } -static int -xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, - unsigned int intr_num) +void xhci_add_interrupter(struct xhci_hcd *xhci, unsigned int intr_num) { + struct xhci_interrupter *ir; u64 erst_base; u32 erst_size; - if (intr_num >= xhci->max_interrupters) { - xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n", - intr_num, xhci->max_interrupters); - return -EINVAL; - } - - if (xhci->interrupters[intr_num]) { - xhci_warn(xhci, "Interrupter %d\n already set up", intr_num); - return -EINVAL; - } - - xhci->interrupters[intr_num] = ir; + ir = xhci->interrupters[intr_num]; ir->intr_num = intr_num; ir->ir_set = &xhci->run_regs->ir_set[intr_num]; /* set ERST count with the number of entries in the segment table */ erst_size = readl(&ir->ir_set->erst_size); - erst_size &= ERST_SIZE_MASK; + erst_size &= ~ERST_SIZE_MASK; erst_size |= ir->event_ring->num_segs; writel(erst_size, &ir->ir_set->erst_size); erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); - erst_base &= ERST_BASE_RSVDP; - erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP; + erst_base &= ~ERST_BASE_ADDRESS_MASK; + erst_base |= ir->erst.erst_dma_addr & ERST_BASE_ADDRESS_MASK; if (xhci->quirks & XHCI_WRITE_64_HI_LO) hi_lo_writeq(erst_base, &ir->ir_set->erst_base); else @@ -2321,20 +2353,19 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, /* Set the event ring dequeue address of this interrupter */ 
xhci_set_hc_event_deq(xhci, ir); - - return 0; } struct xhci_interrupter * xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, - u32 imod_interval) + u32 imod_interval, unsigned int intr_num) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_interrupter *ir; unsigned int i; int err = -ENOSPC; - if (!xhci->interrupters || xhci->max_interrupters <= 1) + if (!xhci->interrupters || xhci->max_interrupters <= 1 || + intr_num >= xhci->max_interrupters) return NULL; ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL); @@ -2342,15 +2373,23 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, return NULL; spin_lock_irq(&xhci->lock); - - /* Find available secondary interrupter, interrupter 0 is reserved for primary */ - for (i = 1; i < xhci->max_interrupters; i++) { - if (xhci->interrupters[i] == NULL) { - err = xhci_add_interrupter(xhci, ir, i); - break; + if (!intr_num) { + /* Find available secondary interrupter, interrupter 0 is reserved for primary */ + for (i = 1; i < xhci->max_interrupters; i++) { + if (!xhci->interrupters[i]) { + xhci->interrupters[i] = ir; + xhci_add_interrupter(xhci, i); + err = 0; + break; + } + } + } else { + if (!xhci->interrupters[intr_num]) { + xhci->interrupters[intr_num] = ir; + xhci_add_interrupter(xhci, intr_num); + err = 0; } } - spin_unlock_irq(&xhci->lock); if (err) { @@ -2360,13 +2399,10 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, return NULL; } - err = xhci_set_interrupter_moderation(ir, imod_interval); - if (err) - xhci_warn(xhci, "Failed to set interrupter %d moderation to %uns\n", - i, imod_interval); + xhci_set_interrupter_moderation(ir, imod_interval); xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n", - i, xhci->max_interrupters); + ir->intr_num, xhci->max_interrupters); return ir; } @@ -2374,61 +2410,21 @@ EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter); int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) { - struct xhci_interrupter *ir; struct device *dev = xhci_to_hcd(xhci)->self.sysdev; dma_addr_t dma; - unsigned int val, val2; - u64 val_64; - u32 page_size, temp; - int i; - - INIT_LIST_HEAD(&xhci->cmd_list); - - /* init command timeout work */ - INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); - init_completion(&xhci->cmd_ring_stop_completion); - - page_size = readl(&xhci->op_regs->page_size); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Supported page size register = 0x%x", page_size); - i = ffs(page_size); - if (i < 16) - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Supported page size of %iK", (1 << (i+12)) / 1024); - else - xhci_warn(xhci, "WARN: no supported page size\n"); - /* Use 4K pages, since that's common and the minimum the HC supports */ - xhci->page_shift = 12; - xhci->page_size = 1 << xhci->page_shift; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "HCD page size set to %iK", xhci->page_size / 1024); - - /* - * Program the Number of Device Slots Enabled field in the CONFIG - * register with the max value of slots the HC can handle. 
- */ - val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1)); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// xHC can handle at most %d device slots.", val); - val2 = readl(&xhci->op_regs->config_reg); - val |= (val2 & ~HCS_SLOTS_MASK); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Setting Max device slots reg = 0x%x.", val); - writel(val, &xhci->op_regs->config_reg); /* * xHCI section 5.4.6 - Device Context array must be * "physically contiguous and 64-byte (cache line) aligned". */ - xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, - flags); + xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, flags); if (!xhci->dcbaa) goto fail; + xhci->dcbaa->dma = dma; xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Device context base array address = 0x%pad (DMA), %p (virt)", - &xhci->dcbaa->dma, xhci->dcbaa); - xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); + "Device context base array address = 0x%pad (DMA), %p (virt)", + &xhci->dcbaa->dma, xhci->dcbaa); /* * Initialize the ring segment pool. The ring must be a contiguous @@ -2444,92 +2440,77 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) else xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size); + if (!xhci->segment_pool) + goto fail; /* See Table 46 and Note on Figure 55 */ - xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, - 2112, 64, xhci->page_size); - if (!xhci->segment_pool || !xhci->device_pool) + xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 2112, 64, + xhci->page_size); + if (!xhci->device_pool) goto fail; - /* Linear stream context arrays don't have any boundary restrictions, + /* + * Linear stream context arrays don't have any boundary restrictions, * and only need to be 16-byte aligned. */ - xhci->small_streams_pool = - dma_pool_create("xHCI 256 byte stream ctx arrays", - dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); - xhci->medium_streams_pool = - dma_pool_create("xHCI 1KB stream ctx arrays", - dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); - /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE - * will be allocated with dma_alloc_coherent() + xhci->small_streams_pool = dma_pool_create("xHCI 256 byte stream ctx arrays", + dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); + if (!xhci->small_streams_pool) + goto fail; + + /* + * Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE will be + * allocated with dma_alloc_coherent(). */ - if (!xhci->small_streams_pool || !xhci->medium_streams_pool) + xhci->medium_streams_pool = dma_pool_create("xHCI 1KB stream ctx arrays", + dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); + if (!xhci->medium_streams_pool) + goto fail; + + /* + * refer to xhci rev1_2 protocol 5.3.3 max ports is 255. + * refer to xhci rev1_2 protocol 6.4.3.14 port bandwidth buffer need + * to be 16-byte aligned. + */ + xhci->port_bw_pool = dma_pool_create("xHCI 256 port bw ctx arrays", + dev, GET_PORT_BW_ARRAY_SIZE, 16, 0); + if (!xhci->port_bw_pool) goto fail; /* Set up the command ring to have one segments for now. 
*/ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags); if (!xhci->cmd_ring) goto fail; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Allocated command ring at %p", xhci->cmd_ring); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad", - &xhci->cmd_ring->first_seg->dma); - /* Set the address in the Command Ring Control register */ - val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); - val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | - (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | - xhci->cmd_ring->cycle_state; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Setting command ring address to 0x%016llx", val_64); - xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocated command ring at %p", xhci->cmd_ring); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad", + &xhci->cmd_ring->first_seg->dma); - /* Reserve one command ring TRB for disabling LPM. + /* + * Reserve one command ring TRB for disabling LPM. * Since the USB core grabs the shared usb_bus bandwidth mutex before * disabling LPM, we only need to reserve one TRB for all devices. */ xhci->cmd_ring_reserved_trbs++; - val = readl(&xhci->cap_regs->db_off); - val &= DBOFF_MASK; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Doorbell array is located at offset 0x%x from cap regs base addr", - val); - xhci->dba = (void __iomem *) xhci->cap_regs + val; - /* Allocate and set up primary interrupter 0 with an event ring. */ - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Allocating primary event ring"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocating primary event ring"); xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters), flags, dev_to_node(dev)); - - ir = xhci_alloc_interrupter(xhci, 0, flags); - if (!ir) + if (!xhci->interrupters) goto fail; - if (xhci_add_interrupter(xhci, ir, 0)) + xhci->interrupters[0] = xhci_alloc_interrupter(xhci, 0, flags); + if (!xhci->interrupters[0]) goto fail; - ir->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX; - - for (i = 0; i < MAX_HC_SLOTS; i++) - xhci->devs[i] = NULL; - if (scratchpad_alloc(xhci, flags)) goto fail; + if (xhci_setup_port_arrays(xhci, flags)) goto fail; - /* Enable USB 3.0 device notifications for function remote wake, which - * is necessary for allowing USB 3.0 devices to do remote wakeup from - * U3 (device suspend). 
- */ - temp = readl(&xhci->op_regs->dev_notification); - temp &= ~DEV_NOTE_MASK; - temp |= DEV_NOTE_FWAKE; - writel(temp, &xhci->op_regs->dev_notification); - return 0; fail: diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 904831344440..208558cf822d 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -746,10 +746,10 @@ static int __maybe_unused xhci_mtk_suspend(struct device *dev) xhci_dbg(xhci, "%s: stop port polling\n", __func__); clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); - del_timer_sync(&hcd->rh_timer); + timer_delete_sync(&hcd->rh_timer); if (shared_hcd) { clear_bit(HCD_FLAG_POLL_RH, &shared_hcd->flags); - del_timer_sync(&shared_hcd->rh_timer); + timer_delete_sync(&shared_hcd->rh_timer); } ret = xhci_mtk_host_disable(mtk); diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c index 87f1597a0e5a..257e4d79971f 100644 --- a/drivers/usb/host/xhci-mvebu.c +++ b/drivers/usb/host/xhci-mvebu.c @@ -73,13 +73,3 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) return 0; } - -int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - - /* Without reset on resume, the HC won't work at all */ - xhci->quirks |= XHCI_RESET_ON_RESUME; - - return 0; -} diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h index 3be021793cc8..9d26e22c4842 100644 --- a/drivers/usb/host/xhci-mvebu.h +++ b/drivers/usb/host/xhci-mvebu.h @@ -12,16 +12,10 @@ struct usb_hcd; #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU) int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd); -int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd); #else static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) { return 0; } - -static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) -{ - return 0; -} #endif #endif /* __LINUX_XHCI_MVEBU_H */ diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 54460d11f7ee..00fac8b233d2 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -71,12 +71,22 @@ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0 +#define PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI 0x13ed +#define PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI 0x13ee +#define PCI_DEVICE_ID_AMD_STARSHIP_XHCI 0x148c +#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI 0x15d4 +#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI 0x15d5 +#define PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI 0x15e0 +#define PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI 0x15e1 +#define PCI_DEVICE_ID_AMD_RAVEN2_XHCI 0x15e5 #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc +#define PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI 0x7316 + #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 @@ -280,6 +290,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_NEC) xhci->quirks |= XHCI_NEC_HOST; + if (pdev->vendor == PCI_VENDOR_ID_AMD && + (pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_STARSHIP_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI 
|| + pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_RAVEN2_XHCI)) + xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9; + + if (pdev->vendor == PCI_VENDOR_ID_ATI && + pdev->device == PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI) + xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9; + if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96) xhci->quirks |= XHCI_AMD_0x96_HOST; @@ -807,8 +832,10 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg) { - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + bool power_lost = msg.event == PM_EVENT_RESTORE; + bool is_auto_resume = msg.event == PM_EVENT_AUTO_RESUME; reset_control_reset(xhci->reset); @@ -839,7 +866,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg) if (xhci->quirks & XHCI_PME_STUCK_QUIRK) xhci_pme_quirk(hcd); - return xhci_resume(xhci, msg); + return xhci_resume(xhci, power_lost, is_auto_resume); } static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup) diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index d85ffa9ffaa7..5eb51797de32 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -106,7 +106,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = { }; static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { - .init_quirk = xhci_mvebu_a3700_init_quirk, + .quirks = XHCI_RESET_ON_RESUME, }; static const struct xhci_plat_priv xhci_plat_brcm = { @@ -152,7 +152,7 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s int ret; int irq; struct xhci_plat_priv *priv = NULL; - bool of_match; + const struct of_device_id *of_match; if (usb_disabled()) return -ENODEV; @@ -267,6 +267,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s device_property_read_u32(tmpdev, "imod-interval-ns", &xhci->imod_interval); + device_property_read_u16(tmpdev, "num-hc-interrupters", + &xhci->max_interrupters); } /* @@ -326,7 +328,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s } usb3_hcd = xhci_get_usb3_hcd(xhci); - if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4) + if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4 && + !(xhci->quirks & XHCI_BROKEN_STREAMS)) usb3_hcd->can_do_streams = 1; if (xhci->shared_hcd) { @@ -479,9 +482,10 @@ static int xhci_plat_suspend(struct device *dev) return 0; } -static int xhci_plat_resume_common(struct device *dev, struct pm_message pmsg) +static int xhci_plat_resume_common(struct device *dev, bool power_lost) { struct usb_hcd *hcd = dev_get_drvdata(dev); + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); struct xhci_hcd *xhci = hcd_to_xhci(hcd); int ret; @@ -501,7 +505,7 @@ static int xhci_plat_resume_common(struct device *dev, struct pm_message pmsg) if (ret) goto disable_clks; - ret = xhci_resume(xhci, pmsg); + ret = xhci_resume(xhci, power_lost || priv->power_lost, false); if (ret) goto disable_clks; @@ -522,12 +526,12 @@ disable_clks: static int xhci_plat_resume(struct device *dev) { - return xhci_plat_resume_common(dev, PMSG_RESUME); + return xhci_plat_resume_common(dev, false); } static int xhci_plat_restore(struct device *dev) { - return xhci_plat_resume_common(dev, PMSG_RESTORE); + return xhci_plat_resume_common(dev, true); } static int 
__maybe_unused xhci_plat_runtime_suspend(struct device *dev) @@ -548,7 +552,7 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev) struct usb_hcd *hcd = dev_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); - return xhci_resume(xhci, PMSG_AUTO_RESUME); + return xhci_resume(xhci, false, true); } const struct dev_pm_ops xhci_plat_pm_ops = { diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h index 6475130eac4b..fe4f95e690fa 100644 --- a/drivers/usb/host/xhci-plat.h +++ b/drivers/usb/host/xhci-plat.h @@ -15,6 +15,7 @@ struct usb_hcd; struct xhci_plat_priv { const char *firmware_name; unsigned long long quirks; + bool power_lost; void (*plat_start)(struct usb_hcd *); int (*init_quirk)(struct usb_hcd *); int (*suspend_quirk)(struct usb_hcd *); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 965bffce301e..ecd757d482c5 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -204,6 +204,50 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) } /* + * If enqueue points at a link TRB, follow links until an ordinary TRB is reached. + * Toggle the cycle bit of passed link TRBs and optionally chain them. + */ +static void inc_enq_past_link(struct xhci_hcd *xhci, struct xhci_ring *ring, u32 chain) +{ + unsigned int link_trb_count = 0; + + while (trb_is_link(ring->enqueue)) { + + /* + * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit + * set, but other sections talk about dealing with the chain bit set. This was + * fixed in the 0.96 specification errata, but we have to assume that all 0.95 + * xHCI hardware can't handle the chain bit being cleared on a link TRB. + * + * On 0.95 and some 0.96 HCs the chain bit is set once at segment initalization + * and never changed here. On all others, modify it as requested by the caller. + */ + if (!xhci_link_chain_quirk(xhci, ring->type)) { + ring->enqueue->link.control &= cpu_to_le32(~TRB_CHAIN); + ring->enqueue->link.control |= cpu_to_le32(chain); + } + + /* Give this link TRB to the hardware */ + wmb(); + ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); + + /* Toggle the cycle bit after the last ring segment. */ + if (link_trb_toggles_cycle(ring->enqueue)) + ring->cycle_state ^= 1; + + ring->enq_seg = ring->enq_seg->next; + ring->enqueue = ring->enq_seg->trbs; + + trace_xhci_inc_enq(ring); + + if (link_trb_count++ > ring->num_segs) { + xhci_warn(xhci, "Link TRB loop at enqueue\n"); + break; + } + } +} + +/* * See Cycle bit rules. SW is the consumer for the event ring only. * * If we've just enqueued a TRB that is in the middle of a TD (meaning the @@ -211,11 +255,6 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) * If we've enqueued the last TRB in a TD, make sure the following link TRBs * have their chain bit cleared (so that each Link TRB is a separate TD). * - * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit - * set, but other sections talk about dealing with the chain bit set. This was - * fixed in the 0.96 specification errata, but we have to assume that all 0.95 - * xHCI hardware can't handle the chain bit being cleared on a link TRB. - * * @more_trbs_coming: Will you enqueue more TRBs before calling * prepare_transfer()? 
*/ @@ -223,8 +262,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool more_trbs_coming) { u32 chain; - union xhci_trb *next; - unsigned int link_trb_count = 0; chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN; @@ -233,48 +270,67 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, return; } - next = ++(ring->enqueue); + ring->enqueue++; - /* Update the dequeue pointer further if that was a link TRB */ - while (trb_is_link(next)) { + /* + * If we are in the middle of a TD or the caller plans to enqueue more + * TDs as one transfer (eg. control), traverse any link TRBs right now. + * Otherwise, enqueue can stay on a link until the next prepare_ring(). + * This avoids enqueue entering deq_seg and simplifies ring expansion. + */ + if (trb_is_link(ring->enqueue) && (chain || more_trbs_coming)) + inc_enq_past_link(xhci, ring, chain); +} - /* - * If the caller doesn't plan on enqueueing more TDs before - * ringing the doorbell, then we don't want to give the link TRB - * to the hardware just yet. We'll give the link TRB back in - * prepare_ring() just before we enqueue the TD at the top of - * the ring. - */ - if (!chain && !more_trbs_coming) - break; +/* + * If the suspect DMA address is a TRB in this TD, this function returns that + * TRB's segment. Otherwise it returns 0. + */ +static struct xhci_segment *trb_in_td(struct xhci_td *td, dma_addr_t suspect_dma) +{ + dma_addr_t start_dma; + dma_addr_t end_seg_dma; + dma_addr_t end_trb_dma; + struct xhci_segment *cur_seg; - /* If we're not dealing with 0.95 hardware or isoc rings on - * AMD 0.96 host, carry over the chain bit of the previous TRB - * (which may mean the chain bit is cleared). - */ - if (!xhci_link_chain_quirk(xhci, ring->type)) { - next->link.control &= cpu_to_le32(~TRB_CHAIN); - next->link.control |= cpu_to_le32(chain); - } - /* Give this link TRB to the hardware */ - wmb(); - next->link.control ^= cpu_to_le32(TRB_CYCLE); + start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb); + cur_seg = td->start_seg; - /* Toggle the cycle bit after the last ring segment. 
*/ - if (link_trb_toggles_cycle(next)) - ring->cycle_state ^= 1; + do { + if (start_dma == 0) + return NULL; + /* We may get an event for a Link TRB in the middle of a TD */ + end_seg_dma = xhci_trb_virt_to_dma(cur_seg, + &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); + /* If the end TRB isn't in this segment, this is set to 0 */ + end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb); - ring->enq_seg = ring->enq_seg->next; - ring->enqueue = ring->enq_seg->trbs; - next = ring->enqueue; + if (end_trb_dma > 0) { + /* The end TRB is in this segment, so suspect should be here */ + if (start_dma <= end_trb_dma) { + if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) + return cur_seg; + } else { + /* Case for one segment with + * a TD wrapped around to the top + */ + if ((suspect_dma >= start_dma && + suspect_dma <= end_seg_dma) || + (suspect_dma >= cur_seg->dma && + suspect_dma <= end_trb_dma)) + return cur_seg; + } + return NULL; + } + /* Might still be somewhere in this segment */ + if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) + return cur_seg; - trace_xhci_inc_enq(ring); + cur_seg = cur_seg->next; + start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); + } while (cur_seg != td->start_seg); - if (link_trb_count++ > ring->num_segs) { - xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__); - break; - } - } + return NULL; } /* @@ -462,9 +518,8 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) * In the future we should distinguish between -ENODEV and -ETIMEDOUT * and try to recover a -ETIMEDOUT with a host controller reset. */ - ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring, - CMD_RING_RUNNING, 0, 5 * 1000 * 1000, - XHCI_STATE_REMOVING); + ret = xhci_handshake(&xhci->op_regs->cmd_ring, + CMD_RING_RUNNING, 0, 5 * 1000 * 1000); if (ret < 0) { xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret); xhci_halt(xhci); @@ -643,7 +698,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, int new_cycle; dma_addr_t addr; u64 hw_dequeue; - bool cycle_found = false; + bool hw_dequeue_found = false; bool td_last_trb_found = false; u32 trb_sct = 0; int ret; @@ -659,25 +714,24 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id); new_seg = ep_ring->deq_seg; new_deq = ep_ring->dequeue; - new_cycle = hw_dequeue & 0x1; + new_cycle = le32_to_cpu(td->end_trb->generic.field[3]) & TRB_CYCLE; /* - * We want to find the pointer, segment and cycle state of the new trb - * (the one after current TD's end_trb). We know the cycle state at - * hw_dequeue, so walk the ring until both hw_dequeue and end_trb are - * found. + * Walk the ring until both the next TRB and hw_dequeue are found (don't + * move hw_dequeue back if it went forward due to a HW bug). Cycle state + * is loaded from a known good TRB, track later toggles to maintain it. 
*/ do { - if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq) + if (!hw_dequeue_found && xhci_trb_virt_to_dma(new_seg, new_deq) == (dma_addr_t)(hw_dequeue & ~0xf)) { - cycle_found = true; + hw_dequeue_found = true; if (td_last_trb_found) break; } if (new_deq == td->end_trb) td_last_trb_found = true; - if (cycle_found && trb_is_link(new_deq) && + if (td_last_trb_found && trb_is_link(new_deq) && link_trb_toggles_cycle(new_deq)) new_cycle ^= 0x1; @@ -689,7 +743,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, return -EINVAL; } - } while (!cycle_found || !td_last_trb_found); + } while (!hw_dequeue_found || !td_last_trb_found); /* Don't update the ring cycle state for the producer (us). */ addr = xhci_trb_virt_to_dma(new_seg, new_deq); @@ -1014,7 +1068,7 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) td->urb->stream_id); hw_deq &= ~0xf; - if (td->cancel_status == TD_HALTED || trb_in_td(xhci, td, hw_deq, false)) { + if (td->cancel_status == TD_HALTED || trb_in_td(td, hw_deq)) { switch (td->cancel_status) { case TD_CLEARED: /* TD is already no-op */ case TD_CLEARING_CACHE: /* set TR deq command already queued */ @@ -1104,7 +1158,7 @@ static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep) hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0); hw_deq &= ~0xf; td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list); - if (trb_in_td(ep->xhci, td, hw_deq, false)) + if (trb_in_td(td, hw_deq)) return td; } return NULL; @@ -1164,7 +1218,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, */ switch (GET_EP_CTX_STATE(ep_ctx)) { case EP_STATE_HALTED: - xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n"); + xhci_dbg(xhci, "Stop ep completion raced with stall\n"); + /* + * If the halt happened before Stop Endpoint failed, its transfer event + * should have already been handled and Reset Endpoint should be pending. + */ + if (ep->ep_state & EP_HALTED) + goto reset_done; + if (ep->ep_state & EP_HAS_STREAMS) { reset_type = EP_SOFT_RESET; } else { @@ -1175,8 +1236,11 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, } /* reset ep, reset handler cleans up cancelled tds */ err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type); + xhci_dbg(xhci, "Stop ep completion resetting ep, status %d\n", err); if (err) break; +reset_done: + /* Reset EP handler will clean up cancelled TDs */ ep->ep_state &= ~EP_STOP_CMD_PENDING; return; case EP_STATE_STOPPED: @@ -1198,16 +1262,19 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, * Stopped state, but it will soon change to Running. * * Assume this bug on unexpected Stop Endpoint failures. - * Keep retrying until the EP starts and stops again, on - * chips where this is known to help. Wait for 100ms. + * Keep retrying until the EP starts and stops again. */ - if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100))) - break; fallthrough; case EP_STATE_RUNNING: /* Race, HW handled stop ep cmd before ep was running */ xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n", GET_EP_CTX_STATE(ep_ctx)); + /* + * Don't retry forever if we guessed wrong or a defective HC never starts + * the EP or says 'Running' but fails the command. We must give back TDs. 
+ */ + if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100))) + break; command = xhci_alloc_command(xhci, false, GFP_ATOMIC); if (!command) { @@ -1309,12 +1376,15 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, */ void xhci_hc_died(struct xhci_hcd *xhci) { + bool notify; int i, j; if (xhci->xhc_state & XHCI_STATE_DYING) return; - xhci_err(xhci, "xHCI host controller not responding, assume dead\n"); + notify = !(xhci->xhc_state & XHCI_STATE_REMOVING); + if (notify) + xhci_err(xhci, "xHCI host controller not responding, assume dead\n"); xhci->xhc_state |= XHCI_STATE_DYING; xhci_cleanup_command_queue(xhci); @@ -1328,47 +1398,10 @@ void xhci_hc_died(struct xhci_hcd *xhci) } /* inform usb core hc died if PCI remove isn't already handling it */ - if (!(xhci->xhc_state & XHCI_STATE_REMOVING)) + if (notify) usb_hc_died(xhci_to_hcd(xhci)); } -static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci, - struct xhci_virt_device *dev, - struct xhci_ring *ep_ring, - unsigned int ep_index) -{ - union xhci_trb *dequeue_temp; - - dequeue_temp = ep_ring->dequeue; - - /* If we get two back-to-back stalls, and the first stalled transfer - * ends just before a link TRB, the dequeue pointer will be left on - * the link TRB by the code in the while loop. So we have to update - * the dequeue pointer one segment further, or we'll jump off - * the segment into la-la-land. - */ - if (trb_is_link(ep_ring->dequeue)) { - ep_ring->deq_seg = ep_ring->deq_seg->next; - ep_ring->dequeue = ep_ring->deq_seg->trbs; - } - - while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) { - /* We have more usable TRBs */ - ep_ring->dequeue++; - if (trb_is_link(ep_ring->dequeue)) { - if (ep_ring->dequeue == - dev->eps[ep_index].queued_deq_ptr) - break; - ep_ring->deq_seg = ep_ring->deq_seg->next; - ep_ring->dequeue = ep_ring->deq_seg->trbs; - } - if (ep_ring->dequeue == dequeue_temp) { - xhci_dbg(xhci, "Unable to find new dequeue pointer\n"); - break; - } - } -} - /* * When we get a completion for a Set Transfer Ring Dequeue Pointer command, * we need to clear the set deq pending flag in the endpoint ring state, so that @@ -1473,8 +1506,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, /* Update the ring's dequeue segment and dequeue pointer * to reflect the new position. */ - update_ring_for_set_deq_completion(xhci, ep->vdev, - ep_ring, ep_index); + ep_ring->deq_seg = ep->queued_deq_seg; + ep_ring->dequeue = ep->queued_deq_ptr; } else { xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n"); xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", @@ -1867,6 +1900,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, case TRB_NEC_GET_FW: xhci_handle_cmd_nec_get_fw(xhci, event); break; + case TRB_GET_BW: + break; default: /* Skip over unknown commands on the event ring */ xhci_info(xhci, "INFO unknown command type %d\n", cmd_type); @@ -2116,67 +2151,6 @@ cleanup: spin_lock(&xhci->lock); } -/* - * If the suspect DMA address is a TRB in this TD, this function returns that - * TRB's segment. Otherwise it returns 0. 
- */ -struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td, dma_addr_t suspect_dma, - bool debug) -{ - dma_addr_t start_dma; - dma_addr_t end_seg_dma; - dma_addr_t end_trb_dma; - struct xhci_segment *cur_seg; - - start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb); - cur_seg = td->start_seg; - - do { - if (start_dma == 0) - return NULL; - /* We may get an event for a Link TRB in the middle of a TD */ - end_seg_dma = xhci_trb_virt_to_dma(cur_seg, - &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); - /* If the end TRB isn't in this segment, this is set to 0 */ - end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb); - - if (debug) - xhci_warn(xhci, - "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n", - (unsigned long long)suspect_dma, - (unsigned long long)start_dma, - (unsigned long long)end_trb_dma, - (unsigned long long)cur_seg->dma, - (unsigned long long)end_seg_dma); - - if (end_trb_dma > 0) { - /* The end TRB is in this segment, so suspect should be here */ - if (start_dma <= end_trb_dma) { - if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) - return cur_seg; - } else { - /* Case for one segment with - * a TD wrapped around to the top - */ - if ((suspect_dma >= start_dma && - suspect_dma <= end_seg_dma) || - (suspect_dma >= cur_seg->dma && - suspect_dma <= end_trb_dma)) - return cur_seg; - } - return NULL; - } else { - /* Might still be somewhere in this segment */ - if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) - return cur_seg; - } - cur_seg = cur_seg->next; - start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); - } while (cur_seg != td->start_seg); - - return NULL; -} - static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td, struct xhci_virt_ep *ep) { @@ -2476,6 +2450,12 @@ static void process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, if (ep_trb != td->end_trb) td->error_mid_td = true; break; + case COMP_MISSED_SERVICE_ERROR: + frame->status = -EXDEV; + sum_trbs_for_length = true; + if (ep_trb != td->end_trb) + td->error_mid_td = true; + break; case COMP_INCOMPATIBLE_DEVICE_ERROR: case COMP_STALL_ERROR: frame->status = -EPROTO; @@ -2644,6 +2624,22 @@ static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_ return 0; } +static bool xhci_spurious_success_tx_event(struct xhci_hcd *xhci, + struct xhci_ring *ring) +{ + switch (ring->old_trb_comp_code) { + case COMP_SHORT_PACKET: + return xhci->quirks & XHCI_SPURIOUS_SUCCESS; + case COMP_USB_TRANSACTION_ERROR: + case COMP_BABBLE_DETECTED_ERROR: + case COMP_ISOCH_BUFFER_OVERRUN: + return xhci->quirks & XHCI_ETRON_HOST && + ring->type == TYPE_ISOC; + default: + return false; + } +} + /* * If this function returns an error condition, it means it got a Transfer * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. 
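For illustration, the reworked trb_in_td() replacing the removed version above walks the TD's segments and tests whether the event's DMA address falls inside the TD, including the one-segment case where the TD wraps past the segment end back to the top. A minimal, self-contained sketch of that per-segment containment test follows (illustrative only, not the kernel code: dma_addr_t is modeled as uint64_t and td_covers_dma is a hypothetical name):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel type */

/*
 * Does 'suspect' hit a TRB of a TD occupying [td_start, td_end] inside a
 * segment spanning [seg_start, seg_end]?  Mirrors the per-segment test in
 * trb_in_td(), including the case where a TD wraps from the end of its
 * only segment back to the top.
 */
static bool td_covers_dma(dma_addr_t seg_start, dma_addr_t seg_end,
			  dma_addr_t td_start, dma_addr_t td_end,
			  dma_addr_t suspect)
{
	if (td_start <= td_end)		/* no wrap: one contiguous range */
		return suspect >= td_start && suspect <= td_end;

	/* wrapped: TD covers the segment tail plus the segment head */
	return (suspect >= td_start && suspect <= seg_end) ||
	       (suspect >= seg_start && suspect <= td_end);
}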
@@ -2664,6 +2660,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, int status = -EINPROGRESS; struct xhci_ep_ctx *ep_ctx; u32 trb_comp_code; + bool ring_xrun_event = false; slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; @@ -2697,8 +2694,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, case COMP_SUCCESS: if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { trb_comp_code = COMP_SHORT_PACKET; - xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n", - slot_id, ep_index, ep_ring->last_td_was_short); + xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td comp code %d\n", + slot_id, ep_index, ep_ring->old_trb_comp_code); } break; case COMP_SHORT_PACKET: @@ -2770,14 +2767,12 @@ static int handle_tx_event(struct xhci_hcd *xhci, * Underrun Event for OUT Isoch endpoint. */ xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index); - if (ep->skip) - break; - return 0; + ring_xrun_event = true; + break; case COMP_RING_OVERRUN: xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index); - if (ep->skip) - break; - return 0; + ring_xrun_event = true; + break; case COMP_MISSED_SERVICE_ERROR: /* * When encounter missed service error, one or more isoc tds @@ -2787,9 +2782,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, */ ep->skip = true; xhci_dbg(xhci, - "Miss service interval error for slot %u ep %u, set skip flag\n", - slot_id, ep_index); - return 0; + "Miss service interval error for slot %u ep %u, set skip flag%s\n", + slot_id, ep_index, ep_trb_dma ? ", skip now" : ""); + break; case COMP_NO_PING_RESPONSE_ERROR: ep->skip = true; xhci_dbg(xhci, @@ -2832,11 +2827,15 @@ static int handle_tx_event(struct xhci_hcd *xhci, */ td = list_first_entry_or_null(&ep_ring->td_list, struct xhci_td, td_list); - if (td && td->error_mid_td && !trb_in_td(xhci, td, ep_trb_dma, false)) { + if (td && td->error_mid_td && !trb_in_td(td, ep_trb_dma)) { xhci_dbg(xhci, "Missing TD completion event after mid TD error\n"); xhci_dequeue_td(xhci, td, ep_ring, td->status); } + /* If the TRB pointer is NULL, missed TDs will be skipped on the next event */ + if (trb_comp_code == COMP_MISSED_SERVICE_ERROR && !ep_trb_dma) + return 0; + if (list_empty(&ep_ring->td_list)) { /* * Don't print warnings if ring is empty due to a stopped endpoint generating an * extra completion event if the device was suspended. Or, an event for the last TRB * of a short TD we already got a short event for. The short TD is already removed * from the TD list. */ if (trb_comp_code != COMP_STOPPED && trb_comp_code != COMP_STOPPED_LENGTH_INVALID && - !ep_ring->last_td_was_short) { + !ring_xrun_event && + !xhci_spurious_success_tx_event(xhci, ep_ring)) { xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n", slot_id, ep_index); } @@ -2860,14 +2860,31 @@ static int handle_tx_event(struct xhci_hcd *xhci, td_list); /* Is this a TRB in the currently executing TD? */ - ep_seg = trb_in_td(xhci, td, ep_trb_dma, false); + ep_seg = trb_in_td(td, ep_trb_dma); if (!ep_seg) { if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { + /* this event is unlikely to match any TD, don't skip them all */ + if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID) + return 0; + skip_isoc_td(xhci, td, ep, status); + + if (!list_empty(&ep_ring->td_list)) { + if (ring_xrun_event) { + /* + * If we are here, we are on xHCI 1.0 host with no + * idea how many TDs were missed or where the xrun + * occurred.
New TDs may have been added after the + * xrun, so skip only one TD to be safe. + */ + xhci_dbg(xhci, "Skipped one TD for slot %u ep %u", + slot_id, ep_index); + return 0; + } continue; + } xhci_dbg(xhci, "All TDs skipped for slot %u ep %u. Clear skip flag.\n", slot_id, ep_index); @@ -2876,6 +2893,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, goto check_endpoint_halted; } + /* TD was queued after xrun, maybe xrun was on a link, don't panic yet */ + if (ring_xrun_event) + return 0; + /* * Skip the Force Stopped Event. The 'ep_trb' of FSE is not in the current * TD pointed by 'ep_ring->dequeue' because that the hardware dequeue @@ -2890,21 +2911,17 @@ static int handle_tx_event(struct xhci_hcd *xhci, /* * Some hosts give a spurious success event after a short - * transfer. Ignore it. + * transfer or error on last TRB. Ignore it. */ - if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && - ep_ring->last_td_was_short) { - ep_ring->last_td_was_short = false; + if (xhci_spurious_success_tx_event(xhci, ep_ring)) { + xhci_dbg(xhci, "Spurious event dma %pad, comp_code %u after %u\n", + &ep_trb_dma, trb_comp_code, ep_ring->old_trb_comp_code); + ep_ring->old_trb_comp_code = 0; return 0; } /* HC is busted, give up! */ - xhci_err(xhci, - "ERROR Transfer event TRB DMA ptr not part of current TD ep_index %d comp_code %u\n", - ep_index, trb_comp_code); - trb_in_td(xhci, td, ep_trb_dma, true); - - return -ESHUTDOWN; + goto debug_finding_td; } if (ep->skip) { @@ -2922,10 +2939,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, */ } while (ep->skip); - if (trb_comp_code == COMP_SHORT_PACKET) - ep_ring->last_td_was_short = true; - else - ep_ring->last_td_was_short = false; + ep_ring->old_trb_comp_code = trb_comp_code; + + /* Get out if a TD was queued at enqueue after the xrun occurred */ + if (ring_xrun_event) + return 0; ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)]; trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb, ep_trb_dma); @@ -2957,6 +2975,14 @@ check_endpoint_halted: return 0; +debug_finding_td: + xhci_err(xhci, "Event dma %pad for ep %d status %d not part of TD at %016llx - %016llx\n", + &ep_trb_dma, ep_index, trb_comp_code, + (unsigned long long)xhci_trb_virt_to_dma(td->start_seg, td->start_trb), + (unsigned long long)xhci_trb_virt_to_dma(td->end_seg, td->end_trb)); + + return -ESHUTDOWN; + err_out: xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", (unsigned long long) xhci_trb_virt_to_dma( @@ -3025,9 +3051,9 @@ static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter * - When all events have finished * - To avoid "Event Ring Full Error" condition */ -static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, - struct xhci_interrupter *ir, - bool clear_ehb) +void xhci_update_erst_dequeue(struct xhci_hcd *xhci, + struct xhci_interrupter *ir, + bool clear_ehb) { u64 temp_64; dma_addr_t deq; @@ -3058,11 +3084,14 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir) { if (!ir->ip_autoclear) { - u32 irq_pending; + u32 iman; - irq_pending = readl(&ir->ir_set->irq_pending); - irq_pending |= IMAN_IP; - writel(irq_pending, &ir->ir_set->irq_pending); + iman = readl(&ir->ir_set->iman); + iman |= IMAN_IP; + writel(iman, &ir->ir_set->iman); + + /* Read operation to guarantee the write has been flushed from posted buffers */ + readl(&ir->ir_set->iman); } } @@ -3070,10 +3099,11 @@ static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir) * Handle all 
OS-owned events on an interrupter event ring. It may drop * and reacquire xhci->lock between event processing. */ -static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir) +static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir, + bool skip_events) { int event_loop = 0; - int err; + int err = 0; u64 temp; xhci_clear_interrupt_pending(ir); @@ -3096,7 +3126,8 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir /* Process all OS owned event TRBs on this event ring */ while (unhandled_event_trb(ir->event_ring)) { - err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue); + if (!skip_events) + err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue); /* * If half a segment of events have been handled in one go then @@ -3124,6 +3155,37 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir } /* + * Move the event ring dequeue pointer to skip events kept in the secondary + * event ring. This is used to ensure that pending events in the ring are + * acknowledged, so the xHCI HCD can properly enter suspend/resume. The + * secondary ring is typically maintained by an external component. + */ +void xhci_skip_sec_intr_events(struct xhci_hcd *xhci, + struct xhci_ring *ring, struct xhci_interrupter *ir) +{ + union xhci_trb *current_trb; + u64 erdp_reg; + dma_addr_t deq; + + /* disable irq, ack pending interrupt and ack all pending events */ + xhci_disable_interrupter(xhci, ir); + + /* last acked event trb is in erdp reg */ + erdp_reg = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); + deq = (dma_addr_t)(erdp_reg & ERST_PTR_MASK); + if (!deq) { + xhci_err(xhci, "event ring handling not required\n"); + return; + } + + current_trb = ir->event_ring->dequeue; + /* read cycle state of the last acked trb to find out CCS */ + ring->cycle_state = le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE; + + xhci_handle_events(xhci, ir, true); +} + +/* * xHCI spec says we can get an interrupt, and if the HC has an error condition, * we might get bad data out of the event ring. Section 4.10.2.7 has a list of * indicators of an event TRB error, but we check the status *first* to be safe. @@ -3167,7 +3229,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) writel(status, &xhci->op_regs->status); /* This is the handler of the primary interrupter */ - xhci_handle_events(xhci, xhci->interrupters[0]); + xhci_handle_events(xhci, xhci->interrupters[0], false); out: spin_unlock(&xhci->lock); @@ -3216,7 +3278,6 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) { - unsigned int link_trb_count = 0; unsigned int new_segs = 0; /* Make sure the endpoint has been added to xHC schedule */ @@ -3264,33 +3325,9 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, } } - while (trb_is_link(ep_ring->enqueue)) { - /* If we're not dealing with 0.95 hardware or isoc rings - * on AMD 0.96 host, clear the chain bit. - */ - if (!xhci_link_chain_quirk(xhci, ep_ring->type)) - ep_ring->enqueue->link.control &= - cpu_to_le32(~TRB_CHAIN); - else - ep_ring->enqueue->link.control |= - cpu_to_le32(TRB_CHAIN); - - wmb(); - ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); - - /* Toggle the cycle bit after the last ring segment.
*/ - if (link_trb_toggles_cycle(ep_ring->enqueue)) - ep_ring->cycle_state ^= 1; - - ep_ring->enq_seg = ep_ring->enq_seg->next; - ep_ring->enqueue = ep_ring->enq_seg->trbs; - - /* prevent infinite loop if all first trbs are link trbs */ - if (link_trb_count++ > ep_ring->num_segs) { - xhci_warn(xhci, "Ring is an endless link TRB loop\n"); - return -EINVAL; - } - } + /* Ensure that new TRBs won't overwrite a link */ + if (trb_is_link(ep_ring->enqueue)) + inc_enq_past_link(xhci, ep_ring, 0); if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) { xhci_warn(xhci, "Missing link TRB at end of ring segment\n"); @@ -3776,7 +3813,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, * enqueue a No Op TRB, this can prevent the Setup and Data Stage * TRB from being broken up by the Link TRB. */ - if (trb_is_link(ep_ring->enqueue + 1)) { + if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue + 1)) { field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state; queue_trb(xhci, ep_ring, false, 0, 0, TRB_INTR_TARGET(0), field); @@ -4338,7 +4375,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, if ((xhci->xhc_state & XHCI_STATE_DYING) || (xhci->xhc_state & XHCI_STATE_HALTED)) { - xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); + xhci_dbg(xhci, "xHCI dying or halted, can't queue_command. state: 0x%x\n", + xhci->xhc_state); return -ESHUTDOWN; } @@ -4414,6 +4452,17 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, command_must_succeed); } +/* Queue a get root hub port bandwidth command TRB */ +int xhci_queue_get_port_bw(struct xhci_hcd *xhci, + struct xhci_command *cmd, dma_addr_t in_ctx_ptr, + u8 dev_speed, bool command_must_succeed) +{ + return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), + upper_32_bits(in_ctx_ptr), 0, + TRB_TYPE(TRB_GET_BW) | DEV_SPEED_FOR_TRB(dev_speed), + command_must_succeed); +} + /* Queue an evaluate context command TRB */ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) diff --git a/drivers/usb/host/xhci-sideband.c b/drivers/usb/host/xhci-sideband.c new file mode 100644 index 000000000000..d49f9886dd84 --- /dev/null +++ b/drivers/usb/host/xhci-sideband.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * xHCI host controller sideband support + * + * Copyright (c) 2023-2025, Intel Corporation. + * + * Author: Mathias Nyman + */ + +#include <linux/usb/xhci-sideband.h> +#include <linux/dma-direct.h> + +#include "xhci.h" + +/* sideband internal helpers */ +static struct sg_table * +xhci_ring_to_sgtable(struct xhci_sideband *sb, struct xhci_ring *ring) +{ + struct xhci_segment *seg; + struct sg_table *sgt; + unsigned int n_pages; + struct page **pages; + struct device *dev; + size_t sz; + int i; + + dev = xhci_to_hcd(sb->xhci)->self.sysdev; + sz = ring->num_segs * TRB_SEGMENT_SIZE; + n_pages = PAGE_ALIGN(sz) >> PAGE_SHIFT; + pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL); + if (!pages) + return NULL; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + kvfree(pages); + return NULL; + } + + seg = ring->first_seg; + if (!seg) + goto err; + /* + * Rings can potentially have multiple segments, so create an array that + * carries page references to allocated segments. Utilize + * sg_alloc_table_from_pages() to create the sg table, and to ensure + * that page links are created.
+ */ + for (i = 0; i < ring->num_segs; i++) { + dma_get_sgtable(dev, sgt, seg->trbs, seg->dma, + TRB_SEGMENT_SIZE); + pages[i] = sg_page(sgt->sgl); + sg_free_table(sgt); + seg = seg->next; + } + + if (sg_alloc_table_from_pages(sgt, pages, n_pages, 0, sz, GFP_KERNEL)) + goto err; + + /* + * Save first segment dma address to sg dma_address field for the sideband + * client to have access to the IOVA of the ring. + */ + sg_dma_address(sgt->sgl) = ring->first_seg->dma; + + return sgt; + +err: + kvfree(pages); + kfree(sgt); + + return NULL; +} + +static void +__xhci_sideband_remove_endpoint(struct xhci_sideband *sb, struct xhci_virt_ep *ep) +{ + /* + * Issue a stop endpoint command when an endpoint is removed. + * The stop ep cmd handler will handle the ring cleanup. + */ + xhci_stop_endpoint_sync(sb->xhci, ep, 0, GFP_KERNEL); + + ep->sideband = NULL; + sb->eps[ep->ep_index] = NULL; +} + +/* sideband api functions */ + +/** + * xhci_sideband_notify_ep_ring_free - notify client of xfer ring free + * @sb: sideband instance for this usb device + * @ep_index: usb endpoint index + * + * Notifies the xHCI sideband client driver of an xHCI transfer ring free + * routine. This will allow for the client to ensure that all transfers + * are completed. + * + * The callback should be synchronous, as the ring free happens after. + */ +void xhci_sideband_notify_ep_ring_free(struct xhci_sideband *sb, + unsigned int ep_index) +{ + struct xhci_sideband_event evt; + + evt.type = XHCI_SIDEBAND_XFER_RING_FREE; + evt.evt_data = &ep_index; + + if (sb->notify_client) + sb->notify_client(sb->intf, &evt); +} +EXPORT_SYMBOL_GPL(xhci_sideband_notify_ep_ring_free); + +/** + * xhci_sideband_add_endpoint - add endpoint to sideband access list + * @sb: sideband instance for this usb device + * @host_ep: usb host endpoint + * + * Adds an endpoint to the list of sideband accessed endpoints for this usb + * device. + * After an endpoint is added the sideband client can get the endpoint transfer + * ring buffer by calling xhci_sideband_get_endpoint_buffer(). + * + * Return: 0 on success, negative error otherwise. + */ +int +xhci_sideband_add_endpoint(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep) +{ + struct xhci_virt_ep *ep; + unsigned int ep_index; + + mutex_lock(&sb->mutex); + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = &sb->vdev->eps[ep_index]; + + if (ep->ep_state & EP_HAS_STREAMS) { + mutex_unlock(&sb->mutex); + return -EINVAL; + } + + /* + * Note, we don't know the DMA mask of the audio DSP device; if it's + * smaller than xhci's, it won't be able to access the endpoint ring + * buffer. This could be solved by not allowing the audio class driver + * to add the endpoint the normal way, but instead offload it immediately, + * and let this function add the endpoint and allocate the ring buffer + * with the smallest common DMA mask. + */ + if (sb->eps[ep_index] || ep->sideband) { + mutex_unlock(&sb->mutex); + return -EBUSY; + } + + ep->sideband = sb; + sb->eps[ep_index] = ep; + mutex_unlock(&sb->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(xhci_sideband_add_endpoint); + +/** + * xhci_sideband_remove_endpoint - remove endpoint from sideband access list + * @sb: sideband instance for this usb device + * @host_ep: usb host endpoint + * + * Removes an endpoint from the list of sideband accessed endpoints for this usb + * device. + * The sideband client should no longer touch the endpoint transfer buffer after + * calling this. + * + * Return: 0 on success, negative error otherwise.
+ */ +int +xhci_sideband_remove_endpoint(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep) +{ + struct xhci_virt_ep *ep; + unsigned int ep_index; + + mutex_lock(&sb->mutex); + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = sb->eps[ep_index]; + + if (!ep || !ep->sideband || ep->sideband != sb) { + mutex_unlock(&sb->mutex); + return -ENODEV; + } + + __xhci_sideband_remove_endpoint(sb, ep); + xhci_initialize_ring_info(ep->ring); + mutex_unlock(&sb->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(xhci_sideband_remove_endpoint); + +int +xhci_sideband_stop_endpoint(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep) +{ + struct xhci_virt_ep *ep; + unsigned int ep_index; + + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = sb->eps[ep_index]; + + if (!ep || !ep->sideband || ep->sideband != sb) + return -EINVAL; + + return xhci_stop_endpoint_sync(sb->xhci, ep, 0, GFP_KERNEL); +} +EXPORT_SYMBOL_GPL(xhci_sideband_stop_endpoint); + +/** + * xhci_sideband_get_endpoint_buffer - gets the endpoint transfer buffer address + * @sb: sideband instance for this usb device + * @host_ep: usb host endpoint + * + * Returns the address of the endpoint buffer where the xHC controller reads queued + * transfer TRBs from. This is the starting address of the ring buffer where the + * sideband client should write TRBs to. + * + * Caller needs to free the returned sg_table. + * + * Return: struct sg_table * if successful. NULL otherwise. + */ +struct sg_table * +xhci_sideband_get_endpoint_buffer(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep) +{ + struct xhci_virt_ep *ep; + unsigned int ep_index; + + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = sb->eps[ep_index]; + + if (!ep || !ep->ring || !ep->sideband || ep->sideband != sb) + return NULL; + + return xhci_ring_to_sgtable(sb, ep->ring); +} +EXPORT_SYMBOL_GPL(xhci_sideband_get_endpoint_buffer); + +/** + * xhci_sideband_get_event_buffer - return the event buffer for this device + * @sb: sideband instance for this usb device + * + * If a secondary xhci interrupter is set up for this usb device then this + * function returns the address of the event buffer where xHC writes + * the transfer completion events. + * + * Caller needs to free the returned sg_table. + * + * Return: struct sg_table * if successful. NULL otherwise. + */ +struct sg_table * +xhci_sideband_get_event_buffer(struct xhci_sideband *sb) +{ + if (!sb || !sb->ir) + return NULL; + + return xhci_ring_to_sgtable(sb, sb->ir->event_ring); +} +EXPORT_SYMBOL_GPL(xhci_sideband_get_event_buffer); + +/** + * xhci_sideband_create_interrupter - creates a new interrupter for this sideband + * @sb: sideband instance for this usb device + * @num_seg: number of event ring segments to allocate + * @ip_autoclear: IP autoclearing support such as MSI implemented + * @imod_interval: interrupt moderation interval for this interrupter, in ns + * @intr_num: requested interrupter number + * + * Sets up an xhci interrupter that can be used for this sideband accessed usb + * device. Transfer events for this device can be routed to this interrupter's + * event ring by setting the 'Interrupter Target' field correctly when queueing + * the transfer TRBs.
+ * Once this interrupter is created, the interrupter target ID can be obtained + * by calling xhci_sideband_interrupter_id(). + * + * Return: 0 on success, negative error otherwise. + */ +int +xhci_sideband_create_interrupter(struct xhci_sideband *sb, int num_seg, + bool ip_autoclear, u32 imod_interval, int intr_num) +{ + int ret = 0; + + if (!sb || !sb->xhci) + return -ENODEV; + + mutex_lock(&sb->mutex); + if (sb->ir) { + ret = -EBUSY; + goto out; + } + + sb->ir = xhci_create_secondary_interrupter(xhci_to_hcd(sb->xhci), + num_seg, imod_interval, + intr_num); + if (!sb->ir) { + ret = -ENOMEM; + goto out; + } + + sb->ir->ip_autoclear = ip_autoclear; + +out: + mutex_unlock(&sb->mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(xhci_sideband_create_interrupter); + +/** + * xhci_sideband_remove_interrupter - remove the interrupter from a sideband + * @sb: sideband instance for this usb device + * + * Removes a registered interrupter for a sideband. This would allow for other + * sideband users to utilize this interrupter. + */ +void +xhci_sideband_remove_interrupter(struct xhci_sideband *sb) +{ + if (!sb || !sb->ir) + return; + + mutex_lock(&sb->mutex); + xhci_remove_secondary_interrupter(xhci_to_hcd(sb->xhci), sb->ir); + + sb->ir = NULL; + mutex_unlock(&sb->mutex); +} +EXPORT_SYMBOL_GPL(xhci_sideband_remove_interrupter); + +/** + * xhci_sideband_interrupter_id - return the interrupter target id + * @sb: sideband instance for this usb device + * + * If a secondary xhci interrupter is set up for this usb device then this + * function returns the ID used by the interrupter. The sideband client + * needs to write this ID to the 'Interrupter Target' field of the transfer TRBs + * it queues on the endpoint's transfer ring to ensure transfer completion events + * are written by xHC to the correct interrupter event ring. + * + * Return: interrupter id on success, negative error otherwise. + */ +int +xhci_sideband_interrupter_id(struct xhci_sideband *sb) +{ + if (!sb || !sb->ir) + return -ENODEV; + + return sb->ir->intr_num; +} +EXPORT_SYMBOL_GPL(xhci_sideband_interrupter_id); + +/** + * xhci_sideband_register - register a sideband for a usb device + * @intf: usb interface associated with the sideband device + * @type: type of sideband to be registered; only XHCI_SIDEBAND_VENDOR is + * currently supported + * @notify_client: callback used to notify the client of sideband events + * + * Allows for clients to utilize XHCI interrupters and fetch transfer and event + * ring parameters for executing data transfers. + * + * Return: pointer to a new xhci_sideband instance if successful. NULL otherwise. + */ +struct xhci_sideband * +xhci_sideband_register(struct usb_interface *intf, enum xhci_sideband_type type, + int (*notify_client)(struct usb_interface *intf, + struct xhci_sideband_event *evt)) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_hcd *hcd = bus_to_hcd(udev->bus); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_virt_device *vdev; + struct xhci_sideband *sb; + + /* + * Make sure the usb device is connected to an xhci controller. Fail + * registration if the type is anything other than XHCI_SIDEBAND_VENDOR, + * as this is the only type that is currently supported by xhci-sideband.
+ */ + if (!udev->slot_id || type != XHCI_SIDEBAND_VENDOR) + return NULL; + + sb = kzalloc_node(sizeof(*sb), GFP_KERNEL, dev_to_node(hcd->self.sysdev)); + if (!sb) + return NULL; + + mutex_init(&sb->mutex); + + /* check this device isn't already controlled via sideband */ + spin_lock_irq(&xhci->lock); + + vdev = xhci->devs[udev->slot_id]; + + if (!vdev || vdev->sideband) { + xhci_warn(xhci, "XHCI sideband for slot %d already in use\n", + udev->slot_id); + spin_unlock_irq(&xhci->lock); + kfree(sb); + return NULL; + } + + sb->xhci = xhci; + sb->vdev = vdev; + sb->intf = intf; + sb->type = type; + sb->notify_client = notify_client; + vdev->sideband = sb; + + spin_unlock_irq(&xhci->lock); + + return sb; +} +EXPORT_SYMBOL_GPL(xhci_sideband_register); + +/** + * xhci_sideband_unregister - unregister sideband access to a usb device + * @sb: sideband instance to be unregistered + * + * Unregisters sideband access to a usb device and frees the sideband + * instance. + * After this the endpoint and interrupter event buffers should no longer + * be accessed via sideband. The xhci driver can now take over handling + * the buffers. + */ +void +xhci_sideband_unregister(struct xhci_sideband *sb) +{ + struct xhci_hcd *xhci; + int i; + + if (!sb) + return; + + xhci = sb->xhci; + + mutex_lock(&sb->mutex); + for (i = 0; i < EP_CTX_PER_DEV; i++) + if (sb->eps[i]) + __xhci_sideband_remove_endpoint(sb, sb->eps[i]); + mutex_unlock(&sb->mutex); + + xhci_sideband_remove_interrupter(sb); + + spin_lock_irq(&xhci->lock); + sb->xhci = NULL; + sb->vdev->sideband = NULL; + spin_unlock_irq(&xhci->lock); + + kfree(sb); +} +EXPORT_SYMBOL_GPL(xhci_sideband_unregister); +MODULE_DESCRIPTION("xHCI sideband driver for secondary interrupter management"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 22dc86fb5254..0c7af44d4dae 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1364,6 +1364,7 @@ static void tegra_xhci_id_work(struct work_struct *work) tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl, tegra->otg_usb2_port); + pm_runtime_get_sync(tegra->dev); if (tegra->host_mode) { /* switch to host mode */ if (tegra->otg_usb3_port >= 0) { @@ -1393,6 +1394,7 @@ static void tegra_xhci_id_work(struct work_struct *work) } tegra_xhci_set_port_power(tegra, true, true); + pm_runtime_mark_last_busy(tegra->dev); } else { if (tegra->otg_usb3_port >= 0) @@ -1400,6 +1402,7 @@ static void tegra_xhci_id_work(struct work_struct *work) tegra_xhci_set_port_power(tegra, true, false); } + pm_runtime_put_autosuspend(tegra->dev); } #if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP) @@ -2162,11 +2165,11 @@ static void tegra_xhci_program_utmi_power_lp0_exit(struct tegra_xusb *tegra) } } -static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime) +static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool is_auto_resume) { struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd); struct device *dev = tegra->dev; - bool wakeup = runtime ? true : device_may_wakeup(dev); + bool wakeup = is_auto_resume ? true : device_may_wakeup(dev); unsigned int i; int err; u32 usbcmd; @@ -2232,11 +2235,11 @@ out: return err; } -static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool runtime) +static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool is_auto_resume) { struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd); struct device *dev = tegra->dev; - bool wakeup = runtime ? 
true : device_may_wakeup(dev); + bool wakeup = is_auto_resume ? true : device_may_wakeup(dev); unsigned int i; u32 usbcmd; int err; @@ -2287,7 +2290,7 @@ static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool runtime) if (wakeup) tegra_xhci_disable_phy_sleepwalk(tegra); - err = xhci_resume(xhci, runtime ? PMSG_AUTO_RESUME : PMSG_RESUME); + err = xhci_resume(xhci, false, is_auto_resume); if (err < 0) { dev_err(tegra->dev, "failed to resume XHCI: %d\n", err); goto disable_phy; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 1a90ebc8a30e..47151ca527bf 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -20,6 +20,7 @@ #include <linux/string_choices.h> #include <linux/dmi.h> #include <linux/dma-mapping.h> +#include <linux/usb/xhci-sideband.h> #include "xhci.h" #include "xhci-trace.h" @@ -84,29 +85,6 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us) } /* - * xhci_handshake_check_state - same as xhci_handshake but takes an additional - * exit_state parameter, and bails out with an error immediately when xhc_state - * has exit_state flag set. - */ -int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr, - u32 mask, u32 done, int usec, unsigned int exit_state) -{ - u32 result; - int ret; - - ret = readl_poll_timeout_atomic(ptr, result, - (result & mask) == done || - result == U32_MAX || - xhci->xhc_state & exit_state, - 1, usec); - - if (result == U32_MAX || xhci->xhc_state & exit_state) - return -ENODEV; - - return ret; -} - -/* * Disable interrupts and begin the xHCI halting process. */ void xhci_quiesce(struct xhci_hcd *xhci) @@ -143,7 +121,8 @@ int xhci_halt(struct xhci_hcd *xhci) ret = xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); if (ret) { - xhci_warn(xhci, "Host halt failed, %d\n", ret); + if (!(xhci->xhc_state & XHCI_STATE_DYING)) + xhci_warn(xhci, "Host halt failed, %d\n", ret); return ret; } @@ -202,7 +181,8 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) state = readl(&xhci->op_regs->status); if (state == ~(u32)0) { - xhci_warn(xhci, "Host not accessible, reset failed.\n"); + if (!(xhci->xhc_state & XHCI_STATE_DYING)) + xhci_warn(xhci, "Host not accessible, reset failed.\n"); return -ENODEV; } @@ -226,8 +206,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) if (xhci->quirks & XHCI_INTEL_HOST) udelay(1000); - ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command, - CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING); + ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us); if (ret) return ret; @@ -322,28 +301,36 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci) xhci_info(xhci, "Fault detected\n"); } -static int xhci_enable_interrupter(struct xhci_interrupter *ir) +int xhci_enable_interrupter(struct xhci_interrupter *ir) { u32 iman; if (!ir || !ir->ir_set) return -EINVAL; - iman = readl(&ir->ir_set->irq_pending); - writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending); + iman = readl(&ir->ir_set->iman); + iman |= IMAN_IE; + writel(iman, &ir->ir_set->iman); + /* Read operation to guarantee the write has been flushed from posted buffers */ + readl(&ir->ir_set->iman); return 0; } -static int xhci_disable_interrupter(struct xhci_interrupter *ir) +int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) { u32 iman; if (!ir || !ir->ir_set) return -EINVAL; - iman = readl(&ir->ir_set->irq_pending); - writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending); + iman = readl(&ir->ir_set->iman); + iman &= 
~IMAN_IE; + writel(iman, &ir->ir_set->iman); + + iman = readl(&ir->ir_set->iman); + if (iman & IMAN_IP) + xhci_dbg(xhci, "%s: Interrupt pending\n", __func__); return 0; } @@ -354,13 +341,16 @@ int xhci_set_interrupter_moderation(struct xhci_interrupter *ir, { u32 imod; - if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250) + if (!ir || !ir->ir_set) return -EINVAL; - imod = readl(&ir->ir_set->irq_control); - imod &= ~ER_IRQ_INTERVAL_MASK; - imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK; - writel(imod, &ir->ir_set->irq_control); + /* IMODI value in IMOD register is in 250ns increments */ + imod_interval = umin(imod_interval / 250, IMODI_MASK); + + imod = readl(&ir->ir_set->imod); + imod &= ~IMODI_MASK; + imod |= imod_interval; + writel(imod, &ir->ir_set->imod); return 0; } @@ -373,7 +363,7 @@ static void compliance_mode_recovery(struct timer_list *t) u32 temp; int i; - xhci = from_timer(xhci, t, comp_mode_recovery_timer); + xhci = timer_container_of(xhci, t, comp_mode_recovery_timer); rhub = &xhci->usb3_rhub; hcd = rhub->hcd; @@ -460,6 +450,82 @@ static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1)); } +static void xhci_hcd_page_size(struct xhci_hcd *xhci) +{ + u32 page_size; + + page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK; + if (!is_power_of_2(page_size)) { + xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size); + /* Fallback to 4K page size, since that's common */ + page_size = 1; + } + + xhci->page_size = page_size << 12; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK", + xhci->page_size >> 10); +} + +static void xhci_enable_max_dev_slots(struct xhci_hcd *xhci) +{ + u32 config_reg; + u32 max_slots; + + max_slots = HCS_MAX_SLOTS(xhci->hcs_params1); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHC can handle at most %d device slots", + max_slots); + + config_reg = readl(&xhci->op_regs->config_reg); + config_reg &= ~HCS_SLOTS_MASK; + config_reg |= max_slots; + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting Max device slots reg = 0x%x", + config_reg); + writel(config_reg, &xhci->op_regs->config_reg); +} + +static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) +{ + dma_addr_t deq_dma; + u64 crcr; + + deq_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue); + deq_dma &= CMD_RING_PTR_MASK; + + crcr = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + crcr &= ~CMD_RING_PTR_MASK; + crcr |= deq_dma; + + crcr &= ~CMD_RING_CYCLE; + crcr |= xhci->cmd_ring->cycle_state; + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting command ring address to 0x%llx", crcr); + xhci_write_64(xhci, crcr, &xhci->op_regs->cmd_ring); +} + +static void xhci_set_doorbell_ptr(struct xhci_hcd *xhci) +{ + u32 offset; + + offset = readl(&xhci->cap_regs->db_off) & DBOFF_MASK; + xhci->dba = (void __iomem *)xhci->cap_regs + offset; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Doorbell array is located at offset 0x%x from cap regs base addr", offset); +} + +/* + * Enable USB 3.0 device notifications for function remote wake, which is necessary + * for allowing USB 3.0 devices to do remote wakeup from U3 (device suspend). + */ +static void xhci_set_dev_notifications(struct xhci_hcd *xhci) +{ + u32 dev_notf; + + dev_notf = readl(&xhci->op_regs->dev_notification); + dev_notf &= ~DEV_NOTE_MASK; + dev_notf |= DEV_NOTE_FWAKE; + writel(dev_notf, &xhci->op_regs->dev_notification); +} /* * Initialize memory for HCD and xHC (one-time init). 
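For illustration, the new xhci_hcd_page_size() above decodes the one-hot PAGE_SIZE register: bit n set means a page size of 2^(n+12) bytes, so the common raw value 1 yields 4K. A self-contained sketch of the same arithmetic, including the power-of-two sanity check and 4K fallback (illustrative only; decode_page_size is a hypothetical name):

#include <stdint.h>
#include <stdio.h>

#define XHCI_PAGE_SIZE_MASK	0xffff

/* One-hot PAGE_SIZE register: bit n set means a page size of 2^(n+12) bytes. */
static uint32_t decode_page_size(uint32_t reg)
{
	uint32_t ps = reg & XHCI_PAGE_SIZE_MASK;

	if (ps == 0 || (ps & (ps - 1)))		/* not exactly one bit set */
		ps = 1;				/* fall back to 4K, as the driver does */

	return ps << 12;
}

int main(void)
{
	printf("%u\n", decode_page_size(0x1));	/* 4096  (4K)  */
	printf("%u\n", decode_page_size(0x8));	/* 32768 (32K) */
	return 0;
}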
@@ -473,11 +539,37 @@ static int xhci_init(struct usb_hcd *hcd) struct xhci_hcd *xhci = hcd_to_xhci(hcd); int retval; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Starting %s", __func__); spin_lock_init(&xhci->lock); + INIT_LIST_HEAD(&xhci->cmd_list); + INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); + init_completion(&xhci->cmd_ring_stop_completion); + xhci_hcd_page_size(xhci); + memset(xhci->devs, 0, MAX_HC_SLOTS * sizeof(*xhci->devs)); + retval = xhci_mem_init(xhci, GFP_KERNEL); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); + if (retval) + return retval; + + /* Set the Number of Device Slots Enabled to the maximum supported value */ + xhci_enable_max_dev_slots(xhci); + + /* Set the address in the Command Ring Control register */ + xhci_set_cmd_ring_deq(xhci); + + /* Set Device Context Base Address Array pointer */ + xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr); + + /* Set Doorbell array pointer */ + xhci_set_doorbell_ptr(xhci); + + /* Set USB 3.0 device notifications for function remote wake */ + xhci_set_dev_notifications(xhci); + + /* Initialize the Primary interrupter */ + xhci_add_interrupter(xhci, 0); + xhci->interrupters[0]->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX; /* Initializing Compliance Mode Recovery Data If Needed */ if (xhci_compliance_mode_recovery_timer_quirk_check()) { @@ -485,7 +577,8 @@ static int xhci_init(struct usb_hcd *hcd) compliance_mode_recovery_timer_init(xhci); } - return retval; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished %s", __func__); + return 0; } /*-------------------------------------------------------------------------*/ @@ -627,7 +720,7 @@ void xhci_stop(struct usb_hcd *hcd) /* Deleting Compliance Mode Recovery Timer */ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && (!(xhci_all_ports_seen_u0(xhci)))) { - del_timer_sync(&xhci->comp_mode_recovery_timer); + timer_delete_sync(&xhci->comp_mode_recovery_timer); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "%s: compliance mode recovery timer deleted", __func__); @@ -640,7 +733,7 @@ void xhci_stop(struct usb_hcd *hcd) "// Disabling event ring interrupts"); temp = readl(&xhci->op_regs->status); writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); - xhci_disable_interrupter(ir); + xhci_disable_interrupter(xhci, ir); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); xhci_mem_cleanup(xhci); @@ -672,11 +765,11 @@ void xhci_shutdown(struct usb_hcd *hcd) xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", __func__, hcd->self.busnum); clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); - del_timer_sync(&hcd->rh_timer); + timer_delete_sync(&hcd->rh_timer); if (xhci->shared_hcd) { clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); - del_timer_sync(&xhci->shared_hcd->rh_timer); + timer_delete_sync(&xhci->shared_hcd->rh_timer); } spin_lock_irq(&xhci->lock); @@ -719,8 +812,8 @@ static void xhci_save_registers(struct xhci_hcd *xhci) ir->s3_erst_size = readl(&ir->ir_set->erst_size); ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); - ir->s3_irq_pending = readl(&ir->ir_set->irq_pending); - ir->s3_irq_control = readl(&ir->ir_set->irq_control); + ir->s3_iman = readl(&ir->ir_set->iman); + ir->s3_imod = readl(&ir->ir_set->imod); } } @@ -743,28 +836,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci) writel(ir->s3_erst_size, &ir->ir_set->erst_size); xhci_write_64(xhci, ir->s3_erst_base, 
&ir->ir_set->erst_base); xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue); - writel(ir->s3_irq_pending, &ir->ir_set->irq_pending); - writel(ir->s3_irq_control, &ir->ir_set->irq_control); + writel(ir->s3_iman, &ir->ir_set->iman); + writel(ir->s3_imod, &ir->ir_set->imod); } } -static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) -{ - u64 val_64; - - /* step 2: initialize command ring buffer */ - val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); - val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | - (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, - xhci->cmd_ring->dequeue) & - (u64) ~CMD_RING_RSVD_BITS) | - xhci->cmd_ring->cycle_state; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Setting command ring address to 0x%llx", - (long unsigned long) val_64); - xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); -} - /* * The whole command ring must be cleared to zero when we suspend the host. * @@ -908,10 +984,10 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", __func__, hcd->self.busnum); clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); - del_timer_sync(&hcd->rh_timer); + timer_delete_sync(&hcd->rh_timer); if (xhci->shared_hcd) { clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); - del_timer_sync(&xhci->shared_hcd->rh_timer); + timer_delete_sync(&xhci->shared_hcd->rh_timer); } if (xhci->quirks & XHCI_SUSPEND_DELAY) @@ -978,7 +1054,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) */ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && (!(xhci_all_ports_seen_u0(xhci)))) { - del_timer_sync(&xhci->comp_mode_recovery_timer); + timer_delete_sync(&xhci->comp_mode_recovery_timer); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "%s: compliance mode recovery timer deleted", __func__); @@ -994,16 +1070,14 @@ EXPORT_SYMBOL_GPL(xhci_suspend); * This is called when the machine transitions from S3/S4 mode. * */ -int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) +int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume) { - bool hibernated = (msg.event == PM_EVENT_RESTORE); u32 command, temp = 0; struct usb_hcd *hcd = xhci_to_hcd(xhci); int retval = 0; bool comp_timer_running = false; bool pending_portevent = false; bool suspended_usb3_devs = false; - bool reinit_xhc = false; if (!hcd->state) return 0; @@ -1022,10 +1096,10 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) spin_lock_irq(&xhci->lock); - if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) - reinit_xhc = true; + if (xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) + power_lost = true; - if (!reinit_xhc) { + if (!power_lost) { /* * Some controllers might lose power during suspend, so wait * for controller not ready bit to clear, just as in xHC init.
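The resume path above drops the pm_message_t argument in favour of explicit booleans. A glue driver that still receives a pm_message_t can derive them the same way the removed code did (a sketch under that assumption; example_glue_resume is a hypothetical helper):

#include <linux/pm.h>

#include "xhci.h"

/* Hypothetical glue-driver helper: map a pm_message_t onto the new
 * xhci_resume() arguments the way the removed code derived them. */
static int example_glue_resume(struct xhci_hcd *xhci, pm_message_t msg)
{
	bool power_lost = msg.event == PM_EVENT_RESTORE;	  /* back from hibernation */
	bool is_auto_resume = msg.event == PM_EVENT_AUTO_RESUME; /* runtime auto-resume */

	return xhci_resume(xhci, power_lost, is_auto_resume);
}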
@@ -1065,15 +1139,15 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) /* re-initialize the HC on Restore Error, or Host Controller Error */ if ((temp & (STS_SRE | STS_HCE)) && !(xhci->xhc_state & XHCI_STATE_REMOVING)) { - reinit_xhc = true; - if (!xhci->broken_suspend) + if (!power_lost) xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); + power_lost = true; } - if (reinit_xhc) { + if (power_lost) { if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !(xhci_all_ports_seen_u0(xhci))) { - del_timer_sync(&xhci->comp_mode_recovery_timer); + timer_delete_sync(&xhci->comp_mode_recovery_timer); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "Compliance Mode Recovery Timer deleted!"); } @@ -1086,7 +1160,10 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) xhci_dbg(xhci, "Stop HCD\n"); xhci_halt(xhci); xhci_zero_64b_regs(xhci); - retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); + if (xhci->xhc_state & XHCI_STATE_REMOVING) + retval = -ENODEV; + else + retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); spin_unlock_irq(&xhci->lock); if (retval) return retval; @@ -1094,7 +1171,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) xhci_dbg(xhci, "// Disabling event ring interrupts\n"); temp = readl(&xhci->op_regs->status); writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); - xhci_disable_interrupter(xhci->interrupters[0]); + xhci_disable_interrupter(xhci, xhci->interrupters[0]); xhci_dbg(xhci, "cleaning up memory\n"); xhci_mem_cleanup(xhci); @@ -1168,8 +1245,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) pending_portevent = xhci_pending_portevent(xhci); - if (suspended_usb3_devs && !pending_portevent && - msg.event == PM_EVENT_AUTO_RESUME) { + if (suspended_usb3_devs && !pending_portevent && is_auto_resume) { msleep(120); pending_portevent = xhci_pending_portevent(xhci); } @@ -1362,6 +1438,7 @@ static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and * HCDs. Find the index for an endpoint given its descriptor. Use the return * value to right shift 1 for the bitmask. + * @desc: USB endpoint descriptor to determine index for * * Index = (epnum * 2) + direction - 1, * where direction = 0 for OUT, 1 for IN. 
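The index formula quoted in the xhci_get_endpoint_index() comment above is easy to sanity-check in isolation (a standalone sketch; note the driver separately special-cases control endpoints):

#include <stdio.h>

/* index = (epnum * 2) + direction - 1, where direction = 0 for OUT, 1 for IN */
static unsigned int ep_index(unsigned int epnum, unsigned int is_in)
{
	return epnum * 2 + is_in - 1;
}

int main(void)
{
	printf("EP1 OUT -> %u\n", ep_index(1, 0));	/* 1 */
	printf("EP1 IN  -> %u\n", ep_index(1, 1));	/* 2 */
	printf("EP2 OUT -> %u\n", ep_index(2, 0));	/* 3 */
	printf("EP2 IN  -> %u\n", ep_index(2, 1));	/* 4 */
	return 0;
}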
@@ -3092,6 +3169,42 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) } EXPORT_SYMBOL_GPL(xhci_reset_bandwidth); +/* Get the available bandwidth of the ports under the xhci roothub */ +int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, + u8 dev_speed) +{ + struct xhci_command *cmd; + unsigned long flags; + int ret; + + if (!ctx || !xhci) + return -EINVAL; + + cmd = xhci_alloc_command(xhci, true, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->in_ctx = ctx; + + /* get xhci port bandwidth, refer to xhci rev1_2 protocol 4.6.15 */ + spin_lock_irqsave(&xhci->lock, flags); + + ret = xhci_queue_get_port_bw(xhci, cmd, ctx->dma, dev_speed, 0); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + goto err_out; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + wait_for_completion(cmd->completion); +err_out: + kfree(cmd->completion); + kfree(cmd); + + return ret; +} + static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, struct xhci_container_ctx *in_ctx, struct xhci_container_ctx *out_ctx, @@ -3914,6 +4027,8 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, } if (ep->ring) { + if (ep->sideband) + xhci_sideband_notify_ep_ring_free(ep->sideband, i); xhci_debugfs_remove_endpoint(xhci, virt_dev, i); xhci_free_endpoint_ring(xhci, virt_dev, i); } @@ -4759,8 +4874,8 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, */ if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) return timeout_ns; - dev_dbg(&udev->dev, "Hub-initiated U1 disabled " - "due to long timeout %llu ms\n", timeout_ns); + dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %lluus\n", + timeout_ns); return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); } @@ -4817,8 +4932,8 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, */ if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) return timeout_ns; - dev_dbg(&udev->dev, "Hub-initiated U2 disabled " - "due to long timeout %llu ms\n", timeout_ns); + dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %lluus\n", + timeout_ns * 256); return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 779b01dee068..a20f4e7cd43a 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -152,10 +152,6 @@ struct xhci_op_regs { #define XHCI_RESET_LONG_USEC (10 * 1000 * 1000) #define XHCI_RESET_SHORT_USEC (250 * 1000) -/* IMAN - Interrupt Management Register */ -#define IMAN_IE (1 << 1) -#define IMAN_IP (1 << 0) - /* USBSTS - USB status - status bitmasks */ /* HC not running - set to 1 when run/stop bit is cleared. */ #define STS_HALT XHCI_STS_HALT @@ -184,23 +180,22 @@ struct xhci_op_regs { * notification type that matches a bit set in this bit field. */ #define DEV_NOTE_MASK (0xffff) -#define ENABLE_DEV_NOTE(x) (1 << (x)) /* Most of the device notification types should only be used for debug. * SW does need to pay attention to function wake notifications. 
*/ -#define DEV_NOTE_FWAKE ENABLE_DEV_NOTE(1) +#define DEV_NOTE_FWAKE (1 << 1) /* CRCR - Command Ring Control Register - cmd_ring bitmasks */ -/* bit 0 is the command ring cycle state */ +/* bit 0 - Cycle bit indicates the ownership of the command ring */ +#define CMD_RING_CYCLE (1 << 0) /* stop ring operation after completion of the currently executing command */ #define CMD_RING_PAUSE (1 << 1) /* stop ring immediately - abort the currently executing command */ #define CMD_RING_ABORT (1 << 2) /* true: command ring is running */ #define CMD_RING_RUNNING (1 << 3) -/* bits 4:5 reserved and should be preserved */ -/* Command Ring pointer - bit mask for the lower 32 bits. */ -#define CMD_RING_RSVD_BITS (0x3f) +/* bits 63:6 - Command Ring pointer */ +#define CMD_RING_PTR_MASK GENMASK_ULL(63, 6) /* CONFIG - Configure Register - config_reg bitmasks */ /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ @@ -211,15 +206,17 @@ struct xhci_op_regs { #define CONFIG_CIE (1 << 9) /* bits 10:31 - reserved and should be preserved */ +/* bits 15:0 - HCD page shift bit */ +#define XHCI_PAGE_SIZE_MASK 0xffff + /** - * struct xhci_intr_reg - Interrupt Register Set - * @irq_pending: IMAN - Interrupt Management Register. Used to enable + * struct xhci_intr_reg - Interrupt Register Set, v1.2 section 5.5.2. + * @iman: IMAN - Interrupt Management Register. Used to enable * interrupts and check for pending interrupts. - * @irq_control: IMOD - Interrupt Moderation Register. - * Used to throttle interrupts. - * @erst_size: Number of segments in the Event Ring Segment Table (ERST). - * @erst_base: ERST base address. - * @erst_dequeue: Event ring dequeue pointer. + * @imod: IMOD - Interrupt Moderation Register. Used to throttle interrupts. + * @erst_size: ERSTSZ - Number of segments in the Event Ring Segment Table (ERST). + * @erst_base: ERSTBA - Event ring segment table base address. + * @erst_dequeue: ERDP - Event ring dequeue pointer. * * Each interrupter (defined by a MSI-X vector) has an event ring and an Event * Ring Segment Table (ERST) associated with it. The event ring is comprised of @@ -229,48 +226,51 @@ struct xhci_op_regs { * updates the dequeue pointer. */ struct xhci_intr_reg { - __le32 irq_pending; - __le32 irq_control; + __le32 iman; + __le32 imod; __le32 erst_size; __le32 rsvd; __le64 erst_base; __le64 erst_dequeue; }; -/* irq_pending bitmasks */ -#define ER_IRQ_PENDING(p) ((p) & 0x1) -/* bits 2:31 need to be preserved */ -/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */ -#define ER_IRQ_CLEAR(p) ((p) & 0xfffffffe) -#define ER_IRQ_ENABLE(p) ((ER_IRQ_CLEAR(p)) | 0x2) -#define ER_IRQ_DISABLE(p) ((ER_IRQ_CLEAR(p)) & ~(0x2)) - -/* irq_control bitmasks */ -/* Minimum interval between interrupts (in 250ns intervals). The interval - * between interrupts will be longer if there are no events on the event ring. - * Default is 4000 (1 ms). +/* iman bitmasks */ +/* bit 0 - Interrupt Pending (IP), whether there is an interrupt pending. Write-1-to-clear. */ +#define IMAN_IP (1 << 0) +/* bit 1 - Interrupt Enable (IE), whether the interrupter is capable of generating an interrupt */ +#define IMAN_IE (1 << 1) + +/* imod bitmasks */ +/* + * bits 15:0 - Interrupt Moderation Interval, the minimum interval between interrupts + * (in 250ns intervals). The interval between interrupts will be longer if there are no + * events on the event ring. Default is 4000 (1 ms). 
*/ -#define ER_IRQ_INTERVAL_MASK (0xffff) -/* Counter used to count down the time to the next interrupt - HW use only */ -#define ER_IRQ_COUNTER_MASK (0xffff << 16) +#define IMODI_MASK (0xffff) +/* bits 31:16 - Interrupt Moderation Counter, used to count down the time to the next interrupt */ +#define IMODC_MASK (0xffff << 16) /* erst_size bitmasks */ -/* Preserve bits 16:31 of erst_size */ -#define ERST_SIZE_MASK (0xffff << 16) +/* bits 15:0 - Event Ring Segment Table Size, number of ERST entries */ +#define ERST_SIZE_MASK (0xffff) /* erst_base bitmasks */ -#define ERST_BASE_RSVDP (GENMASK_ULL(5, 0)) +/* bits 63:6 - Event Ring Segment Table Base Address Register */ +#define ERST_BASE_ADDRESS_MASK GENMASK_ULL(63, 6) /* erst_dequeue bitmasks */ -/* Dequeue ERST Segment Index (DESI) - Segment number (or alias) - * where the current dequeue pointer lies. This is an optional HW hint. +/* + * bits 2:0 - Dequeue ERST Segment Index (DESI), is the segment number (or alias) where the + * current dequeue pointer lies. This is an optional HW hint. */ #define ERST_DESI_MASK (0x7) -/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by +/* + * bit 3 - Event Handler Busy (EHB), whether the event ring is scheduled to be serviced by * a work queue (or delayed service routine)? */ #define ERST_EHB (1 << 3) -#define ERST_PTR_MASK (GENMASK_ULL(63, 4)) +/* bits 63:4 - Event Ring Dequeue Pointer */ +#define ERST_PTR_MASK GENMASK_ULL(63, 4) /** * struct xhci_run_regs @@ -586,6 +586,7 @@ struct xhci_stream_info { #define SMALL_STREAM_ARRAY_SIZE 256 #define MEDIUM_STREAM_ARRAY_SIZE 1024 +#define GET_PORT_BW_ARRAY_SIZE 256 /* Some Intel xHCI host controllers need software to keep track of the bus * bandwidth. Keep track of endpoint info here. Each root port is allocated @@ -697,6 +698,8 @@ struct xhci_virt_ep { int next_frame_id; /* Use new Isoch TRB layout needed for extended TBC support */ bool use_extended_tbc; + /* set if this endpoint is controlled via sideband access*/ + struct xhci_sideband *sideband; }; enum xhci_overhead_type { @@ -759,6 +762,8 @@ struct xhci_virt_device { u16 current_mel; /* Used for the debugfs interfaces. 

/**
 * struct xhci_run_regs
@@ -586,6 +586,7 @@ struct xhci_stream_info {
#define SMALL_STREAM_ARRAY_SIZE 256
#define MEDIUM_STREAM_ARRAY_SIZE 1024
+#define GET_PORT_BW_ARRAY_SIZE 256

/* Some Intel xHCI host controllers need software to keep track of the bus
 * bandwidth. Keep track of endpoint info here. Each root port is allocated
@@ -697,6 +698,8 @@ struct xhci_virt_ep {
 int next_frame_id;
 /* Use new Isoch TRB layout needed for extended TBC support */
 bool use_extended_tbc;
+ /* set if this endpoint is controlled via sideband access */
+ struct xhci_sideband *sideband;
};

enum xhci_overhead_type {
@@ -759,6 +762,8 @@ struct xhci_virt_device {
 u16 current_mel;
 /* Used for the debugfs interfaces. */
 void *debugfs_private;
+ /* set if this device is controlled via sideband access */
+ struct xhci_sideband *sideband;
};

/*
@@ -999,6 +1004,9 @@ enum xhci_setup_dev {
/* bits 16:23 are the virtual function ID */
/* bits 24:31 are the slot ID */

+/* bits 19:16 are the dev speed */
+#define DEV_SPEED_FOR_TRB(p) ((p) << 16)
+
/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
#define SUSPEND_PORT_FOR_TRB(p) (((p) & 1) << 23)
#define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23)
@@ -1371,7 +1379,7 @@ struct xhci_ring {
 unsigned int num_trbs_free; /* used only by xhci DbC */
 unsigned int bounce_buf_len;
 enum xhci_ring_type type;
- bool last_td_was_short;
+ u32 old_trb_comp_code;
 struct radix_tree_root *trb_address_map;
};

@@ -1444,8 +1452,8 @@ struct xhci_interrupter {
 bool ip_autoclear;
 u32 isoc_bei_interval;
 /* For interrupter registers save and restore over suspend/resume */
- u32 s3_irq_pending;
- u32 s3_irq_control;
+ u32 s3_iman;
+ u32 s3_imod;
 u32 s3_erst_size;
 u64 s3_erst_base;
 u64 s3_erst_dequeue;
@@ -1514,10 +1522,7 @@ struct xhci_hcd {
 u16 max_interrupters;
 /* imod_interval in ns (I * 250ns) */
 u32 imod_interval;
- /* 4KB min, 128MB max */
- int page_size;
- /* Valid values are 12 to 20, inclusive */
- int page_shift;
+ u32 page_size;
 /* MSI-X/MSI vectors */
 int nvecs;
 /* optional clocks */
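With a single u32 page_size field, the byte value is presumably computed once at init from the PAGESIZE register rather than carried around as a shift. A sketch of that derivation, assuming the bit-field encoding noted earlier (bit n set advertises 2^(n+12) byte pages) and the kernel's 1-based ffs(); validity checks are elided:

/*
 * Sketch: derive the controller page size in bytes.  A host that
 * only supports 4 KiB pages reports 0x0001, so ffs() returns 1 and
 * page_size becomes 1 << 12 == 4096.
 */
u32 psz = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;

xhci->page_size = 1 << (ffs(psz) + 11);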
@@ -1554,6 +1559,7 @@ struct xhci_hcd {
 struct dma_pool *device_pool;
 struct dma_pool *segment_pool;
 struct dma_pool *small_streams_pool;
+ struct dma_pool *port_bw_pool;
 struct dma_pool *medium_streams_pool;

 /* Host controller watchdog timer structures */
@@ -1637,6 +1643,7 @@ struct xhci_hcd {
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
#define XHCI_ETRON_HOST BIT_ULL(49)
+#define XHCI_LIMIT_ENDPOINT_INTERVAL_9 BIT_ULL(50)

 unsigned int num_active_eps;
 unsigned int limit_active_eps;
@@ -1759,11 +1766,20 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
 }

-/* Link TRB chain should always be set on 0.95 hosts, and AMD 0.96 ISOC rings */
+/*
+ * Reportedly, some chapters of the v0.95 spec said that the Link TRB always has its chain bit
+ * set. Other chapters and later specs say that it should only be set if the link is inside a TD
+ * which continues from the end of one segment to the next segment.
+ *
+ * Some 0.95 hardware was found to misbehave if any link TRB doesn't have the chain bit set.
+ *
+ * 0.96 hardware from AMD and NEC was found to ignore unchained isochronous link TRBs when
+ * "resynchronizing the pipe" after a Missed Service Error.
+ */
static inline bool xhci_link_chain_quirk(struct xhci_hcd *xhci, enum xhci_ring_type type)
{
 return (xhci->quirks & XHCI_LINK_TRB_QUIRK) ||
- (type == TYPE_ISOC && (xhci->quirks & XHCI_AMD_0x96_HOST));
+ (type == TYPE_ISOC && (xhci->quirks & (XHCI_AMD_0x96_HOST | XHCI_NEC_HOST)));
}

/* xHCI debugging */
@@ -1837,17 +1853,22 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
 int type, gfp_t flags);
void xhci_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_container_ctx *xhci_alloc_port_bw_ctx(struct xhci_hcd *xhci,
+ gfp_t flags);
+void xhci_free_port_bw_ctx(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx);
struct xhci_interrupter *
xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
- u32 imod_interval);
+ u32 imod_interval, unsigned int intr_num);
void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir);
+void xhci_skip_sec_intr_events(struct xhci_hcd *xhci,
+ struct xhci_ring *ring,
+ struct xhci_interrupter *ir);

/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);
-int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
- u32 mask, u32 done, int usec, unsigned int exit_state);
void xhci_quiesce(struct xhci_hcd *xhci);
int xhci_halt(struct xhci_hcd *xhci);
int xhci_start(struct xhci_hcd *xhci);
@@ -1870,7 +1891,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);

int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
-int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg);
+int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume);

irqreturn_t xhci_irq(struct usb_hcd *hcd);
irqreturn_t xhci_msi_irq(int irq, void *hcd);
@@ -1881,11 +1902,11 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
 struct usb_tt *tt, gfp_t mem_flags);
int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
 u32 imod_interval);
+int xhci_enable_interrupter(struct xhci_interrupter *ir);
+int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir);

/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
-struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td,
- dma_addr_t suspect_dma, bool debug);
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
@@ -1907,6 +1928,11 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
 dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed);
+int xhci_queue_get_port_bw(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
+ u8 dev_speed, bool command_must_succeed);
+int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ u8 dev_speed);
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
 dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
@@ -1927,6 +1953,10 @@ unsigned int count_trbs(u64 addr, u64 len);
int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend,
 gfp_t gfp_flags);
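The new intr_num parameter lets a sideband client ask for a specific interrupter rather than the first free one, which matters when an audio DSP and the driver must agree on which vector the DSP services. A hedged usage sketch: the function and wrapper names aside from the xhci_* calls are hypothetical, the segment count and interrupter number are arbitrary, the moderation argument is assumed to follow the nanosecond convention noted for imod_interval above, and the create call is assumed to return NULL on failure:

/*
 * Sketch: claim interrupter 1 with a two-segment event ring and a
 * 1 ms moderation interval (1000000 ns, i.e. I * 250 ns with I = 4000).
 */
static int sideband_claim_interrupter_sketch(struct usb_hcd *hcd,
					     struct xhci_interrupter **out)
{
	struct xhci_interrupter *ir;

	ir = xhci_create_secondary_interrupter(hcd, 2, 1000000, 1);
	if (!ir)
		return -ENOMEM;

	/* ... sideband datapath runs; its events bypass the primary ring ... */

	*out = ir;
	return 0;
}

Teardown would mirror this with xhci_remove_secondary_interrupter(hcd, ir) once the sideband datapath is quiesced.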
void xhci_process_cancelled_tds(struct xhci_virt_ep *ep);
+void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
+ struct xhci_interrupter *ir,
+ bool clear_ehb);
+void xhci_add_interrupter(struct xhci_hcd *xhci, unsigned int intr_num);

/* xHCI roothub code */
void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,