Diffstat (limited to 'drivers/usb/host')
78 files changed, 2879 insertions, 1781 deletions
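A large share of the churn below is mechanical: platform drivers move from the transitional .remove_new() callback back to .remove(), which now returns void, so the two members are equivalent and the temporary name can be retired. As a rough sketch of the resulting driver shape (a hypothetical "foo" driver, not part of this series):

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		/* acquire resources, ideally via devm_* so remove stays trivial */
		return 0;
	}

	/* remove() returns void now, so teardown cannot report failure */
	static void foo_remove(struct platform_device *pdev)
	{
	}

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.remove	= foo_remove,	/* spelled .remove_new during the transition */
		.driver	= {
			.name	= "foo",
		},
	};
	module_platform_driver(foo_driver);

	MODULE_DESCRIPTION("Sketch of a platform driver with the void remove() callback");
	MODULE_LICENSE("GPL");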
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 4448d0ab06f0..109100cc77a3 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -40,11 +40,11 @@ config USB_XHCI_DBGCAP config USB_XHCI_PCI tristate depends on USB_PCI - depends on USB_XHCI_PCI_RENESAS || !USB_XHCI_PCI_RENESAS default y config USB_XHCI_PCI_RENESAS tristate "Support for additional Renesas xHCI controller with firmware" + depends on USB_XHCI_PCI help Say 'Y' to enable the support for the Renesas xHCI controller with firmware. Make sure you have the firmware for the device and @@ -104,6 +104,15 @@ config USB_XHCI_RZV2M Say 'Y' to enable the support for the xHCI host controller found in Renesas RZ/V2M SoC. +config USB_XHCI_SIDEBAND + bool "xHCI support for sideband" + help + Say 'Y' to enable the support for the xHCI sideband capability. + Provide a mechanism for a sideband datapath for payload associated + with audio class endpoints. This allows for an audio DSP to use + xHCI USB endpoints directly, allowing CPU to sleep while playing + audio. + config USB_XHCI_TEGRA tristate "xHCI support for NVIDIA Tegra SoCs" depends on PHY_TEGRA_XUSB @@ -225,7 +234,7 @@ config USB_EHCI_HCD_OMAP tristate "EHCI support for OMAP3 and later chips" depends on ARCH_OMAP || COMPILE_TEST depends on NOP_USB_XCEIV - default y + default ARCH_OMAP help Enables support for the on-chip EHCI controller on OMAP3 and later chips. diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index be4e5245c52f..4df946c05ba0 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -32,6 +32,10 @@ endif xhci-rcar-hcd-y += xhci-rcar.o xhci-rcar-hcd-$(CONFIG_USB_XHCI_RZV2M) += xhci-rzv2m.o +ifneq ($(CONFIG_USB_XHCI_SIDEBAND),) + xhci-hcd-y += xhci-sideband.o +endif + obj-$(CONFIG_USB_PCI) += pci-quirks.o obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c index 7558cc4d90cc..519386255886 100644 --- a/drivers/usb/host/bcma-hcd.c +++ b/drivers/usb/host/bcma-hcd.c @@ -25,7 +25,6 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/of_platform.h> #include <linux/usb/ehci_pdriver.h> #include <linux/usb/ohci_pdriver.h> diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c index 6a6e1c510b28..65747270fd88 100644 --- a/drivers/usb/host/ehci-atmel.c +++ b/drivers/usb/host/ehci-atmel.c @@ -220,7 +220,7 @@ static SIMPLE_DEV_PM_OPS(ehci_atmel_pm_ops, ehci_atmel_drv_suspend, static struct platform_driver ehci_atmel_driver = { .probe = ehci_atmel_drv_probe, - .remove_new = ehci_atmel_drv_remove, + .remove = ehci_atmel_drv_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "atmel-ehci", diff --git a/drivers/usb/host/ehci-brcm.c b/drivers/usb/host/ehci-brcm.c index 77e42c739c58..888e8f6670d2 100644 --- a/drivers/usb/host/ehci-brcm.c +++ b/drivers/usb/host/ehci-brcm.c @@ -246,10 +246,11 @@ static const struct of_device_id brcm_ehci_of_match[] = { { .compatible = "brcm,bcm7445-ehci", }, {} }; +MODULE_DEVICE_TABLE(of, brcm_ehci_of_match); static struct platform_driver ehci_brcm_driver = { .probe = ehci_brcm_probe, - .remove_new = ehci_brcm_remove, + .remove = ehci_brcm_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "ehci-brcm", diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c index c063fb042926..435001128221 100644 --- a/drivers/usb/host/ehci-dbg.c +++ b/drivers/usb/host/ehci-dbg.c @@ -430,13 +430,13 @@ static void 
qh_lines(struct ehci_hcd *ehci, struct ehci_qh *qh, mark = '/'; } switch ((scratch >> 8) & 0x03) { - case 0: + case PID_CODE_OUT: type = "out"; break; - case 1: + case PID_CODE_IN: type = "in"; break; - case 2: + case PID_CODE_SETUP: type = "setup"; break; default: @@ -602,10 +602,10 @@ static unsigned output_buf_tds_dir(char *buf, struct ehci_hcd *ehci, list_for_each_entry(qtd, &qh->qtd_list, qtd_list) { temp++; switch ((hc32_to_cpu(ehci, qtd->hw_token) >> 8) & 0x03) { - case 0: + case PID_CODE_OUT: type = "out"; continue; - case 1: + case PID_CODE_IN: type = "in"; continue; } diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c index f644b131cc0b..d2a5bedf736a 100644 --- a/drivers/usb/host/ehci-exynos.c +++ b/drivers/usb/host/ehci-exynos.c @@ -48,7 +48,6 @@ struct exynos_ehci_hcd { static int exynos_ehci_get_phy(struct device *dev, struct exynos_ehci_hcd *exynos_ehci) { - struct device_node *child; struct phy *phy; int phy_number, num_phys; int ret; @@ -66,26 +65,22 @@ static int exynos_ehci_get_phy(struct device *dev, return 0; /* Get PHYs using legacy bindings */ - for_each_available_child_of_node(dev->of_node, child) { + for_each_available_child_of_node_scoped(dev->of_node, child) { ret = of_property_read_u32(child, "reg", &phy_number); if (ret) { dev_err(dev, "Failed to parse device tree\n"); - of_node_put(child); return ret; } if (phy_number >= PHY_NUMBER) { dev_err(dev, "Invalid number of PHYs\n"); - of_node_put(child); return -EINVAL; } phy = devm_of_phy_optional_get(dev, child, NULL); exynos_ehci->phy[phy_number] = phy; - if (IS_ERR(phy)) { - of_node_put(child); + if (IS_ERR(phy)) return PTR_ERR(phy); - } } exynos_ehci->legacy_phy = true; @@ -159,20 +154,16 @@ static int exynos_ehci_probe(struct platform_device *pdev) err = exynos_ehci_get_phy(&pdev->dev, exynos_ehci); if (err) - goto fail_clk; + goto fail_io; - exynos_ehci->clk = devm_clk_get(&pdev->dev, "usbhost"); + exynos_ehci->clk = devm_clk_get_enabled(&pdev->dev, "usbhost"); if (IS_ERR(exynos_ehci->clk)) { dev_err(&pdev->dev, "Failed to get usbhost clock\n"); err = PTR_ERR(exynos_ehci->clk); - goto fail_clk; + goto fail_io; } - err = clk_prepare_enable(exynos_ehci->clk); - if (err) - goto fail_clk; - hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { err = PTR_ERR(hcd->regs); @@ -223,8 +214,6 @@ fail_add_hcd: exynos_ehci_phy_disable(&pdev->dev); pdev->dev.of_node = exynos_ehci->of_node; fail_io: - clk_disable_unprepare(exynos_ehci->clk); -fail_clk: usb_put_hcd(hcd); return err; } @@ -240,12 +229,9 @@ static void exynos_ehci_remove(struct platform_device *pdev) exynos_ehci_phy_disable(&pdev->dev); - clk_disable_unprepare(exynos_ehci->clk); - usb_put_hcd(hcd); } -#ifdef CONFIG_PM static int exynos_ehci_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); @@ -288,15 +274,9 @@ static int exynos_ehci_resume(struct device *dev) ehci_resume(hcd, false); return 0; } -#else -#define exynos_ehci_suspend NULL -#define exynos_ehci_resume NULL -#endif -static const struct dev_pm_ops exynos_ehci_pm_ops = { - .suspend = exynos_ehci_suspend, - .resume = exynos_ehci_resume, -}; +static DEFINE_SIMPLE_DEV_PM_OPS(exynos_ehci_pm_ops, + exynos_ehci_suspend, exynos_ehci_resume); #ifdef CONFIG_OF static const struct of_device_id exynos_ehci_match[] = { @@ -308,11 +288,11 @@ MODULE_DEVICE_TABLE(of, exynos_ehci_match); static struct platform_driver exynos_ehci_driver = { .probe = exynos_ehci_probe, - .remove_new = exynos_ehci_remove, + .remove = 
exynos_ehci_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "exynos-ehci", - .pm = &exynos_ehci_pm_ops, + .pm = pm_ptr(&exynos_ehci_pm_ops), .of_match_table = of_match_ptr(exynos_ehci_match), } }; diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 5b1ce394a417..6ed2fa5418a4 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -410,15 +410,13 @@ static int ehci_fsl_setup(struct usb_hcd *hcd) return retval; } -struct ehci_fsl { - struct ehci_hcd ehci; - -#ifdef CONFIG_PM +struct ehci_fsl_priv { /* Saved USB PHY settings, need to restore after deep sleep. */ u32 usb_ctrl; -#endif }; +#define hcd_to_ehci_fsl_priv(h) ((struct ehci_fsl_priv *) hcd_to_ehci(h)->priv) + #ifdef CONFIG_PM #ifdef CONFIG_PPC_MPC512x @@ -566,17 +564,10 @@ static inline int ehci_fsl_mpc512x_drv_resume(struct device *dev) } #endif /* CONFIG_PPC_MPC512x */ -static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd) -{ - struct ehci_hcd *ehci = hcd_to_ehci(hcd); - - return container_of(ehci, struct ehci_fsl, ehci); -} - static int ehci_fsl_drv_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); - struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); + struct ehci_fsl_priv *priv = hcd_to_ehci_fsl_priv(hcd); void __iomem *non_ehci = hcd->regs; if (of_device_is_compatible(dev->parent->of_node, @@ -589,14 +580,14 @@ static int ehci_fsl_drv_suspend(struct device *dev) if (!fsl_deep_sleep()) return 0; - ehci_fsl->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL); + priv->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL); return 0; } static int ehci_fsl_drv_resume(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); - struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); + struct ehci_fsl_priv *priv = hcd_to_ehci_fsl_priv(hcd); struct ehci_hcd *ehci = hcd_to_ehci(hcd); void __iomem *non_ehci = hcd->regs; @@ -612,7 +603,7 @@ static int ehci_fsl_drv_resume(struct device *dev) usb_root_hub_lost_power(hcd->self.root_hub); /* Restore USB PHY settings and enable the controller. 
*/ - iowrite32be(ehci_fsl->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL); + iowrite32be(priv->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL); ehci_reset(ehci); ehci_fsl_reinit(ehci); @@ -671,7 +662,7 @@ static int ehci_start_port_reset(struct usb_hcd *hcd, unsigned port) #endif /* CONFIG_USB_OTG */ static const struct ehci_driver_overrides ehci_fsl_overrides __initconst = { - .extra_priv_size = sizeof(struct ehci_fsl), + .extra_priv_size = sizeof(struct ehci_fsl_priv), .reset = ehci_fsl_setup, }; @@ -706,7 +697,7 @@ static void fsl_ehci_drv_remove(struct platform_device *pdev) static struct platform_driver ehci_fsl_driver = { .probe = fsl_ehci_drv_probe, - .remove_new = fsl_ehci_drv_remove, + .remove = fsl_ehci_drv_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = DRV_NAME, diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c index 14150e4d3382..bd9762eaa135 100644 --- a/drivers/usb/host/ehci-grlib.c +++ b/drivers/usb/host/ehci-grlib.c @@ -168,7 +168,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_grlib_of_match); static struct platform_driver ehci_grlib_driver = { .probe = ehci_hcd_grlib_probe, - .remove_new = ehci_hcd_grlib_remove, + .remove = ehci_hcd_grlib_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "grlib-ehci", diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 802bfafb1012..6d1d190c914d 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -32,7 +32,7 @@ #include <asm/byteorder.h> #include <asm/io.h> #include <asm/irq.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #if defined(CONFIG_PPC_PS3) #include <asm/firmware.h> @@ -466,8 +466,7 @@ static int ehci_init(struct usb_hcd *hcd) */ ehci->need_io_watchdog = 1; - hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - ehci->hrtimer.function = ehci_hrtimer_func; + hrtimer_setup(&ehci->hrtimer, ehci_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT; hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); @@ -547,7 +546,7 @@ static int ehci_init(struct usb_hcd *hcd) * make problems: throughput reduction (!), data errors... 
*/ if (park) { - park = min(park, (unsigned) 3); + park = min_t(unsigned int, park, 3); temp |= CMD_PARK; temp |= park << 8; } diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c index 2f1fc7eb8b72..cbabbe107172 100644 --- a/drivers/usb/host/ehci-mv.c +++ b/drivers/usb/host/ehci-mv.c @@ -279,7 +279,7 @@ static const struct of_device_id ehci_mv_dt_ids[] = { static struct platform_driver ehci_mv_driver = { .probe = mv_ehci_probe, - .remove_new = mv_ehci_remove, + .remove = mv_ehci_remove, .shutdown = mv_ehci_shutdown, .driver = { .name = "mv-ehci", diff --git a/drivers/usb/host/ehci-npcm7xx.c b/drivers/usb/host/ehci-npcm7xx.c index 3d3317a1a0b3..f1c7034c1e80 100644 --- a/drivers/usb/host/ehci-npcm7xx.c +++ b/drivers/usb/host/ehci-npcm7xx.c @@ -122,7 +122,7 @@ MODULE_DEVICE_TABLE(of, npcm7xx_ehci_id_table); static struct platform_driver npcm7xx_ehci_hcd_driver = { .probe = npcm7xx_ehci_hcd_drv_probe, - .remove_new = npcm7xx_ehci_hcd_drv_remove, + .remove = npcm7xx_ehci_hcd_drv_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "npcm7xx-ehci", diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index b24f371a46f3..db4a1acb27da 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -264,7 +264,7 @@ MODULE_DEVICE_TABLE(of, omap_ehci_dt_ids); static struct platform_driver ehci_hcd_omap_driver = { .probe = ehci_hcd_omap_probe, - .remove_new = ehci_hcd_omap_remove, + .remove = ehci_hcd_omap_remove, .shutdown = usb_hcd_platform_shutdown, /*.suspend = ehci_hcd_omap_suspend, */ /*.resume = ehci_hcd_omap_resume, */ diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index ad145a54ca74..34abff8669f8 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c @@ -352,7 +352,7 @@ MODULE_DEVICE_TABLE(of, ehci_orion_dt_ids); static struct platform_driver ehci_orion_driver = { .probe = ehci_orion_drv_probe, - .remove_new = ehci_orion_drv_remove, + .remove = ehci_orion_drv_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "orion-ehci", diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c index 98b073185e1c..6aab45c8525c 100644 --- a/drivers/usb/host/ehci-platform.c +++ b/drivers/usb/host/ehci-platform.c @@ -198,7 +198,8 @@ static void quirk_poll_work(struct work_struct *work) static void quirk_poll_timer(struct timer_list *t) { - struct ehci_platform_priv *priv = from_timer(priv, t, poll_timer); + struct ehci_platform_priv *priv = timer_container_of(priv, t, + poll_timer); struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd, priv); @@ -224,7 +225,7 @@ static void quirk_poll_init(struct ehci_platform_priv *priv) static void quirk_poll_end(struct ehci_platform_priv *priv) { - del_timer_sync(&priv->poll_timer); + timer_delete_sync(&priv->poll_timer); cancel_delayed_work(&priv->poll_work); } @@ -508,7 +509,7 @@ static SIMPLE_DEV_PM_OPS(ehci_platform_pm_ops, ehci_platform_suspend, static struct platform_driver ehci_platform_driver = { .id_table = ehci_platform_table, .probe = ehci_platform_probe, - .remove_new = ehci_platform_remove, + .remove = ehci_platform_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "ehci-platform", diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c index 7fd83e806ae4..8063b9d3aebd 100644 --- a/drivers/usb/host/ehci-ppc-of.c +++ b/drivers/usb/host/ehci-ppc-of.c @@ -230,7 +230,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match); static struct platform_driver 
ehci_hcd_ppc_of_driver = { .probe = ehci_hcd_ppc_of_probe, - .remove_new = ehci_hcd_ppc_of_remove, + .remove = ehci_hcd_ppc_of_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "ppc-of-ehci", diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 666f5c4db25a..ba37a9fcab92 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -27,10 +27,6 @@ /*-------------------------------------------------------------------------*/ -/* PID Codes that are used here, from EHCI specification, Table 3-16. */ -#define PID_CODE_IN 1 -#define PID_CODE_SETUP 2 - /* fill a qtd, returning how much of the buffer we were able to queue up */ static unsigned int @@ -230,7 +226,7 @@ static int qtd_copy_status ( /* fs/ls interrupt xfer missed the complete-split */ status = -EPROTO; } else if (token & QTD_STS_DBE) { - status = (QTD_PID (token) == 1) /* IN ? */ + status = (QTD_PID(token) == PID_CODE_IN) /* IN ? */ ? -ENOSR /* hc couldn't read data */ : -ECOMM; /* hc couldn't write data */ } else if (token & QTD_STS_XACT) { @@ -606,7 +602,7 @@ qh_urb_transaction ( /* SETUP pid */ qtd_fill(ehci, qtd, urb->setup_dma, sizeof (struct usb_ctrlrequest), - token | (2 /* "setup" */ << 8), 8); + token | (PID_CODE_SETUP << 8), 8); /* ... and always at least one more pid */ token ^= QTD_TOGGLE; @@ -620,7 +616,7 @@ qh_urb_transaction ( /* for zero length DATA stages, STATUS is always IN */ if (len == 0) - token |= (1 /* "in" */ << 8); + token |= (PID_CODE_IN << 8); } /* @@ -642,7 +638,7 @@ qh_urb_transaction ( } if (is_input) - token |= (1 /* "in" */ << 8); + token |= (PID_CODE_IN << 8); /* else it's already initted to "out" pid (0 << 8) */ maxpacket = usb_endpoint_maxp(&urb->ep->desc); @@ -709,7 +705,7 @@ qh_urb_transaction ( if (usb_pipecontrol (urb->pipe)) { one_more = 1; - token ^= 0x0100; /* "in" <--> "out" */ + token ^= (PID_CODE_IN << 8); /* "in" <--> "out" */ token |= QTD_TOGGLE; /* force DATA1 */ } else if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) @@ -1203,7 +1199,7 @@ static int ehci_submit_single_step_set_feature( /* SETUP pid, and interrupt after SETUP completion */ qtd_fill(ehci, qtd, urb->setup_dma, sizeof(struct usb_ctrlrequest), - QTD_IOC | token | (2 /* "setup" */ << 8), 8); + QTD_IOC | token | (PID_CODE_SETUP << 8), 8); submit_async(ehci, urb, &qtd_list, GFP_ATOMIC); return 0; /*Return now; we shall come back after 15 seconds*/ @@ -1216,7 +1212,7 @@ static int ehci_submit_single_step_set_feature( token ^= QTD_TOGGLE; /*We need to start IN with DATA-1 Pid-sequence*/ buf = urb->transfer_dma; - token |= (1 /* "in" */ << 8); /*This is IN stage*/ + token |= (PID_CODE_IN << 8); /*This is IN stage*/ maxpacket = usb_endpoint_maxp(&urb->ep->desc); @@ -1229,7 +1225,7 @@ static int ehci_submit_single_step_set_feature( qtd->hw_alt_next = EHCI_LIST_END(ehci); /* STATUS stage for GetDesc control request */ - token ^= 0x0100; /* "in" <--> "out" */ + token ^= (PID_CODE_IN << 8); /* "in" <--> "out" */ token |= QTD_TOGGLE; /* force DATA1 */ qtd_prev = qtd; diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c index d31d9506e41a..2d23690d72c5 100644 --- a/drivers/usb/host/ehci-sh.c +++ b/drivers/usb/host/ehci-sh.c @@ -119,8 +119,12 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev) if (IS_ERR(priv->iclk)) priv->iclk = NULL; - clk_enable(priv->fclk); - clk_enable(priv->iclk); + ret = clk_enable(priv->fclk); + if (ret) + goto fail_request_resource; + ret = clk_enable(priv->iclk); + if (ret) + goto fail_iclk; ret = usb_add_hcd(hcd, 
irq, IRQF_SHARED); if (ret != 0) { @@ -136,6 +140,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev) fail_add_hcd: clk_disable(priv->iclk); +fail_iclk: clk_disable(priv->fclk); fail_request_resource: @@ -169,7 +174,7 @@ static void ehci_hcd_sh_shutdown(struct platform_device *pdev) static struct platform_driver ehci_hcd_sh_driver = { .probe = ehci_hcd_sh_probe, - .remove_new = ehci_hcd_sh_remove, + .remove = ehci_hcd_sh_remove, .shutdown = ehci_hcd_sh_shutdown, .driver = { .name = "sh_ehci", diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c index d0e94e4c9fe2..e96710192d6b 100644 --- a/drivers/usb/host/ehci-spear.c +++ b/drivers/usb/host/ehci-spear.c @@ -105,7 +105,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev) /* registers start at offset 0x0 */ hcd_to_ehci(hcd)->caps = hcd->regs; - clk_prepare_enable(sehci->clk); + retval = clk_prepare_enable(sehci->clk); + if (retval) + goto err_put_hcd; retval = usb_add_hcd(hcd, irq, IRQF_SHARED); if (retval) goto err_stop_ehci; @@ -130,8 +132,7 @@ static void spear_ehci_hcd_drv_remove(struct platform_device *pdev) usb_remove_hcd(hcd); - if (sehci->clk) - clk_disable_unprepare(sehci->clk); + clk_disable_unprepare(sehci->clk); usb_put_hcd(hcd); } @@ -143,7 +144,7 @@ MODULE_DEVICE_TABLE(of, spear_ehci_id_table); static struct platform_driver spear_ehci_hcd_driver = { .probe = spear_ehci_hcd_drv_probe, - .remove_new = spear_ehci_hcd_drv_remove, + .remove = spear_ehci_hcd_drv_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "spear-ehci", diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c index 2dbb0d86daaa..58867d816af7 100644 --- a/drivers/usb/host/ehci-st.c +++ b/drivers/usb/host/ehci-st.c @@ -320,7 +320,7 @@ MODULE_DEVICE_TABLE(of, st_ehci_ids); static struct platform_driver ehci_platform_driver = { .probe = st_ehci_platform_probe, - .remove_new = st_ehci_platform_remove, + .remove = st_ehci_platform_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "st-ehci", diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c index a2112c28f631..1d16cfefabd7 100644 --- a/drivers/usb/host/ehci-xilinx-of.c +++ b/drivers/usb/host/ehci-xilinx-of.c @@ -220,7 +220,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match); static struct platform_driver ehci_hcd_xilinx_of_driver = { .probe = ehci_hcd_xilinx_of_probe, - .remove_new = ehci_hcd_xilinx_of_remove, + .remove = ehci_hcd_xilinx_of_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "xilinx-of-ehci", diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 1441e3400796..d7a3c8d13f6b 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -321,10 +321,16 @@ struct ehci_qtd { size_t length; /* length of buffer */ } __aligned(32); +/* PID Codes that are used here, from EHCI specification, Table 3-16. 
*/ +#define PID_CODE_OUT 0 +#define PID_CODE_IN 1 +#define PID_CODE_SETUP 2 + /* mask NakCnt+T in qh->hw_alt_next */ #define QTD_MASK(ehci) cpu_to_hc32(ehci, ~0x1f) -#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1) +#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && \ + QTD_PID(token) == PID_CODE_IN) /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c index 9a1b5224f239..22a0942f0bce 100644 --- a/drivers/usb/host/fhci-hcd.c +++ b/drivers/usb/host/fhci-hcd.c @@ -791,7 +791,7 @@ static struct platform_driver of_fhci_driver = { .of_match_table = of_fhci_match, }, .probe = of_fhci_probe, - .remove_new = of_fhci_remove, + .remove = of_fhci_remove, }; module_platform_driver(of_fhci_driver); diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c index a45ede80edfc..c3acd410ce94 100644 --- a/drivers/usb/host/fhci-sched.c +++ b/drivers/usb/host/fhci-sched.c @@ -158,7 +158,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td) struct packet *pkt; u8 *data = NULL; - /* calcalate data address,len and toggle and then add the transaction */ + /* calculate data address,len and toggle and then add the transaction */ if (td->toggle == USB_TD_TOGGLE_CARRY) td->toggle = ed->toggle_carry; @@ -679,7 +679,7 @@ static void process_done_list(unsigned long data) DECLARE_TASKLET_OLD(fhci_tasklet, process_done_list); -/* transfer complted callback */ +/* transfer completed callback */ u32 fhci_transfer_confirm_callback(struct fhci_hcd *fhci) { if (!fhci->process_done_task->state) diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c index 6cdc3d805c32..4e67b9471986 100644 --- a/drivers/usb/host/fsl-mph-dr-of.c +++ b/drivers/usb/host/fsl-mph-dr-of.c @@ -362,7 +362,7 @@ static struct platform_driver fsl_usb2_mph_dr_driver = { .of_match_table = fsl_usb2_mph_dr_of_match, }, .probe = fsl_usb2_mph_dr_of_probe, - .remove_new = fsl_usb2_mph_dr_of_remove, + .remove = fsl_usb2_mph_dr_of_remove, }; module_platform_driver(fsl_usb2_mph_dr_driver); diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index a82d8926e922..71c22c4bd163 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c @@ -1684,7 +1684,7 @@ MODULE_ALIAS("platform:isp116x-hcd"); static struct platform_driver isp116x_driver = { .probe = isp116x_probe, - .remove_new = isp116x_remove, + .remove = isp116x_remove, .suspend = isp116x_suspend, .resume = isp116x_resume, .driver = { diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index a52c3d858f3e..954fc5ad565b 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -83,7 +83,7 @@ #include <asm/irq.h> #include <asm/byteorder.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> static int dbg_level; #ifdef ISP1362_DEBUG @@ -2357,7 +2357,7 @@ static void isp1362_hc_stop(struct usb_hcd *hcd) pr_debug("%s:\n", __func__); - del_timer_sync(&hcd->rh_timer); + timer_delete_sync(&hcd->rh_timer); spin_lock_irqsave(&isp1362_hcd->lock, flags); @@ -2757,7 +2757,7 @@ static int isp1362_resume(struct platform_device *pdev) static struct platform_driver isp1362_driver = { .probe = isp1362_probe, - .remove_new = isp1362_remove, + .remove = isp1362_remove, .suspend = isp1362_suspend, .resume = isp1362_resume, diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index 9fe4f48b1898..dcf31a592f5d 100644 --- 
a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -779,11 +779,17 @@ max3421_check_unlink(struct usb_hcd *hcd) retval = 1; dev_dbg(&spi->dev, "%s: URB %p unlinked=%d", __func__, urb, urb->unlinked); - usb_hcd_unlink_urb_from_ep(hcd, urb); - spin_unlock_irqrestore(&max3421_hcd->lock, - flags); - usb_hcd_giveback_urb(hcd, urb, 0); - spin_lock_irqsave(&max3421_hcd->lock, flags); + if (urb == max3421_hcd->curr_urb) { + max3421_hcd->urb_done = 1; + max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) | + BIT(MAX3421_HI_RCVDAV_BIT)); + } else { + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&max3421_hcd->lock, + flags); + usb_hcd_giveback_urb(hcd, urb, 0); + spin_lock_irqsave(&max3421_hcd->lock, flags); + } } } } @@ -1940,6 +1946,12 @@ max3421_remove(struct spi_device *spi) usb_put_hcd(hcd); } +static const struct spi_device_id max3421_spi_ids[] = { + { "max3421" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, max3421_spi_ids); + static const struct of_device_id max3421_of_match_table[] = { { .compatible = "maxim,max3421", }, {}, @@ -1949,6 +1961,7 @@ MODULE_DEVICE_TABLE(of, max3421_of_match_table); static struct spi_driver max3421_driver = { .probe = max3421_probe, .remove = max3421_remove, + .id_table = max3421_spi_ids, .driver = { .name = "max3421-hcd", .of_match_table = max3421_of_match_table, diff --git a/drivers/usb/host/octeon-hcd.c b/drivers/usb/host/octeon-hcd.c index 19d5777f5db2..361d33b0c4d2 100644 --- a/drivers/usb/host/octeon-hcd.c +++ b/drivers/usb/host/octeon-hcd.c @@ -3346,7 +3346,7 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, break; case USB_PORT_FEAT_INDICATOR: dev_dbg(dev, " INDICATOR\n"); - /* Port inidicator not supported */ + /* Port indicator not supported */ break; case USB_PORT_FEAT_C_CONNECTION: dev_dbg(dev, " C_CONNECTION\n"); @@ -3711,8 +3711,8 @@ static struct platform_driver octeon_usb_driver = { .name = "octeon-hcd", .of_match_table = octeon_usb_match, }, - .probe = octeon_usb_probe, - .remove_new = octeon_usb_remove, + .probe = octeon_usb_probe, + .remove = octeon_usb_remove, }; static int __init octeon_usb_driver_init(void) diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index f691cd98a574..5df793dcb25d 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -685,7 +685,7 @@ static SIMPLE_DEV_PM_OPS(ohci_hcd_at91_pm_ops, ohci_hcd_at91_drv_suspend, static struct platform_driver ohci_hcd_at91_driver = { .probe = ohci_hcd_at91_drv_probe, - .remove_new = ohci_hcd_at91_drv_remove, + .remove = ohci_hcd_at91_drv_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "at91_ohci", diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c index d9adae53466b..3c5ca2d7c92e 100644 --- a/drivers/usb/host/ohci-da8xx.c +++ b/drivers/usb/host/ohci-da8xx.c @@ -22,7 +22,7 @@ #include <linux/regulator/consumer.h> #include <linux/usb.h> #include <linux/usb/hcd.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "ohci.h" @@ -531,7 +531,7 @@ static const struct ohci_driver_overrides da8xx_overrides __initconst = { */ static struct platform_driver ohci_hcd_da8xx_driver = { .probe = ohci_da8xx_probe, - .remove_new = ohci_da8xx_remove, + .remove = ohci_da8xx_remove, .shutdown = usb_hcd_platform_shutdown, #ifdef CONFIG_PM .suspend = ohci_da8xx_suspend, diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c index 20e26a474591..cc5cb0900988 100644 --- a/drivers/usb/host/ohci-exynos.c +++ 
b/drivers/usb/host/ohci-exynos.c @@ -37,7 +37,6 @@ struct exynos_ohci_hcd { static int exynos_ohci_get_phy(struct device *dev, struct exynos_ohci_hcd *exynos_ohci) { - struct device_node *child; struct phy *phy; int phy_number, num_phys; int ret; @@ -55,26 +54,22 @@ static int exynos_ohci_get_phy(struct device *dev, return 0; /* Get PHYs using legacy bindings */ - for_each_available_child_of_node(dev->of_node, child) { + for_each_available_child_of_node_scoped(dev->of_node, child) { ret = of_property_read_u32(child, "reg", &phy_number); if (ret) { dev_err(dev, "Failed to parse device tree\n"); - of_node_put(child); return ret; } if (phy_number >= PHY_NUMBER) { dev_err(dev, "Invalid number of PHYs\n"); - of_node_put(child); return -EINVAL; } phy = devm_of_phy_optional_get(dev, child, NULL); exynos_ohci->phy[phy_number] = phy; - if (IS_ERR(phy)) { - of_node_put(child); + if (IS_ERR(phy)) return PTR_ERR(phy); - } } exynos_ohci->legacy_phy = true; @@ -135,20 +130,16 @@ static int exynos_ohci_probe(struct platform_device *pdev) err = exynos_ohci_get_phy(&pdev->dev, exynos_ohci); if (err) - goto fail_clk; + goto fail_io; - exynos_ohci->clk = devm_clk_get(&pdev->dev, "usbhost"); + exynos_ohci->clk = devm_clk_get_enabled(&pdev->dev, "usbhost"); if (IS_ERR(exynos_ohci->clk)) { dev_err(&pdev->dev, "Failed to get usbhost clock\n"); err = PTR_ERR(exynos_ohci->clk); - goto fail_clk; + goto fail_io; } - err = clk_prepare_enable(exynos_ohci->clk); - if (err) - goto fail_clk; - hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hcd->regs)) { err = PTR_ERR(hcd->regs); @@ -191,8 +182,6 @@ fail_add_hcd: exynos_ohci_phy_disable(&pdev->dev); pdev->dev.of_node = exynos_ohci->of_node; fail_io: - clk_disable_unprepare(exynos_ohci->clk); -fail_clk: usb_put_hcd(hcd); return err; } @@ -208,8 +197,6 @@ static void exynos_ohci_remove(struct platform_device *pdev) exynos_ohci_phy_disable(&pdev->dev); - clk_disable_unprepare(exynos_ohci->clk); - usb_put_hcd(hcd); } @@ -221,7 +208,6 @@ static void exynos_ohci_shutdown(struct platform_device *pdev) hcd->driver->shutdown(hcd); } -#ifdef CONFIG_PM static int exynos_ohci_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); @@ -258,19 +244,13 @@ static int exynos_ohci_resume(struct device *dev) return 0; } -#else -#define exynos_ohci_suspend NULL -#define exynos_ohci_resume NULL -#endif static const struct ohci_driver_overrides exynos_overrides __initconst = { .extra_priv_size = sizeof(struct exynos_ohci_hcd), }; -static const struct dev_pm_ops exynos_ohci_pm_ops = { - .suspend = exynos_ohci_suspend, - .resume = exynos_ohci_resume, -}; +static DEFINE_SIMPLE_DEV_PM_OPS(exynos_ohci_pm_ops, + exynos_ohci_suspend, exynos_ohci_resume); #ifdef CONFIG_OF static const struct of_device_id exynos_ohci_match[] = { @@ -282,11 +262,11 @@ MODULE_DEVICE_TABLE(of, exynos_ohci_match); static struct platform_driver exynos_ohci_driver = { .probe = exynos_ohci_probe, - .remove_new = exynos_ohci_remove, + .remove = exynos_ohci_remove, .shutdown = exynos_ohci_shutdown, .driver = { .name = "exynos-ohci", - .pm = &exynos_ohci_pm_ops, + .pm = pm_ptr(&exynos_ohci_pm_ops), .of_match_table = of_match_ptr(exynos_ohci_match), } }; @@ -308,4 +288,5 @@ module_exit(ohci_exynos_cleanup); MODULE_ALIAS("platform:exynos-ohci"); MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); +MODULE_DESCRIPTION("OHCI support for Samsung S5P/Exynos SoC Series"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 
4f9982ecfb58..9c7f3008646e 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -44,7 +44,7 @@ #include <asm/io.h> #include <asm/irq.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <asm/byteorder.h> @@ -746,7 +746,8 @@ static int ohci_start(struct usb_hcd *hcd) */ static void io_watchdog_func(struct timer_list *t) { - struct ohci_hcd *ohci = from_timer(ohci, t, io_watchdog); + struct ohci_hcd *ohci = timer_container_of(ohci, t, io_watchdog); bool takeback_all_pending = false; u32 status; u32 head; @@ -888,6 +889,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd) /* Check for an all 1's result which is a typical consequence * of dead, unclocked, or unplugged (CardBus...) devices */ +again: if (ints == ~(u32)0) { ohci->rh_state = OHCI_RH_HALTED; ohci_dbg (ohci, "device removed!\n"); @@ -982,6 +984,13 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd) } spin_unlock(&ohci->lock); + /* repeat until all enabled interrupts are handled */ + if (ohci->rh_state != OHCI_RH_HALTED) { + ints = ohci_readl(ohci, &regs->intrstatus); + if (ints && (ints & ohci_readl(ohci, &regs->intrenable))) + goto again; + } + return IRQ_HANDLED; } @@ -995,7 +1004,7 @@ static void ohci_stop (struct usb_hcd *hcd) if (quirk_nec(ohci)) flush_work(&ohci->nec_work); - del_timer_sync(&ohci->io_watchdog); + timer_delete_sync(&ohci->io_watchdog); ohci->prev_frame_no = IO_WATCHDOG_OFF; ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index 90cee192e96d..b3d734ab6201 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c @@ -315,7 +315,7 @@ static int ohci_bus_suspend (struct usb_hcd *hcd) spin_unlock_irq (&ohci->lock); if (rc == 0) { - del_timer_sync(&ohci->io_watchdog); + timer_delete_sync(&ohci->io_watchdog); ohci->prev_frame_no = IO_WATCHDOG_OFF; } return rc; } diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c index 8264c454f6bd..24d5a1dc5056 100644 --- a/drivers/usb/host/ohci-nxp.c +++ b/drivers/usb/host/ohci-nxp.c @@ -51,8 +51,6 @@ static struct hc_driver __read_mostly ohci_nxp_hc_driver; static struct i2c_client *isp1301_i2c_client; -static struct clk *usb_host_clk; - static void isp1301_configure_lpc32xx(void) { /* LPC32XX only supports DAT_SE0 USB mode */ @@ -155,6 +153,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev) struct resource *res; int ret = 0, irq; struct device_node *isp1301_node; + struct clk *usb_host_clk; if (pdev->dev.of_node) { isp1301_node = of_parse_phandle(pdev->dev.of_node, @@ -180,26 +179,20 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev) } /* Enable USB host clock */ - usb_host_clk = devm_clk_get(&pdev->dev, NULL); + usb_host_clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(usb_host_clk)) { - dev_err(&pdev->dev, "failed to acquire USB OHCI clock\n"); + dev_err(&pdev->dev, "failed to acquire and start USB OHCI clock\n"); ret = PTR_ERR(usb_host_clk); goto fail_disable; } - ret = clk_prepare_enable(usb_host_clk); - if (ret < 0) { - dev_err(&pdev->dev, "failed to start USB OHCI clock\n"); - goto fail_disable; - } - isp1301_configure(); hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { dev_err(&pdev->dev, "Failed to allocate HC buffer\n"); ret = -ENOMEM; - goto fail_hcd; + goto fail_disable; } hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); @@ -229,8 +222,6 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev) ohci_nxp_stop_hc(); fail_resource:
usb_put_hcd(hcd); -fail_hcd: - clk_disable_unprepare(usb_host_clk); fail_disable: isp1301_i2c_client = NULL; return ret; @@ -243,7 +234,6 @@ static void ohci_hcd_nxp_remove(struct platform_device *pdev) usb_remove_hcd(hcd); ohci_nxp_stop_hc(); usb_put_hcd(hcd); - clk_disable_unprepare(usb_host_clk); isp1301_i2c_client = NULL; } @@ -264,7 +254,7 @@ static struct platform_driver ohci_hcd_nxp_driver = { .of_match_table = of_match_ptr(ohci_hcd_nxp_match), }, .probe = ohci_hcd_nxp_probe, - .remove_new = ohci_hcd_nxp_remove, + .remove = ohci_hcd_nxp_remove, }; static int __init ohci_nxp_init(void) diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index 21a6f6c55e07..f6e56c4b9914 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c @@ -152,7 +152,7 @@ static int ohci_omap_reset(struct usb_hcd *hcd) rh &= ~RH_A_NOCP; - /* gpio9 for overcurrent detction */ + /* gpio9 for overcurrent detection */ omap_cfg_reg(W8_1610_GPIO9); /* for paranoia's sake: disable USB.PUEN */ @@ -390,7 +390,7 @@ static int ohci_omap_resume(struct platform_device *dev) */ static struct platform_driver ohci_hcd_omap_driver = { .probe = ohci_hcd_omap_probe, - .remove_new = ohci_hcd_omap_remove, + .remove = ohci_hcd_omap_remove, .shutdown = usb_hcd_platform_shutdown, #ifdef CONFIG_PM .suspend = ohci_omap_suspend, diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 900ea0d368e0..9f0a6b27e47c 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -165,6 +165,25 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) return 0; } +static int ohci_quirk_loongson(struct usb_hcd *hcd) +{ + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + + /* + * Loongson's LS7A OHCI controller (rev 0x02) has a + * flaw. MMIO register with offset 0x60/64 is treated + * as legacy PS2-compatible keyboard/mouse interface. + * Since OHCI only use 4KB BAR resource, LS7A OHCI's + * 32KB BAR is wrapped around (the 2nd 4KB BAR space + * is the same as the 1st 4KB internally). So add 4KB + * offset (0x1000) to the OHCI registers as a quirk. 
+ */ + if (pdev->revision == 0x2) + hcd->regs += SZ_4K; /* SZ_4K = 0x1000 */ + + return 0; +} + static int ohci_quirk_qemu(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); @@ -225,6 +244,10 @@ static const struct pci_device_id ohci_pci_quirks[] = { .driver_data = (unsigned long)ohci_quirk_amd700, }, { + PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, 0x7a24), + .driver_data = (unsigned long)ohci_quirk_loongson, + }, + { .vendor = PCI_VENDOR_ID_APPLE, .device = 0x003f, .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET, diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c index 4a75507325dd..f47ae12cde6a 100644 --- a/drivers/usb/host/ohci-platform.c +++ b/drivers/usb/host/ohci-platform.c @@ -344,7 +344,7 @@ static const struct dev_pm_ops ohci_platform_pm_ops = { static struct platform_driver ohci_platform_driver = { .id_table = ohci_platform_table, .probe = ohci_platform_probe, - .remove_new = ohci_platform_remove, + .remove = ohci_platform_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "ohci-platform", diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c index f64bfe5f4d4d..acd0a0e398a4 100644 --- a/drivers/usb/host/ohci-ppc-of.c +++ b/drivers/usb/host/ohci-ppc-of.c @@ -204,10 +204,6 @@ static const struct of_device_id ohci_hcd_ppc_of_match[] = { #ifdef CONFIG_USB_OHCI_HCD_PPC_OF_LE { .name = "usb", - .compatible = "ohci-littledian", - }, - { - .name = "usb", .compatible = "ohci-le", }, #endif @@ -223,7 +219,7 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match); static struct platform_driver ohci_hcd_ppc_of_driver = { .probe = ohci_hcd_ppc_of_probe, - .remove_new = ohci_hcd_ppc_of_remove, + .remove = ohci_hcd_ppc_of_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "ppc-of-ohci", diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index 3348c25ddb18..45d026e85168 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c @@ -569,7 +569,7 @@ static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { static struct platform_driver ohci_hcd_pxa27x_driver = { .probe = ohci_hcd_pxa27x_probe, - .remove_new = ohci_hcd_pxa27x_remove, + .remove = ohci_hcd_pxa27x_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "pxa27x-ohci", diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c index c5c9b4cbcb9a..66d970854357 100644 --- a/drivers/usb/host/ohci-s3c2410.c +++ b/drivers/usb/host/ohci-s3c2410.c @@ -457,7 +457,7 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_s3c2410_dt_ids); static struct platform_driver ohci_hcd_s3c2410_driver = { .probe = ohci_hcd_s3c2410_probe, - .remove_new = ohci_hcd_s3c2410_remove, + .remove = ohci_hcd_s3c2410_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "s3c2410-ohci", diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c index 4b39e9d6f33a..843a5378764e 100644 --- a/drivers/usb/host/ohci-sm501.c +++ b/drivers/usb/host/ohci-sm501.c @@ -252,7 +252,7 @@ static int ohci_sm501_resume(struct platform_device *pdev) */ static struct platform_driver ohci_hcd_sm501_driver = { .probe = ohci_hcd_sm501_drv_probe, - .remove_new = ohci_hcd_sm501_drv_remove, + .remove = ohci_hcd_sm501_drv_remove, .shutdown = usb_hcd_platform_shutdown, .suspend = ohci_sm501_suspend, .resume = ohci_sm501_resume, diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c index 993f347c5c28..d7131e5a4477 100644 --- a/drivers/usb/host/ohci-spear.c +++ b/drivers/usb/host/ohci-spear.c @@ -157,7 
+157,7 @@ MODULE_DEVICE_TABLE(of, spear_ohci_id_table); /* Driver definition to register with the platform bus */ static struct platform_driver spear_ohci_hcd_driver = { .probe = spear_ohci_hcd_drv_probe, - .remove_new = spear_ohci_hcd_drv_remove, + .remove = spear_ohci_hcd_drv_remove, #ifdef CONFIG_PM .suspend = spear_ohci_hcd_drv_suspend, .resume = spear_ohci_hcd_drv_resume, diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c index 214342013f7e..d1656fce5400 100644 --- a/drivers/usb/host/ohci-st.c +++ b/drivers/usb/host/ohci-st.c @@ -298,7 +298,7 @@ MODULE_DEVICE_TABLE(of, st_ohci_platform_ids); static struct platform_driver ohci_platform_driver = { .probe = st_ohci_platform_probe, - .remove_new = st_ohci_platform_remove, + .remove = st_ohci_platform_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "st-ohci", diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index d467472f9d3c..6b7c73eff081 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -15,6 +15,7 @@ #include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/list.h> @@ -27,7 +28,7 @@ #include <linux/iopoll.h> #include <asm/irq.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/irq.h> #include <linux/platform_device.h> @@ -196,31 +197,6 @@ struct ehci_regs { #define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC) } __packed; -/* Appendix C, Debug port ... intended for use with special "debug devices" - * that can help if there's no serial console. (nonstandard enumeration.) - */ -struct ehci_dbg_port { - u32 control; -#define DBGP_OWNER (1<<30) -#define DBGP_ENABLED (1<<28) -#define DBGP_DONE (1<<16) -#define DBGP_INUSE (1<<10) -#define DBGP_ERRCODE(x) (((x)>>7)&0x07) -# define DBGP_ERR_BAD 1 -# define DBGP_ERR_SIGNAL 2 -#define DBGP_ERROR (1<<6) -#define DBGP_GO (1<<5) -#define DBGP_OUT (1<<4) -#define DBGP_LEN(x) (((x)>>0)&0x0f) - u32 pids; -#define DBGP_PID_GET(x) (((x)>>16)&0xff) -#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok)) - u32 data03; - u32 data47; - u32 address; -#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) -} __packed; - #define QTD_NEXT(dma) cpu_to_le32((u32)dma) /* @@ -910,7 +886,7 @@ static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len) int a_blocks; /* blocks allocated */ int i, j; - /* Don't allocte bigger than supported */ + /* Don't allocate bigger than supported */ if (len > BUFFER_SIZE * BUFFER_NUM) { oxu_err(oxu, "buffer too big (%d)\n", len); return -ENOMEM; @@ -927,7 +903,7 @@ static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len) /* Find a suitable available data buffer */ for (i = 0; i < BUFFER_NUM; - i += max(a_blocks, (int)oxu->db_used[i])) { + i += max_t(int, a_blocks, oxu->db_used[i])) { /* Check all the required blocks are available */ for (j = 0; j < a_blocks; j++) @@ -1151,7 +1127,7 @@ static void ehci_mem_cleanup(struct oxu_hcd *oxu) qh_put(oxu->async); oxu->async = NULL; - del_timer(&oxu->urb_timer); + timer_delete(&oxu->urb_timer); oxu->periodic = NULL; @@ -2781,7 +2757,7 @@ static void ehci_port_power(struct oxu_hcd *oxu, int is_on) if (!HCS_PPC(oxu->hcs_params)) return; - oxu_dbg(oxu, "...power%s ports...\n", is_on ? 
"up" : "down"); + oxu_dbg(oxu, "...power%s ports...\n", str_up_down(is_on)); for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) { if (is_on) oxu_hub_control(oxu_to_hcd(oxu), SetPortFeature, @@ -2979,7 +2955,7 @@ static irqreturn_t oxu_irq(struct usb_hcd *hcd) static void oxu_watchdog(struct timer_list *t) { - struct oxu_hcd *oxu = from_timer(oxu, t, watchdog); + struct oxu_hcd *oxu = timer_container_of(oxu, t, watchdog); unsigned long flags; spin_lock_irqsave(&oxu->lock, flags); @@ -3065,7 +3041,7 @@ static int oxu_hcd_init(struct usb_hcd *hcd) * make problems: throughput reduction (!), data errors... */ if (park) { - park = min(park, (unsigned) 3); + park = min_t(unsigned int, park, 3); temp |= CMD_PARK; temp |= park << 8; } @@ -3178,7 +3154,7 @@ static void oxu_stop(struct usb_hcd *hcd) ehci_port_power(oxu, 0); /* no more interrupts ... */ - del_timer_sync(&oxu->watchdog); + timer_delete_sync(&oxu->watchdog); spin_lock_irq(&oxu->lock); if (HC_IS_RUNNING(hcd->state)) @@ -3911,7 +3887,7 @@ static int oxu_bus_suspend(struct usb_hcd *hcd) spin_unlock_irq(&oxu->lock); /* turn off now-idle HC */ - del_timer_sync(&oxu->watchdog); + timer_delete_sync(&oxu->watchdog); spin_lock_irq(&oxu->lock); ehci_halt(oxu); hcd->state = HC_STATE_SUSPENDED; @@ -4314,7 +4290,7 @@ static int oxu_drv_resume(struct device *dev) static struct platform_driver oxu_driver = { .probe = oxu_drv_probe, - .remove_new = oxu_drv_remove, + .remove = oxu_drv_remove, .shutdown = oxu_drv_shutdown, .suspend = oxu_drv_suspend, .resume = oxu_drv_resume, diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 1f9c1b1435d8..0404489c2f6a 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -958,6 +958,15 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev) * booting from USB disk or using a usb keyboard */ hcc_params = readl(base + EHCI_HCC_PARAMS); + + /* LS7A EHCI controller doesn't have extended capabilities, the + * EECP (EHCI Extended Capabilities Pointer) field of HCCPARAMS + * register should be 0x0 but it reads as 0xa0. So clear it to + * avoid error messages on boot. + */ + if (pdev->vendor == PCI_VENDOR_ID_LOONGSON && pdev->device == 0x7a14) + hcc_params &= ~(0xffL << 8); + offset = (hcc_params >> 8) & 0xff; while (offset && --count) { pci_read_config_dword(pdev, offset, &cap); diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 9f4bf8c5f8a5..d21a03cf5c17 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -297,9 +297,9 @@ static void put_child_connect_map(struct r8a66597 *r8a66597, int address) static void set_pipe_reg_addr(struct r8a66597_pipe *pipe, u8 dma_ch) { u16 pipenum = pipe->info.pipenum; - const unsigned long fifoaddr[] = {D0FIFO, D1FIFO, CFIFO}; - const unsigned long fifosel[] = {D0FIFOSEL, D1FIFOSEL, CFIFOSEL}; - const unsigned long fifoctr[] = {D0FIFOCTR, D1FIFOCTR, CFIFOCTR}; + static const unsigned long fifoaddr[] = {D0FIFO, D1FIFO, CFIFO}; + static const unsigned long fifosel[] = {D0FIFOSEL, D1FIFOSEL, CFIFOSEL}; + static const unsigned long fifoctr[] = {D0FIFOCTR, D1FIFOCTR, CFIFOCTR}; if (dma_ch > R8A66597_PIPE_NO_DMA) /* dma fifo not use? 
*/ dma_ch = R8A66597_PIPE_NO_DMA; @@ -759,7 +759,7 @@ static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597, struct r8a66597_pipe_info *info = &pipe->info; unsigned short mbw = mbw_value(r8a66597); - /* pipe dma is only for external controlles */ + /* pipe dma is only for external controllers */ if (r8a66597->pdata->on_chip) return; @@ -1336,7 +1336,7 @@ static void packet_read(struct r8a66597 *r8a66597, u16 pipenum) buf = (void *)urb->transfer_buffer + urb->actual_length; urb_len = urb->transfer_buffer_length - urb->actual_length; } - bufsize = min(urb_len, (int) td->maxpacket); + bufsize = min_t(int, urb_len, td->maxpacket); if (rcv_len <= bufsize) { size = rcv_len; } else { @@ -1720,7 +1720,8 @@ static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port) static void r8a66597_interval_timer(struct timer_list *t) { - struct r8a66597_timers *timers = from_timer(timers, t, interval); + struct r8a66597_timers *timers = timer_container_of(timers, t, + interval); struct r8a66597 *r8a66597 = timers->r8a66597; unsigned long flags; u16 pipenum; @@ -1744,7 +1745,7 @@ static void r8a66597_interval_timer(struct timer_list *t) static void r8a66597_td_timer(struct timer_list *t) { - struct r8a66597_timers *timers = from_timer(timers, t, td); + struct r8a66597_timers *timers = timer_container_of(timers, t, td); struct r8a66597 *r8a66597 = timers->r8a66597; unsigned long flags; u16 pipenum; @@ -1798,7 +1799,7 @@ static void r8a66597_td_timer(struct timer_list *t) static void r8a66597_timer(struct timer_list *t) { - struct r8a66597 *r8a66597 = from_timer(r8a66597, t, rh_timer); + struct r8a66597 *r8a66597 = timer_container_of(r8a66597, t, rh_timer); unsigned long flags; int port; @@ -2384,7 +2385,7 @@ static void r8a66597_remove(struct platform_device *pdev) struct r8a66597 *r8a66597 = platform_get_drvdata(pdev); struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597); - del_timer_sync(&r8a66597->rh_timer); + timer_delete_sync(&r8a66597->rh_timer); usb_remove_hcd(hcd); iounmap(r8a66597->reg); if (r8a66597->pdata->on_chip) @@ -2510,7 +2511,7 @@ clean_up: static struct platform_driver r8a66597_driver = { .probe = r8a66597_probe, - .remove_new = r8a66597_remove, + .remove = r8a66597_remove, .driver = { .name = hcd_name, .pm = R8A66597_DEV_PM_OPS, diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index 2b871540bb50..ea3cab99c5d4 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -48,13 +48,14 @@ #include <linux/usb/hcd.h> #include <linux/platform_device.h> #include <linux/prefetch.h> +#include <linux/string_choices.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/byteorder.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "sl811.h" @@ -98,7 +99,7 @@ static void port_power(struct sl811 *sl811, int is_on) if (sl811->board && sl811->board->port_power) { /* switch VBUS, at 500mA unless hub power budget gets set */ dev_dbg(hcd->self.controller, "power %s\n", - is_on ? 
"on" : "off"); + str_on_off(is_on)); sl811->board->port_power(hcd->self.controller, is_on); } @@ -1123,7 +1124,7 @@ sl811h_hub_descriptor ( static void sl811h_timer(struct timer_list *t) { - struct sl811 *sl811 = from_timer(sl811, t, timer); + struct sl811 *sl811 = timer_container_of(sl811, t, timer); unsigned long flags; u8 irqstat; u8 signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE; @@ -1514,7 +1515,7 @@ sl811h_stop(struct usb_hcd *hcd) struct sl811 *sl811 = hcd_to_sl811(hcd); unsigned long flags; - del_timer_sync(&hcd->rh_timer); + timer_delete_sync(&hcd->rh_timer); spin_lock_irqsave(&sl811->lock, flags); port_power(sl811, 0); @@ -1784,7 +1785,7 @@ sl811h_resume(struct platform_device *dev) /* this driver is exported so sl811_cs can depend on it */ struct platform_driver sl811h_driver = { .probe = sl811h_probe, - .remove_new = sl811h_remove, + .remove = sl811h_remove, .suspend = sl811h_suspend, .resume = sl811h_resume, diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c index cfebb833668e..8a1f6d1b5b56 100644 --- a/drivers/usb/host/uhci-grlib.c +++ b/drivers/usb/host/uhci-grlib.c @@ -184,7 +184,7 @@ MODULE_DEVICE_TABLE(of, uhci_hcd_grlib_of_match); static struct platform_driver uhci_grlib_driver = { .probe = uhci_hcd_grlib_probe, - .remove_new = uhci_hcd_grlib_remove, + .remove = uhci_hcd_grlib_remove, .shutdown = uhci_hcd_grlib_shutdown, .driver = { .name = "grlib-uhci", diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index fd2408b553cf..14e6dfef16c6 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -716,7 +716,7 @@ static void uhci_stop(struct usb_hcd *hcd) spin_unlock_irq(&uhci->lock); synchronize_irq(hcd->irq); - del_timer_sync(&uhci->fsbr_timer); + timer_delete_sync(&uhci->fsbr_timer); release_uhci(uhci); } diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c index 3dec5dd3a0d5..62318291f566 100644 --- a/drivers/usb/host/uhci-platform.c +++ b/drivers/usb/host/uhci-platform.c @@ -121,7 +121,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev) } /* Get and enable clock if any specified */ - uhci->clk = devm_clk_get(&pdev->dev, NULL); + uhci->clk = devm_clk_get_optional(&pdev->dev, NULL); if (IS_ERR(uhci->clk)) { ret = PTR_ERR(uhci->clk); goto err_rmr; @@ -184,7 +184,7 @@ MODULE_DEVICE_TABLE(of, platform_uhci_ids); static struct platform_driver uhci_platform_driver = { .probe = uhci_hcd_platform_probe, - .remove_new = uhci_hcd_platform_remove, + .remove = uhci_hcd_platform_remove, .shutdown = uhci_hcd_platform_shutdown, .driver = { .name = "platform-uhci", diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index 35fcb826152c..9480d4ff0111 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c @@ -84,14 +84,14 @@ static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp) uhci_fsbr_on(uhci); else if (uhci->fsbr_expiring) { uhci->fsbr_expiring = 0; - del_timer(&uhci->fsbr_timer); + timer_delete(&uhci->fsbr_timer); } } } static void uhci_fsbr_timeout(struct timer_list *t) { - struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer); + struct uhci_hcd *uhci = timer_container_of(uhci, t, fsbr_timer); unsigned long flags; spin_lock_irqsave(&uhci->lock, flags); diff --git a/drivers/usb/host/xen-hcd.c b/drivers/usb/host/xen-hcd.c index 46fdab940092..1c2a95fe41e5 100644 --- a/drivers/usb/host/xen-hcd.c +++ b/drivers/usb/host/xen-hcd.c @@ -327,7 +327,7 @@ static int xenhcd_bus_suspend(struct usb_hcd *hcd) } 
spin_unlock_irq(&info->lock); - del_timer_sync(&info->watchdog); + timer_delete_sync(&info->watchdog); return ret; } @@ -1258,7 +1258,7 @@ static void xenhcd_disconnect(struct xenbus_device *dev) static void xenhcd_watchdog(struct timer_list *timer) { - struct xenhcd_info *info = from_timer(info, timer, watchdog); + struct xenhcd_info *info = timer_container_of(info, timer, watchdog); unsigned long flags; spin_lock_irqsave(&info->lock, flags); @@ -1307,7 +1307,7 @@ static void xenhcd_stop(struct usb_hcd *hcd) { struct xenhcd_info *info = xenhcd_hcd_to_info(hcd); - del_timer_sync(&info->watchdog); + timer_delete_sync(&info->watchdog); spin_lock_irq(&info->lock); /* cancel all urbs */ hcd->state = HC_STATE_HALT; diff --git a/drivers/usb/host/xhci-caps.h b/drivers/usb/host/xhci-caps.h index 9e94cebf4a56..4b8ff4815644 100644 --- a/drivers/usb/host/xhci-caps.h +++ b/drivers/usb/host/xhci-caps.h @@ -62,8 +62,8 @@ #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) -/* db_off bitmask - bits 0:1 reserved */ -#define DBOFF_MASK (~0x3) +/* db_off bitmask - bits 31:2 Doorbell Array Offset */ +#define DBOFF_MASK (0xfffffffc) /* run_regs_off bitmask - bits 0:4 reserved */ #define RTSOFF_MASK (~0x1f) @@ -83,3 +83,9 @@ #define HCC2_CIC(p) ((p) & (1 << 5)) /* true: HC support Extended TBC Capability, Isoc burst count > 65535 */ #define HCC2_ETC(p) ((p) & (1 << 6)) +/* true: HC support Extended TBC TRB Status Capability */ +#define HCC2_ETC_TSC(p) ((p) & (1 << 7)) +/* true: HC support Get/Set Extended Property Capability */ +#define HCC2_GSC(p) ((p) & (1 << 8)) +/* true: HC support Virtualization Based Trusted I/O Capability */ +#define HCC2_VTC(p) ((p) & (1 << 9)) diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index 8a9869ef0db6..06a2edb9e86e 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -173,16 +173,18 @@ static void xhci_dbc_giveback(struct dbc_request *req, int status) spin_lock(&dbc->lock); } -static void xhci_dbc_flush_single_request(struct dbc_request *req) +static void trb_to_noop(union xhci_trb *trb) { - union xhci_trb *trb = req->trb; - trb->generic.field[0] = 0; trb->generic.field[1] = 0; trb->generic.field[2] = 0; trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)); +} +static void xhci_dbc_flush_single_request(struct dbc_request *req) +{ + trb_to_noop(req->trb); xhci_dbc_giveback(req, -ESHUTDOWN); } @@ -246,8 +248,9 @@ xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1, trb->generic.field[2] = cpu_to_le32(field3); trb->generic.field[3] = cpu_to_le32(field4); - trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic); - + trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic, + xhci_trb_virt_to_dma(ring->enq_seg, + ring->enqueue)); ring->num_trbs_free--; next = ++(ring->enqueue); if (TRB_TYPE_LINK_LE32(next->link.control)) { @@ -469,7 +472,7 @@ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags) trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK)); } INIT_LIST_HEAD(&ring->td_list); - xhci_initialize_ring_info(ring, 1); + xhci_initialize_ring_info(ring); return ring; dma_fail: kfree(seg); @@ -516,7 +519,7 @@ static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags) goto string_fail; /* Setup ERST register: */ - writel(dbc->erst.erst_size, &dbc->regs->ersts); + writel(dbc->erst.num_entries, &dbc->regs->ersts); lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba); deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg, @@ 
-649,7 +652,10 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc) case DS_DISABLED: return; case DS_CONFIGURED: - case DS_STALLED: + spin_lock(&dbc->lock); + xhci_dbc_flush_requests(dbc); + spin_unlock(&dbc->lock); + if (dbc->driver->disconnect) dbc->driver->disconnect(dbc); break; @@ -670,6 +676,23 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc) } static void +handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted) +{ + if (halted) { + dev_info(dbc->dev, "DbC Endpoint halted\n"); + dep->halted = 1; + + } else if (dep->halted) { + dev_info(dbc->dev, "DbC Endpoint halt cleared\n"); + dep->halted = 0; + + if (!list_empty(&dep->list_pending)) + writel(DBC_DOOR_BELL_TARGET(dep->direction), + &dbc->regs->doorbell); + } +} + +static void dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event) { u32 portsc; @@ -697,6 +720,7 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event) struct xhci_ring *ring; int ep_id; int status; + struct xhci_ep_ctx *ep_ctx; u32 comp_code; size_t remain_length; struct dbc_request *req = NULL, *r; @@ -706,8 +730,30 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event) ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3])); dep = (ep_id == EPID_OUT) ? get_out_ep(dbc) : get_in_ep(dbc); + ep_ctx = (ep_id == EPID_OUT) ? + dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc); ring = dep->ring; + /* Match the pending request: */ + list_for_each_entry(r, &dep->list_pending, list_pending) { + if (r->trb_dma == event->trans_event.buffer) { + req = r; + break; + } + if (r->status == -COMP_STALL_ERROR) { + dev_warn(dbc->dev, "Give back stale stalled req\n"); + ring->num_trbs_free++; + xhci_dbc_giveback(r, 0); + } + } + + if (!req) { + dev_warn(dbc->dev, "no matched request\n"); + return; + } + + trace_xhci_dbc_handle_transfer(ring, &req->trb->generic, req->trb_dma); + switch (comp_code) { case COMP_SUCCESS: remain_length = 0; @@ -718,31 +764,49 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event) case COMP_TRB_ERROR: case COMP_BABBLE_DETECTED_ERROR: case COMP_USB_TRANSACTION_ERROR: - case COMP_STALL_ERROR: dev_warn(dbc->dev, "tx error %d detected\n", comp_code); status = -comp_code; break; + case COMP_STALL_ERROR: + dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n", + event->trans_event.buffer, remain_length, ep_ctx->deq); + status = 0; + dep->halted = 1; + + /* + * xHC DbC may trigger a STALL bulk xfer event when host sends a + * ClearFeature(ENDPOINT_HALT) request even if there wasn't an + * active bulk transfer. + * + * Don't give back this transfer request as hardware will later + * start processing TRBs starting from this 'STALLED' TRB, + * causing TRBs and requests to be out of sync. + * + * If the STALL event shows that some bytes were transferred, then + * assume it's an actual transfer issue and give back the request. + * In this case mark the TRB as No-Op to prevent hw from using the + * TRB again.
+ */ + + if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) { + dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n"); + if (remain_length == req->length) { + dev_dbg(dbc->dev, "Spurious stall event, keep req\n"); + req->status = -COMP_STALL_ERROR; + req->actual = 0; + return; + } + dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n"); + trb_to_noop(req->trb); + } + break; + default: dev_err(dbc->dev, "unknown tx error %d\n", comp_code); status = -comp_code; break; } - /* Match the pending request: */ - list_for_each_entry(r, &dep->list_pending, list_pending) { - if (r->trb_dma == event->trans_event.buffer) { - req = r; - break; - } - } - - if (!req) { - dev_warn(dbc->dev, "no matched request\n"); - return; - } - - trace_xhci_dbc_handle_transfer(ring, &req->trb->generic); - ring->num_trbs_free++; req->actual = req->length - remain_length; xhci_dbc_giveback(req, status); @@ -762,8 +826,8 @@ static void inc_evt_deq(struct xhci_ring *ring) static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) { dma_addr_t deq; - struct dbc_ep *dep; union xhci_trb *evt; + enum evtreturn ret = EVT_DONE; u32 ctrl, portsc; bool update_erdp = false; @@ -814,43 +878,17 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) return EVT_DISC; } - /* Handle endpoint stall event: */ + /* Check and handle changes in endpoint halt status */ ctrl = readl(&dbc->regs->control); - if ((ctrl & DBC_CTRL_HALT_IN_TR) || - (ctrl & DBC_CTRL_HALT_OUT_TR)) { - dev_info(dbc->dev, "DbC Endpoint stall\n"); - dbc->state = DS_STALLED; - - if (ctrl & DBC_CTRL_HALT_IN_TR) { - dep = get_in_ep(dbc); - xhci_dbc_flush_endpoint_requests(dep); - } - - if (ctrl & DBC_CTRL_HALT_OUT_TR) { - dep = get_out_ep(dbc); - xhci_dbc_flush_endpoint_requests(dep); - } - - return EVT_DONE; - } + handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR); + handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR); /* Clear DbC run change bit: */ if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) { writel(ctrl, &dbc->regs->control); ctrl = readl(&dbc->regs->control); } - break; - case DS_STALLED: - ctrl = readl(&dbc->regs->control); - if (!(ctrl & DBC_CTRL_HALT_IN_TR) && - !(ctrl & DBC_CTRL_HALT_OUT_TR) && - (ctrl & DBC_CTRL_DBC_RUN)) { - dbc->state = DS_CONFIGURED; - break; - } - - return EVT_DONE; default: dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state); break; @@ -866,7 +904,9 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) */ rmb(); - trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic); + trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic, + xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg, + dbc->ring_evt->dequeue)); switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) { case TRB_TYPE(TRB_PORT_STATUS): @@ -874,6 +914,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) break; case TRB_TYPE(TRB_TRANSFER): dbc_handle_xfer_event(dbc, evt); + ret = EVT_XFER_DONE; break; default: break; @@ -892,7 +933,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) lo_hi_writeq(deq, &dbc->regs->erdp); } - return EVT_DONE; + return ret; } static void xhci_dbc_handle_events(struct work_struct *work) @@ -901,6 +942,7 @@ static void xhci_dbc_handle_events(struct work_struct *work) struct xhci_dbc *dbc; unsigned long flags; unsigned int poll_interval; + unsigned long busypoll_timelimit; dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work); poll_interval = dbc->poll_interval; @@ -919,10 +961,20 @@ static 
void xhci_dbc_handle_events(struct work_struct *work) dbc->driver->disconnect(dbc); break; case EVT_DONE: - /* set fast poll rate if there are pending data transfers */ + /* + * Set fast poll rate if there are pending out transfers, or + * a transfer was recently processed + */ + busypoll_timelimit = dbc->xfer_timestamp + + msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT); + if (!list_empty(&dbc->eps[BULK_OUT].list_pending) || - !list_empty(&dbc->eps[BULK_IN].list_pending)) - poll_interval = 1; + time_is_after_jiffies(busypoll_timelimit)) + poll_interval = 0; + break; + case EVT_XFER_DONE: + dbc->xfer_timestamp = jiffies; + poll_interval = 0; break; default: dev_info(dbc->dev, "stop handling dbc events\n"); @@ -939,7 +991,6 @@ static const char * const dbc_state_strings[DS_MAX] = { [DS_ENABLED] = "enabled", [DS_CONNECTED] = "connected", [DS_CONFIGURED] = "configured", - [DS_STALLED] = "stalled", }; static ssize_t dbc_show(struct device *dev, @@ -1150,11 +1201,48 @@ static ssize_t dbc_bInterfaceProtocol_store(struct device *dev, return size; } +static ssize_t dbc_poll_interval_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct xhci_dbc *dbc; + struct xhci_hcd *xhci; + + xhci = hcd_to_xhci(dev_get_drvdata(dev)); + dbc = xhci->dbc; + + return sysfs_emit(buf, "%u\n", dbc->poll_interval); +} + +static ssize_t dbc_poll_interval_ms_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct xhci_dbc *dbc; + struct xhci_hcd *xhci; + u32 value; + int ret; + + ret = kstrtou32(buf, 0, &value); + if (ret || value > DBC_POLL_INTERVAL_MAX) + return -EINVAL; + + xhci = hcd_to_xhci(dev_get_drvdata(dev)); + dbc = xhci->dbc; + + dbc->poll_interval = value; + + mod_delayed_work(system_wq, &dbc->event_work, 0); + + return size; +} + static DEVICE_ATTR_RW(dbc); static DEVICE_ATTR_RW(dbc_idVendor); static DEVICE_ATTR_RW(dbc_idProduct); static DEVICE_ATTR_RW(dbc_bcdDevice); static DEVICE_ATTR_RW(dbc_bInterfaceProtocol); +static DEVICE_ATTR_RW(dbc_poll_interval_ms); static struct attribute *dbc_dev_attrs[] = { &dev_attr_dbc.attr, @@ -1162,6 +1250,7 @@ static struct attribute *dbc_dev_attrs[] = { &dev_attr_dbc_idProduct.attr, &dev_attr_dbc_bcdDevice.attr, &dev_attr_dbc_bInterfaceProtocol.attr, + &dev_attr_dbc_poll_interval_ms.attr, NULL }; ATTRIBUTE_GROUPS(dbc_dev); diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h index 92661b555c2a..47ac72c2286d 100644 --- a/drivers/usb/host/xhci-dbgcap.h +++ b/drivers/usb/host/xhci-dbgcap.h @@ -81,7 +81,6 @@ enum dbc_state { DS_ENABLED, DS_CONNECTED, DS_CONFIGURED, - DS_STALLED, DS_MAX }; @@ -90,12 +89,14 @@ struct dbc_ep { struct list_head list_pending; struct xhci_ring *ring; unsigned int direction:1; + unsigned int halted:1; }; #define DBC_QUEUE_SIZE 16 #define DBC_WRITE_BUF_SIZE 8192 #define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */ - +#define DBC_POLL_INTERVAL_MAX 5000 /* milliseconds */ +#define DBC_XFER_INACTIVITY_TIMEOUT 10 /* milliseconds */ /* * Private structure for DbC hardware state: */ @@ -110,7 +111,7 @@ struct dbc_port { struct tasklet_struct push; struct list_head write_pool; - struct kfifo write_fifo; + unsigned int tx_boundary; bool registered; }; @@ -142,6 +143,7 @@ struct xhci_dbc { enum dbc_state state; struct delayed_work event_work; unsigned int poll_interval; /* ms */ + unsigned long xfer_timestamp; unsigned resume_required:1; struct dbc_ep eps[2]; @@ -187,6 +189,7 @@ struct dbc_request { enum evtreturn { EVT_ERR = -1, EVT_DONE, + EVT_XFER_DONE, 
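The event-work change above replaces the old binary fast/slow polling with an inactivity window: every transfer event records jiffies in dbc->xfer_timestamp, and the handler keeps rescheduling itself with zero delay until DBC_XFER_INACTIVITY_TIMEOUT (10 ms) passes without traffic, after which it falls back to dbc->poll_interval. That slow interval is now also tunable at runtime through the new dbc_poll_interval_ms sysfs attribute, clamped to DBC_POLL_INTERVAL_MAX (5000 ms), with a write kicking event_work immediately. A compact sketch of the jiffies arithmetic, with a hypothetical helper name:

#include <linux/jiffies.h>

/* pick_poll_delay() is a made-up helper mirroring the EVT_DONE branch:
 * busy-poll (0 ms delay) while the last transfer is less than
 * inactivity_ms old, otherwise drop back to the configured slow rate */
static unsigned int pick_poll_delay(unsigned long xfer_timestamp,
				    unsigned int inactivity_ms,
				    unsigned int slow_ms)
{
	unsigned long limit = xfer_timestamp + msecs_to_jiffies(inactivity_ms);

	/* time_is_after_jiffies(limit): limit is still in the future */
	return time_is_after_jiffies(limit) ? 0 : slow_ms;
}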
EVT_GSER, EVT_DISC, }; diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c index b74e98e94393..d894081d8d15 100644 --- a/drivers/usb/host/xhci-dbgtty.c +++ b/drivers/usb/host/xhci-dbgtty.c @@ -25,16 +25,26 @@ static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc) } static unsigned int -dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size) +dbc_kfifo_to_req(struct dbc_port *port, char *packet) { - unsigned int len; - - len = kfifo_len(&port->write_fifo); - if (len < size) - size = len; - if (size != 0) - size = kfifo_out(&port->write_fifo, packet, size); - return size; + unsigned int len; + + len = kfifo_len(&port->port.xmit_fifo); + + if (len == 0) + return 0; + + len = min(len, DBC_MAX_PACKET); + + if (port->tx_boundary) + len = min(port->tx_boundary, len); + + len = kfifo_out(&port->port.xmit_fifo, packet, len); + + if (port->tx_boundary) + port->tx_boundary -= len; + + return len; } static int dbc_start_tx(struct dbc_port *port) @@ -49,7 +59,7 @@ static int dbc_start_tx(struct dbc_port *port) while (!list_empty(pool)) { req = list_entry(pool->next, struct dbc_request, list_pool); - len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET); + len = dbc_kfifo_to_req(port, req->buf); if (len == 0) break; do_tty_wake = true; @@ -100,15 +110,74 @@ static void dbc_start_rx(struct dbc_port *port) } } +/* + * Queue received data to tty buffer and push it. + * + * Returns the number of remaining bytes that didn't fit the tty buffer, + * i.e. 0 if all bytes were successfully moved. In case of error returns + * a negative errno. Call with lock held + */ +static int dbc_rx_push_buffer(struct dbc_port *port, struct dbc_request *req) +{ + char *packet = req->buf; + unsigned int n, size = req->actual; + int count; + + if (!req->actual) + return 0; + + /* if n_read is set then request was partially moved to tty buffer */ + n = port->n_read; + if (n) { + packet += n; + size -= n; + } + + count = tty_insert_flip_string(&port->port, packet, size); + if (count) + tty_flip_buffer_push(&port->port); + if (count != size) { + port->n_read += count; + return size - count; + } + + port->n_read = 0; + return 0; +} + static void dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req) { unsigned long flags; struct dbc_port *port = dbc_to_port(dbc); + struct tty_struct *tty; + int untransferred; + + tty = port->port.tty; spin_lock_irqsave(&port->port_lock, flags); + + /* + * Only defer copying data to the tty buffer in case: + * - !list_empty(&port->read_queue), there is older pending data + * - tty is throttled + * - failed to copy all data to buffer, defer remaining part + */ + + if (list_empty(&port->read_queue) && tty && !tty_throttled(tty)) { + untransferred = dbc_rx_push_buffer(port, req); + if (untransferred == 0) { + list_add_tail(&req->list_pool, &port->read_pool); + if (req->status != -ESHUTDOWN) + dbc_start_rx(port); + goto out; + } + } + + /* defer moving data from req to tty buffer to a tasklet */ list_add_tail(&req->list_pool, &port->read_queue); tasklet_schedule(&port->push); +out: spin_unlock_irqrestore(&port->port_lock, flags); } @@ -213,14 +282,32 @@ static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf, { struct dbc_port *port = tty->driver_data; unsigned long flags; + unsigned int written = 0; spin_lock_irqsave(&port->port_lock, flags); + + /* + * Treat tty write as one usb transfer.
Make sure the writes are turned + * into TRB requests having the same size boundaries as the tty writes. + * Don't add data to kfifo before the previous write is turned into TRBs + */ + if (port->tx_boundary) { + spin_unlock_irqrestore(&port->port_lock, flags); + return 0; + } + + if (count) { + written = kfifo_in(&port->port.xmit_fifo, buf, count); + + if (written == count) + port->tx_boundary = kfifo_len(&port->port.xmit_fifo); + + dbc_start_tx(port); + } + spin_unlock_irqrestore(&port->port_lock, flags); - return count; + return written; } static int dbc_tty_put_char(struct tty_struct *tty, u8 ch) @@ -230,7 +317,7 @@ static int dbc_tty_put_char(struct tty_struct *tty, u8 ch) int status; spin_lock_irqsave(&port->port_lock, flags); - status = kfifo_put(&port->write_fifo, ch); + status = kfifo_put(&port->port.xmit_fifo, ch); spin_unlock_irqrestore(&port->port_lock, flags); return status; @@ -253,7 +340,11 @@ static unsigned int dbc_tty_write_room(struct tty_struct *tty) unsigned int room; spin_lock_irqsave(&port->port_lock, flags); - room = kfifo_avail(&port->write_fifo); + room = kfifo_avail(&port->port.xmit_fifo); + + if (port->tx_boundary) + room = 0; + spin_unlock_irqrestore(&port->port_lock, flags); return room; @@ -266,7 +357,7 @@ static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty) unsigned int chars; spin_lock_irqsave(&port->port_lock, flags); - chars = kfifo_len(&port->write_fifo); + chars = kfifo_len(&port->port.xmit_fifo); spin_unlock_irqrestore(&port->port_lock, flags); return chars; @@ -299,10 +390,10 @@ static void dbc_rx_push(struct tasklet_struct *t) struct dbc_request *req; struct tty_struct *tty; unsigned long flags; - bool do_push = false; bool disconnect = false; struct dbc_port *port = from_tasklet(port, t, push); struct list_head *queue = &port->read_queue; + int untransferred; spin_lock_irqsave(&port->port_lock, flags); tty = port->port.tty; @@ -324,42 +415,15 @@ static void dbc_rx_push(struct tasklet_struct *t) break; } - if (req->actual) { - char *packet = req->buf; - unsigned int n, size = req->actual; - int count; - - n = port->n_read; - if (n) { - packet += n; - size -= n; - } - - count = tty_insert_flip_string(&port->port, packet, - size); - if (count) - do_push = true; - if (count != size) { - port->n_read += count; - break; - } - port->n_read = 0; - } + untransferred = dbc_rx_push_buffer(port, req); + if (untransferred > 0) + break; - list_move(&req->list_pool, &port->read_pool); + list_move_tail(&req->list_pool, &port->read_pool); } - if (do_push) - tty_flip_buffer_push(&port->port); - - if (!list_empty(queue) && tty) { - if (!tty_throttled(tty)) { - if (do_push) - tasklet_schedule(&port->push); - else - pr_warn("ttyDBC0: RX not scheduled?\n"); - } - } + if (!list_empty(queue)) + tasklet_schedule(&port->push); if (!disconnect) dbc_start_rx(port); @@ -424,7 +488,8 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc) goto err_idr; } - ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL); + ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE, + GFP_KERNEL); if (ret) goto err_exit_port; @@ -453,7 +518,7 @@ err_free_requests: xhci_dbc_free_requests(&port->read_pool); xhci_dbc_free_requests(&port->write_pool); err_free_fifo: - kfifo_free(&port->write_fifo); + kfifo_free(&port->port.xmit_fifo); err_exit_port: idr_remove(&dbc_tty_minors, port->minor); err_idr: @@ -478,7 +543,7 @@ static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc) idr_remove(&dbc_tty_minors, port->minor); mutex_unlock(&dbc_tty_minors_lock);
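With the move to the tty_port's xmit_fifo, the driver also introduces tx_boundary so that each tty write maps onto exactly one USB transfer: dbc_tty_write() accepts new data only once the previous write has fully drained into TRBs (write_room() reports 0 in the meantime), and dbc_kfifo_to_req() never pulls bytes across the recorded boundary. The boundary rule in isolation, as a sketch with a hypothetical helper (same idea as dbc_kfifo_to_req() above):

#include <linux/kfifo.h>
#include <linux/minmax.h>

/* bounded_fifo_out() is a made-up helper: drain at most max_packet bytes,
 * never crossing the boundary left by the oldest unfinished tty write */
static unsigned int bounded_fifo_out(struct kfifo *fifo, char *buf,
				     unsigned int max_packet,
				     unsigned int *boundary)
{
	unsigned int len = kfifo_len(fifo);

	if (!len)
		return 0;

	len = min(len, max_packet);
	if (*boundary)
		len = min(len, *boundary);

	len = kfifo_out(fifo, buf, len);
	if (*boundary)
		*boundary -= len;

	return len;
}

Returning the short count (written) from dbc_tty_write() lets the tty core retry the remainder once write_room() opens up again.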
- kfifo_free(&port->write_fifo); + kfifo_free(&port->port.xmit_fifo); xhci_dbc_free_requests(&port->read_pool); xhci_dbc_free_requests(&port->read_queue); xhci_dbc_free_requests(&port->write_pool); @@ -552,6 +617,7 @@ int dbc_tty_init(void) dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL; dbc_tty_driver->init_termios = tty_std_termios; + dbc_tty_driver->init_termios.c_lflag &= ~ECHO; dbc_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; dbc_tty_driver->init_termios.c_ispeed = 9600; diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index f8ba15e7c225..c6d44977193f 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -214,14 +214,11 @@ static void xhci_ring_dump_segment(struct seq_file *s, static int xhci_ring_trb_show(struct seq_file *s, void *unused) { - int i; struct xhci_ring *ring = *(struct xhci_ring **)s->private; struct xhci_segment *seg = ring->first_seg; - for (i = 0; i < ring->num_segs; i++) { + xhci_for_each_ring_seg(ring->first_seg, seg) xhci_ring_dump_segment(s, seg); - seg = seg->next; - } return 0; } @@ -235,16 +232,7 @@ static struct xhci_file_map ring_files[] = { static int xhci_ring_open(struct inode *inode, struct file *file) { - int i; - struct xhci_file_map *f_map; - const char *file_name = file_dentry(file)->d_iname; - - for (i = 0; i < ARRAY_SIZE(ring_files); i++) { - f_map = &ring_files[i]; - - if (strcmp(f_map->name, file_name) == 0) - break; - } + const struct xhci_file_map *f_map = debugfs_get_aux(file); return single_open(file, f_map->show, inode->i_private); } @@ -291,12 +279,13 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused) for (ep_index = 0; ep_index < 31; ep_index++) { ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params); - seq_printf(s, "%pad: %s\n", &dma, + seq_printf(s, "%pad: %s, virt_state:%#x\n", &dma, xhci_decode_ep_context(str, le32_to_cpu(ep_ctx->ep_info), le32_to_cpu(ep_ctx->ep_info2), le64_to_cpu(ep_ctx->deq), - le32_to_cpu(ep_ctx->tx_info))); + le32_to_cpu(ep_ctx->tx_info)), + dev->eps[ep_index].ep_state); } return 0; @@ -320,16 +309,7 @@ static struct xhci_file_map context_files[] = { static int xhci_context_open(struct inode *inode, struct file *file) { - int i; - struct xhci_file_map *f_map; - const char *file_name = file_dentry(file)->d_iname; - - for (i = 0; i < ARRAY_SIZE(context_files); i++) { - f_map = &context_files[i]; - - if (strcmp(f_map->name, file_name) == 0) - break; - } + const struct xhci_file_map *f_map = debugfs_get_aux(file); return single_open(file, f_map->show, inode->i_private); } @@ -412,7 +392,8 @@ static void xhci_debugfs_create_files(struct xhci_hcd *xhci, int i; for (i = 0; i < nentries; i++) - debugfs_create_file(files[i].name, 0444, parent, data, fops); + debugfs_create_file_aux(files[i].name, 0444, parent, + data, &files[i], fops); } static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci, @@ -650,6 +631,112 @@ static void xhci_debugfs_create_ports(struct xhci_hcd *xhci, } } +static int xhci_port_bw_show(struct xhci_hcd *xhci, u8 dev_speed, + struct seq_file *s) +{ + unsigned int num_ports; + unsigned int i; + int ret; + struct xhci_container_ctx *ctx; + struct usb_hcd *hcd = xhci_to_hcd(xhci); + struct device *dev = hcd->self.controller; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + return ret; + + num_ports = HCS_MAX_PORTS(xhci->hcs_params1); + + ctx = 
xhci_alloc_port_bw_ctx(xhci, 0); + if (!ctx) { + pm_runtime_put_sync(dev); + return -ENOMEM; + } + + /* get roothub port bandwidth */ + ret = xhci_get_port_bandwidth(xhci, ctx, dev_speed); + if (ret) + goto err_out; + + /* print all roothub ports available bandwidth + * refer to xhci rev1_2 protocol 6.2.6 , byte 0 is reserved + */ + for (i = 1; i < num_ports+1; i++) + seq_printf(s, "port[%d] available bw: %d%%.\n", i, + ctx->bytes[i]); +err_out: + pm_runtime_put_sync(dev); + xhci_free_port_bw_ctx(xhci, ctx); + return ret; +} + +static int xhci_ss_bw_show(struct seq_file *s, void *unused) +{ + int ret; + struct xhci_hcd *xhci = (struct xhci_hcd *)s->private; + + ret = xhci_port_bw_show(xhci, USB_SPEED_SUPER, s); + return ret; +} + +static int xhci_hs_bw_show(struct seq_file *s, void *unused) +{ + int ret; + struct xhci_hcd *xhci = (struct xhci_hcd *)s->private; + + ret = xhci_port_bw_show(xhci, USB_SPEED_HIGH, s); + return ret; +} + +static int xhci_fs_bw_show(struct seq_file *s, void *unused) +{ + int ret; + struct xhci_hcd *xhci = (struct xhci_hcd *)s->private; + + ret = xhci_port_bw_show(xhci, USB_SPEED_FULL, s); + return ret; +} + +static struct xhci_file_map bw_context_files[] = { + {"SS_BW", xhci_ss_bw_show, }, + {"HS_BW", xhci_hs_bw_show, }, + {"FS_BW", xhci_fs_bw_show, }, +}; + +static int bw_context_open(struct inode *inode, struct file *file) +{ + int i; + struct xhci_file_map *f_map; + const char *file_name = file_dentry(file)->d_iname; + + for (i = 0; i < ARRAY_SIZE(bw_context_files); i++) { + f_map = &bw_context_files[i]; + + if (strcmp(f_map->name, file_name) == 0) + break; + } + + return single_open(file, f_map->show, inode->i_private); +} + +static const struct file_operations bw_fops = { + .open = bw_context_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void xhci_debugfs_create_bandwidth(struct xhci_hcd *xhci, + struct dentry *parent) +{ + parent = debugfs_create_dir("port_bandwidth", parent); + + xhci_debugfs_create_files(xhci, bw_context_files, + ARRAY_SIZE(bw_context_files), + xhci, + parent, &bw_fops); +} + void xhci_debugfs_init(struct xhci_hcd *xhci) { struct device *dev = xhci_to_hcd(xhci)->self.controller; @@ -700,6 +787,8 @@ void xhci_debugfs_init(struct xhci_hcd *xhci) xhci->debugfs_slots = debugfs_create_dir("devices", xhci->debugfs_root); xhci_debugfs_create_ports(xhci, xhci->debugfs_root); + + xhci_debugfs_create_bandwidth(xhci, xhci->debugfs_root); } void xhci_debugfs_exit(struct xhci_hcd *xhci) diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h index 96eb36a58738..67ecf7320c62 100644 --- a/drivers/usb/host/xhci-ext-caps.h +++ b/drivers/usb/host/xhci-ext-caps.h @@ -42,6 +42,7 @@ #define XHCI_EXT_CAPS_DEBUG 10 /* Vendor caps */ #define XHCI_EXT_CAPS_VENDOR_INTEL 192 +#define XHCI_EXT_CAPS_INTEL_SPR_SHADOW 206 /* USB Legacy Support Capability - section 7.1.1 */ #define XHCI_HC_BIOS_OWNED (1 << 16) #define XHCI_HC_OS_OWNED (1 << 24) @@ -64,6 +65,10 @@ #define XHCI_HLC (1 << 19) #define XHCI_BLC (1 << 20) +/* Intel SPR shadow capability */ +#define XHCI_INTEL_SPR_ESS_PORT_OFFSET 0x8ac4 /* SuperSpeed port control */ +#define XHCI_INTEL_SPR_TUNEN BIT(4) /* Tunnel mode enabled */ + /* command register values to disable interrupts and halt the HC */ /* start/stop HC execution - do not write unless HC is halted*/ #define XHCI_CMD_RUN (1 << 0) diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c index f9a4a4b0eb57..02396c8721dc 100644 --- a/drivers/usb/host/xhci-histb.c +++ 
b/drivers/usb/host/xhci-histb.c @@ -355,7 +355,7 @@ static int __maybe_unused xhci_histb_resume(struct device *dev) if (!device_may_wakeup(dev)) xhci_histb_host_enable(histb); - return xhci_resume(xhci, PMSG_RESUME); + return xhci_resume(xhci, false, false); } static const struct dev_pm_ops xhci_histb_pm_ops = { @@ -373,7 +373,7 @@ MODULE_DEVICE_TABLE(of, histb_xhci_of_match); static struct platform_driver histb_xhci_driver = { .probe = xhci_histb_probe, - .remove_new = xhci_histb_remove, + .remove = xhci_histb_remove, .driver = { .name = "xhci-histb", .pm = DEV_PM_OPS, diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 61f083de6e19..92bb84f8132a 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -10,8 +10,9 @@ #include <linux/slab.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/bitfield.h> +#include <linux/pci.h> #include "xhci.h" #include "xhci-trace.h" @@ -752,6 +753,49 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci) return xhci_reset(xhci, XHCI_RESET_SHORT_USEC); } +/** + * xhci_port_is_tunneled() - Check if USB3 connection is tunneled over USB4 + * @xhci: xhci host controller + * @port: USB3 port to be checked. + * + * Some hosts can detect if a USB3 connection is native USB3 or tunneled over + * USB4. Intel hosts expose this via vendor specific extended capability 206 + * eSS PORT registers TUNEN (tunnel enabled) bit. + * + * A USB3 device must be connected to the port to detect the tunnel. + * + * Return: link tunnel mode enum, USB_LINK_UNKNOWN if host is incapable of + * detecting USB3 over USB4 tunnels. USB_LINK_NATIVE or USB_LINK_TUNNELED + * otherwise. + */ +enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci, + struct xhci_port *port) +{ + struct usb_hcd *hcd; + void __iomem *base; + u32 offset; + + /* Don't try and probe this capability for non-Intel hosts */ + hcd = xhci_to_hcd(xhci); + if (!dev_is_pci(hcd->self.controller) || + to_pci_dev(hcd->self.controller)->vendor != PCI_VENDOR_ID_INTEL) + return USB_LINK_UNKNOWN; + + base = &xhci->cap_regs->hc_capbase; + offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_INTEL_SPR_SHADOW); + + if (offset && offset <= XHCI_INTEL_SPR_ESS_PORT_OFFSET) { + offset = XHCI_INTEL_SPR_ESS_PORT_OFFSET + port->hcd_portnum * 0x20; + + if (readl(base + offset) & XHCI_INTEL_SPR_TUNEN) + return USB_LINK_TUNNELED; + else + return USB_LINK_NATIVE; + } + + return USB_LINK_UNKNOWN; +} + void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port, u32 link_state) { @@ -882,7 +926,7 @@ static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) { xhci->port_status_u0 |= 1 << wIndex; if (xhci->port_status_u0 == all_ports_seen_u0) { - del_timer_sync(&xhci->comp_mode_recovery_timer); + timer_delete_sync(&xhci->comp_mode_recovery_timer); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "All USB3 ports have entered U0 already!"); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, @@ -910,9 +954,9 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port, } /* did port event handler already start resume timing? */ if (!port->resume_timestamp) { - /* If not, maybe we are in a host initated resume? */ + /* If not, maybe we are in a host initiated resume? */ if (test_bit(wIndex, &bus_state->resuming_ports)) { - /* Host initated resume doesn't time the resume + /* Host initiated resume doesn't time the resume * signalling using resume_done[]. 
* It manually sets RESUME state, sleeps 20ms * and sets U0 state. This should probably be @@ -1834,9 +1878,10 @@ int xhci_bus_resume(struct usb_hcd *hcd) int max_ports, port_index; int sret; u32 next_state; - u32 temp, portsc; + u32 portsc; struct xhci_hub *rhub; struct xhci_port **ports; + bool disabled_irq = false; rhub = xhci_get_rhub(hcd); ports = rhub->ports; @@ -1852,17 +1897,20 @@ int xhci_bus_resume(struct usb_hcd *hcd) return -ESHUTDOWN; } - /* delay the irqs */ - temp = readl(&xhci->op_regs->command); - temp &= ~CMD_EIE; - writel(temp, &xhci->op_regs->command); - /* bus specific resume for ports we suspended at bus_suspend */ - if (hcd->speed >= HCD_USB3) + if (hcd->speed >= HCD_USB3) { next_state = XDEV_U0; - else + } else { next_state = XDEV_RESUME; - + if (bus_state->bus_suspended) { + /* + * prevent port event interrupts from interfering + * with usb2 port resume process + */ + xhci_disable_interrupter(xhci, xhci->interrupters[0]); + disabled_irq = true; + } + } port_index = max_ports; while (port_index--) { portsc = readl(ports[port_index]->addr); @@ -1888,7 +1936,7 @@ int xhci_bus_resume(struct usb_hcd *hcd) /* resume already initiated */ break; default: - /* not in a resumeable state, ignore it */ + /* not in a resumable state, ignore it */ clear_bit(port_index, &bus_state->bus_suspended); break; @@ -1930,11 +1978,9 @@ int xhci_bus_resume(struct usb_hcd *hcd) (void) readl(&xhci->op_regs->command); bus_state->next_statechange = jiffies + msecs_to_jiffies(5); - /* re-enable irqs */ - temp = readl(&xhci->op_regs->command); - temp |= CMD_EIE; - writel(temp, &xhci->op_regs->command); - temp = readl(&xhci->op_regs->command); + /* re-enable interrupter */ + if (disabled_irq) + xhci_enable_interrupter(xhci->interrupters[0]); spin_unlock_irqrestore(&xhci->lock, flags); return 0; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 69dd86669883..6680afa4f596 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -27,14 +27,12 @@ * "All components of all Command and Transfer TRBs shall be initialized to '0'" */ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, - unsigned int cycle_state, unsigned int max_packet, unsigned int num, gfp_t flags) { struct xhci_segment *seg; dma_addr_t dma; - int i; struct device *dev = xhci_to_hcd(xhci)->self.sysdev; seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev)); @@ -56,11 +54,6 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, return NULL; } } - /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ - if (cycle_state == 0) { - for (i = 0; i < TRBS_PER_SEGMENT; i++) - seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE); - } seg->num = num; seg->dma = dma; seg->next = NULL; @@ -78,85 +71,104 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) kfree(seg); } -static void xhci_free_segments_for_ring(struct xhci_hcd *xhci, - struct xhci_segment *first) +static void xhci_ring_segments_free(struct xhci_hcd *xhci, struct xhci_ring *ring) { - struct xhci_segment *seg; + struct xhci_segment *seg, *next; + + ring->last_seg->next = NULL; + seg = ring->first_seg; - seg = first->next; - while (seg && seg != first) { - struct xhci_segment *next = seg->next; + while (seg) { + next = seg->next; xhci_segment_free(xhci, seg); seg = next; } - xhci_segment_free(xhci, first); } /* - * Make the prev segment point to the next segment. + * Only for transfer and command rings where driver is the producer, not for + * event rings. 
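The xhci-mem.c refactor below splits the old dual-purpose xhci_link_segments() into plain next-pointer chaining plus an explicit xhci_set_link_trb() pass, and the open-coded do/while walks over the circular segment list are replaced by an xhci_for_each_ring_seg() iterator. Judging by its uses in these hunks, the macro is presumably defined in xhci.h along these lines:

/* Visit every segment of a circular segment list exactly once,
 * starting from head and stopping once the list wraps back around */
#define xhci_for_each_ring_seg(head, seg) \
	for (seg = (head); seg; seg = (seg->next != (head) ? seg->next : NULL))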
* - * Change the last TRB in the prev segment to be a Link TRB which points to the + * Change the last TRB in the segment to be a Link TRB which points to the * DMA address of the next segment. The caller needs to set any Link TRB * related flags, such as End TRB, Toggle Cycle, and no snoop. */ -static void xhci_link_segments(struct xhci_segment *prev, - struct xhci_segment *next, - enum xhci_ring_type type, bool chain_links) +static void xhci_set_link_trb(struct xhci_segment *seg, bool chain_links) { + union xhci_trb *trb; u32 val; - if (!prev || !next) + if (!seg || !seg->next) return; - prev->next = next; - if (type != TYPE_EVENT) { - prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = - cpu_to_le64(next->dma); - /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ - val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control); - val &= ~TRB_TYPE_BITMASK; - val |= TRB_TYPE(TRB_LINK); - if (chain_links) - val |= TRB_CHAIN; - prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val); - } + trb = &seg->trbs[TRBS_PER_SEGMENT - 1]; + + /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ + val = le32_to_cpu(trb->link.control); + val &= ~TRB_TYPE_BITMASK; + val |= TRB_TYPE(TRB_LINK); + if (chain_links) + val |= TRB_CHAIN; + trb->link.control = cpu_to_le32(val); + trb->link.segment_ptr = cpu_to_le64(seg->next->dma); +} + +static void xhci_initialize_ring_segments(struct xhci_hcd *xhci, struct xhci_ring *ring) +{ + struct xhci_segment *seg; + bool chain_links; + + if (ring->type == TYPE_EVENT) + return; + + chain_links = xhci_link_chain_quirk(xhci, ring->type); + xhci_for_each_ring_seg(ring->first_seg, seg) + xhci_set_link_trb(seg, chain_links); + + /* See section 4.9.2.1 and 6.4.4.1 */ + ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= cpu_to_le32(LINK_TOGGLE); } /* - * Link the ring to the new segments. + * Link the src ring segments to the dst ring. * Set Toggle Cycle for the new ring if needed. 
*/ -static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, - struct xhci_segment *first, struct xhci_segment *last, - unsigned int num_segs) +static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *src, struct xhci_ring *dst) { - struct xhci_segment *next, *seg; + struct xhci_segment *seg; bool chain_links; - if (!ring || !first || !last) + if (!src || !dst) return; - /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */ - chain_links = !!(xhci_link_trb_quirk(xhci) || - (ring->type == TYPE_ISOC && - (xhci->quirks & XHCI_AMD_0x96_HOST))); + /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ + if (dst->cycle_state == 0) { + xhci_for_each_ring_seg(src->first_seg, seg) { + for (int i = 0; i < TRBS_PER_SEGMENT; i++) + seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE); + } + } - next = ring->enq_seg->next; - xhci_link_segments(ring->enq_seg, first, ring->type, chain_links); - xhci_link_segments(last, next, ring->type, chain_links); - ring->num_segs += num_segs; + src->last_seg->next = dst->enq_seg->next; + dst->enq_seg->next = src->first_seg; + if (dst->type != TYPE_EVENT) { + chain_links = xhci_link_chain_quirk(xhci, dst->type); + xhci_set_link_trb(dst->enq_seg, chain_links); + xhci_set_link_trb(src->last_seg, chain_links); + } + dst->num_segs += src->num_segs; - if (ring->enq_seg == ring->last_seg) { - if (ring->type != TYPE_EVENT) { - ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control + if (dst->enq_seg == dst->last_seg) { + if (dst->type != TYPE_EVENT) + dst->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control &= ~cpu_to_le32(LINK_TOGGLE); - last->trbs[TRBS_PER_SEGMENT-1].link.control - |= cpu_to_le32(LINK_TOGGLE); - } - ring->last_seg = last; + + dst->last_seg = src->last_seg; + } else if (dst->type != TYPE_EVENT) { + src->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control &= ~cpu_to_le32(LINK_TOGGLE); } - for (seg = last; seg != ring->last_seg; seg = seg->next) + for (seg = dst->enq_seg; seg != dst->last_seg; seg = seg->next) seg->next->num = seg->num + 1; } @@ -227,7 +239,6 @@ static int xhci_update_stream_segment_mapping( struct radix_tree_root *trb_address_map, struct xhci_ring *ring, struct xhci_segment *first_seg, - struct xhci_segment *last_seg, gfp_t mem_flags) { struct xhci_segment *seg; @@ -237,28 +248,22 @@ static int xhci_update_stream_segment_mapping( if (WARN_ON_ONCE(trb_address_map == NULL)) return 0; - seg = first_seg; - do { + xhci_for_each_ring_seg(first_seg, seg) { ret = xhci_insert_segment_mapping(trb_address_map, ring, seg, mem_flags); if (ret) goto remove_streams; - if (seg == last_seg) - return 0; - seg = seg->next; - } while (seg != first_seg); + } return 0; remove_streams: failed_seg = seg; - seg = first_seg; - do { + xhci_for_each_ring_seg(first_seg, seg) { xhci_remove_segment_mapping(trb_address_map, seg); if (seg == failed_seg) return ret; - seg = seg->next; - } while (seg != first_seg); + } return ret; } @@ -270,17 +275,14 @@ static void xhci_remove_stream_mapping(struct xhci_ring *ring) if (WARN_ON_ONCE(ring->trb_address_map == NULL)) return; - seg = ring->first_seg; - do { + xhci_for_each_ring_seg(ring->first_seg, seg) xhci_remove_segment_mapping(ring->trb_address_map, seg); - seg = seg->next; - } while (seg != ring->first_seg); } static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags) { return xhci_update_stream_segment_mapping(ring->trb_address_map, ring, - ring->first_seg, ring->last_seg, mem_flags); + ring->first_seg, mem_flags); } /* XXX: Do we need the hcd 
structure in all these functions? */ @@ -294,14 +296,13 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) if (ring->first_seg) { if (ring->type == TYPE_STREAM) xhci_remove_stream_mapping(ring); - xhci_free_segments_for_ring(xhci, ring->first_seg); + xhci_ring_segments_free(xhci, ring); } kfree(ring); } -void xhci_initialize_ring_info(struct xhci_ring *ring, - unsigned int cycle_state) +void xhci_initialize_ring_info(struct xhci_ring *ring) { /* The ring is empty, so the enqueue pointer == dequeue pointer */ ring->enqueue = ring->first_seg->trbs; @@ -315,7 +316,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring, * New rings are initialized with cycle state equal to 1; if we are * handling ring expansion, set the cycle state equal to the old ring. */ - ring->cycle_state = cycle_state; + ring->cycle_state = 1; /* * Each segment has a link TRB, and leave an extra TRB for SW @@ -326,45 +327,36 @@ void xhci_initialize_ring_info(struct xhci_ring *ring, EXPORT_SYMBOL_GPL(xhci_initialize_ring_info); /* Allocate segments and link them for a ring */ -static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, - struct xhci_segment **first, struct xhci_segment **last, - unsigned int num_segs, unsigned int num, - unsigned int cycle_state, enum xhci_ring_type type, - unsigned int max_packet, gfp_t flags) +static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, gfp_t flags) { struct xhci_segment *prev; - bool chain_links; - - /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */ - chain_links = !!(xhci_link_trb_quirk(xhci) || - (type == TYPE_ISOC && - (xhci->quirks & XHCI_AMD_0x96_HOST))); + unsigned int num = 0; - prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags); + prev = xhci_segment_alloc(xhci, ring->bounce_buf_len, num, flags); if (!prev) return -ENOMEM; num++; - *first = prev; - while (num < num_segs) { + ring->first_seg = prev; + while (num < ring->num_segs) { struct xhci_segment *next; - next = xhci_segment_alloc(xhci, cycle_state, max_packet, num, - flags); + next = xhci_segment_alloc(xhci, ring->bounce_buf_len, num, flags); if (!next) goto free_segments; - xhci_link_segments(prev, next, type, chain_links); + prev->next = next; prev = next; num++; } - xhci_link_segments(prev, *first, type, chain_links); - *last = prev; + ring->last_seg = prev; + ring->last_seg->next = ring->first_seg; return 0; free_segments: - xhci_free_segments_for_ring(xhci, *first); + ring->last_seg = prev; + xhci_ring_segments_free(xhci, ring); return -ENOMEM; } @@ -375,9 +367,8 @@ free_segments: * Set the end flag and the cycle toggle bit on the last segment. * See section 4.9.1 and figures 15 and 16. 
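The cycle_state parameter disappears from the allocation path because new rings always start in cycle state 1; the only case that still needs pre-set cycle bits, splicing segments into a ring currently running at cycle 0, is handled in xhci_link_rings() above. The reasoning, restated next to the relevant snippet from that hunk:

/* Cycle-bit invariant behind the dropped parameter (annotated copy of the
 * xhci_link_rings() snippet, not new driver code):
 * - a fresh ring starts with cycle_state = 1 over zeroed TRBs, so a cycle
 *   bit of 0 already marks every TRB as not yet written;
 * - if the ring being expanded has wrapped and its producer cycle is now 0,
 *   a zeroed TRB would match the controller's consumer cycle and look
 *   valid, so the spliced-in segments get TRB_CYCLE preset instead: */
if (dst->cycle_state == 0) {
	xhci_for_each_ring_seg(src->first_seg, seg) {
		for (int i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}
}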
*/ -struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, - unsigned int num_segs, unsigned int cycle_state, - enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) +struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, + enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) { struct xhci_ring *ring; int ret; @@ -394,19 +385,12 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, if (num_segs == 0) return ring; - ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, - &ring->last_seg, num_segs, 0, cycle_state, type, - max_packet, flags); + ret = xhci_alloc_segments_for_ring(xhci, ring, flags); if (ret) goto fail; - /* Only event ring does not use link TRB */ - if (type != TYPE_EVENT) { - /* See section 4.9.2.1 and 6.4.4.1 */ - ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= - cpu_to_le32(LINK_TOGGLE); - } - xhci_initialize_ring_info(ring, cycle_state); + xhci_initialize_ring_segments(xhci, ring); + xhci_initialize_ring_info(ring); trace_xhci_ring_alloc(ring); return ring; @@ -430,25 +414,29 @@ void xhci_free_endpoint_ring(struct xhci_hcd *xhci, int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, unsigned int num_new_segs, gfp_t flags) { - struct xhci_segment *first; - struct xhci_segment *last; - int ret; + struct xhci_ring new_ring; + int ret; - ret = xhci_alloc_segments_for_ring(xhci, &first, &last, - num_new_segs, ring->enq_seg->num + 1, - ring->cycle_state, ring->type, - ring->bounce_buf_len, flags); + if (num_new_segs == 0) + return 0; + + new_ring.num_segs = num_new_segs; + new_ring.bounce_buf_len = ring->bounce_buf_len; + new_ring.type = ring->type; + ret = xhci_alloc_segments_for_ring(xhci, &new_ring, flags); if (ret) return -ENOMEM; + xhci_initialize_ring_segments(xhci, &new_ring); + if (ring->type == TYPE_STREAM) { - ret = xhci_update_stream_segment_mapping(ring->trb_address_map, - ring, first, last, flags); + ret = xhci_update_stream_segment_mapping(ring->trb_address_map, ring, + new_ring.first_seg, flags); if (ret) goto free_segments; } - xhci_link_rings(xhci, ring, first, last, num_new_segs); + xhci_link_rings(xhci, &new_ring, ring); trace_xhci_ring_expansion(ring); xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, "ring expansion succeed, now has %d segments", @@ -457,7 +445,7 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, return 0; free_segments: - xhci_free_segments_for_ring(xhci, first); + xhci_ring_segments_free(xhci, &new_ring); return ret; } @@ -496,6 +484,35 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci, kfree(ctx); } +struct xhci_container_ctx *xhci_alloc_port_bw_ctx(struct xhci_hcd *xhci, + gfp_t flags) +{ + struct xhci_container_ctx *ctx; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + + ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev)); + if (!ctx) + return NULL; + + ctx->size = GET_PORT_BW_ARRAY_SIZE; + + ctx->bytes = dma_pool_zalloc(xhci->port_bw_pool, flags, &ctx->dma); + if (!ctx->bytes) { + kfree(ctx); + return NULL; + } + return ctx; +} + +void xhci_free_port_bw_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx) +{ + if (!ctx) + return; + dma_pool_free(xhci->port_bw_pool, ctx->bytes, ctx->dma); + kfree(ctx); +} + struct xhci_input_control_ctx *xhci_get_input_control_ctx( struct xhci_container_ctx *ctx) { @@ -536,7 +553,7 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci, struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) { struct device *dev = xhci_to_hcd(xhci)->self.sysdev; - size_t size = 
sizeof(struct xhci_stream_ctx) * num_stream_ctxs; + size_t size = array_size(sizeof(struct xhci_stream_ctx), num_stream_ctxs); if (size > MEDIUM_STREAM_ARRAY_SIZE) dma_free_coherent(dev, size, stream_ctx, dma); @@ -561,7 +578,7 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, gfp_t mem_flags) { struct device *dev = xhci_to_hcd(xhci)->self.sysdev; - size_t size = size_mul(sizeof(struct xhci_stream_ctx), num_stream_ctxs); + size_t size = array_size(sizeof(struct xhci_stream_ctx), num_stream_ctxs); if (size > MEDIUM_STREAM_ARRAY_SIZE) return dma_alloc_coherent(dev, size, dma, mem_flags); @@ -647,8 +664,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { stream_info->stream_rings[cur_stream] = - xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet, - mem_flags); + xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags); cur_ring = stream_info->stream_rings[cur_stream]; if (!cur_ring) goto cleanup_rings; @@ -663,6 +679,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", cur_stream, addr); ret = xhci_update_stream_mapping(cur_ring, mem_flags); + + trace_xhci_alloc_stream_info_ctx(stream_info, cur_stream); if (ret) { xhci_ring_free(xhci, cur_ring); stream_info->stream_rings[cur_stream] = NULL; @@ -989,7 +1007,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, } /* Allocate endpoint 0 ring */ - dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags); + dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags); if (!dev->eps[0].ring) goto fail; @@ -1431,6 +1449,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Periodic endpoint bInterval limit quirk */ if (usb_endpoint_xfer_int(&ep->desc) || usb_endpoint_xfer_isoc(&ep->desc)) { + if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_9) && + interval >= 9) { + interval = 8; + } if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) && udev->speed >= USB_SPEED_HIGH && interval >= 7) { @@ -1466,7 +1488,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Set up the endpoint ring */ virt_dev->eps[ep_index].new_ring = - xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags); + xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags); if (!virt_dev->eps[ep_index].new_ring) return -ENOMEM; @@ -1638,7 +1660,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) goto fail_sp; xhci->scratchpad->sp_array = dma_alloc_coherent(dev, - size_mul(sizeof(u64), num_sp), + array_size(sizeof(u64), num_sp), &xhci->scratchpad->sp_dma, flags); if (!xhci->scratchpad->sp_array) goto fail_sp2; @@ -1671,7 +1693,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) kfree(xhci->scratchpad->sp_buffers); fail_sp3: - dma_free_coherent(dev, num_sp * sizeof(u64), + dma_free_coherent(dev, array_size(sizeof(u64), num_sp), xhci->scratchpad->sp_array, xhci->scratchpad->sp_dma); @@ -1700,7 +1722,7 @@ static void scratchpad_free(struct xhci_hcd *xhci) xhci->scratchpad->sp_array[i]); } kfree(xhci->scratchpad->sp_buffers); - dma_free_coherent(dev, num_sp * sizeof(u64), + dma_free_coherent(dev, array_size(sizeof(u64), num_sp), xhci->scratchpad->sp_array, xhci->scratchpad->sp_dma); kfree(xhci->scratchpad); @@ -1778,7 +1800,7 @@ static int xhci_alloc_erst(struct xhci_hcd *xhci, struct xhci_segment *seg; struct xhci_erst_entry *entry; - size = size_mul(sizeof(struct xhci_erst_entry), evt_ring->num_segs); + size = 
array_size(sizeof(struct xhci_erst_entry), evt_ring->num_segs); erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev, size, &erst->erst_dma_addr, flags); if (!erst->entries) @@ -1813,10 +1835,10 @@ xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) */ if (ir->ir_set) { tmp = readl(&ir->ir_set->erst_size); - tmp &= ERST_SIZE_MASK; + tmp &= ~ERST_SIZE_MASK; writel(tmp, &ir->ir_set->erst_size); - xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue); + xhci_update_erst_dequeue(xhci, ir, true); } } @@ -1829,7 +1851,7 @@ xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) if (!ir) return; - erst_size = sizeof(struct xhci_erst_entry) * ir->erst.num_entries; + erst_size = array_size(sizeof(struct xhci_erst_entry), ir->erst.num_entries); if (ir->erst.entries) dma_free_coherent(dev, erst_size, ir->erst.entries, @@ -1859,6 +1881,11 @@ void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrup return; } + /* + * Cleanup secondary interrupter to ensure there are no pending events. + * This also updates event ring dequeue pointer back to the start. + */ + xhci_skip_sec_intr_events(xhci, ir->event_ring, ir); intr_num = ir->intr_num; xhci_remove_interrupter(xhci, ir); @@ -1877,7 +1904,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) cancel_delayed_work_sync(&xhci->cmd_timer); - for (i = 0; i < xhci->max_interrupters; i++) { + for (i = 0; xhci->interrupters && i < xhci->max_interrupters; i++) { if (xhci->interrupters[i]) { xhci_remove_interrupter(xhci, xhci->interrupters[i]); xhci_free_interrupter(xhci, xhci->interrupters[i]); @@ -1918,6 +1945,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed small stream array pool"); + dma_pool_destroy(xhci->port_bw_pool); + xhci->port_bw_pool = NULL; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Freed xhci port bw array pool"); + dma_pool_destroy(xhci->medium_streams_pool); xhci->medium_streams_pool = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, @@ -1950,7 +1982,6 @@ no_bw: kfree(xhci->usb3_rhub.ports); kfree(xhci->hw_ports); kfree(xhci->rh_bw); - kfree(xhci->ext_caps); for (i = 0; i < xhci->num_port_caps; i++) kfree(xhci->port_caps[i].psi); kfree(xhci->port_caps); @@ -1961,12 +1992,10 @@ no_bw: xhci->usb3_rhub.ports = NULL; xhci->hw_ports = NULL; xhci->rh_bw = NULL; - xhci->ext_caps = NULL; xhci->port_caps = NULL; xhci->interrupters = NULL; xhci->page_size = 0; - xhci->page_shift = 0; xhci->usb2_rhub.bus_state.bus_suspended = 0; xhci->usb3_rhub.bus_state.bus_suspended = 0; } @@ -2089,10 +2118,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, port_cap->maj_rev = major_revision; port_cap->min_rev = minor_revision; - - /* cache usb2 port capabilities */ - if (major_revision < 0x03 && xhci->num_ext_caps < max_caps) - xhci->ext_caps[xhci->num_ext_caps++] = temp; + port_cap->protocol_caps = temp; if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) && (temp & XHCI_HLC)) { @@ -2212,11 +2238,6 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) XHCI_EXT_CAPS_PROTOCOL); } - xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps), - flags, dev_to_node(dev)); - if (!xhci->ext_caps) - return -ENOMEM; - xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps), flags, dev_to_node(dev)); if (!xhci->port_caps) @@ -2269,24 +2290,24 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) } static struct xhci_interrupter * -xhci_alloc_interrupter(struct 
xhci_hcd *xhci, int segs, gfp_t flags) +xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags) { struct device *dev = xhci_to_hcd(xhci)->self.sysdev; struct xhci_interrupter *ir; - unsigned int num_segs = segs; + unsigned int max_segs; int ret; + if (!segs) + segs = ERST_DEFAULT_SEGS; + + max_segs = BIT(HCS_ERST_MAX(xhci->hcs_params2)); + segs = min(segs, max_segs); + ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev)); if (!ir) return NULL; - /* number of ring segments should be greater than 0 */ - if (segs <= 0) - num_segs = min_t(unsigned int, 1 << HCS_ERST_MAX(xhci->hcs_params2), - ERST_MAX_SEGS); - - ir->event_ring = xhci_ring_alloc(xhci, num_segs, 1, TYPE_EVENT, 0, - flags); + ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags); if (!ir->event_ring) { xhci_warn(xhci, "Failed to allocate interrupter event ring\n"); kfree(ir); @@ -2304,70 +2325,69 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, int segs, gfp_t flags) return ir; } -static int -xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, - unsigned int intr_num) +void xhci_add_interrupter(struct xhci_hcd *xhci, unsigned int intr_num) { + struct xhci_interrupter *ir; u64 erst_base; u32 erst_size; - if (intr_num >= xhci->max_interrupters) { - xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n", - intr_num, xhci->max_interrupters); - return -EINVAL; - } - - if (xhci->interrupters[intr_num]) { - xhci_warn(xhci, "Interrupter %d\n already set up", intr_num); - return -EINVAL; - } - - xhci->interrupters[intr_num] = ir; + ir = xhci->interrupters[intr_num]; ir->intr_num = intr_num; ir->ir_set = &xhci->run_regs->ir_set[intr_num]; /* set ERST count with the number of entries in the segment table */ erst_size = readl(&ir->ir_set->erst_size); - erst_size &= ERST_SIZE_MASK; + erst_size &= ~ERST_SIZE_MASK; erst_size |= ir->event_ring->num_segs; writel(erst_size, &ir->ir_set->erst_size); erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); - erst_base &= ERST_BASE_RSVDP; - erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP; - xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base); + erst_base &= ~ERST_BASE_ADDRESS_MASK; + erst_base |= ir->erst.erst_dma_addr & ERST_BASE_ADDRESS_MASK; + if (xhci->quirks & XHCI_WRITE_64_HI_LO) + hi_lo_writeq(erst_base, &ir->ir_set->erst_base); + else + xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base); /* Set the event ring dequeue address of this interrupter */ xhci_set_hc_event_deq(xhci, ir); - - return 0; } struct xhci_interrupter * -xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg) +xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, + u32 imod_interval, unsigned int intr_num) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_interrupter *ir; unsigned int i; int err = -ENOSPC; - if (!xhci->interrupters || xhci->max_interrupters <= 1) + if (!xhci->interrupters || xhci->max_interrupters <= 1 || + intr_num >= xhci->max_interrupters) return NULL; - ir = xhci_alloc_interrupter(xhci, num_seg, GFP_KERNEL); + ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL); if (!ir) return NULL; spin_lock_irq(&xhci->lock); - - /* Find available secondary interrupter, interrupter 0 is reserved for primary */ - for (i = 1; i < xhci->max_interrupters; i++) { - if (xhci->interrupters[i] == NULL) { - err = xhci_add_interrupter(xhci, ir, i); - break; + if (!intr_num) { + /* Find available secondary interrupter, interrupter 0 is reserved for primary */ + for (i = 1; i < xhci->max_interrupters; i++) { + if 
(!xhci->interrupters[i]) { + xhci->interrupters[i] = ir; + xhci_add_interrupter(xhci, i); + err = 0; + break; + } + } + } else { + if (!xhci->interrupters[intr_num]) { + xhci->interrupters[intr_num] = ir; + xhci_add_interrupter(xhci, intr_num); + err = 0; } } - spin_unlock_irq(&xhci->lock); if (err) { @@ -2377,8 +2397,10 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg) return NULL; } + xhci_set_interrupter_moderation(ir, imod_interval); + xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n", - i, xhci->max_interrupters); + ir->intr_num, xhci->max_interrupters); return ir; } @@ -2386,61 +2408,21 @@ EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter); int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) { - struct xhci_interrupter *ir; struct device *dev = xhci_to_hcd(xhci)->self.sysdev; dma_addr_t dma; - unsigned int val, val2; - u64 val_64; - u32 page_size, temp; - int i; - - INIT_LIST_HEAD(&xhci->cmd_list); - - /* init command timeout work */ - INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); - init_completion(&xhci->cmd_ring_stop_completion); - - page_size = readl(&xhci->op_regs->page_size); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Supported page size register = 0x%x", page_size); - i = ffs(page_size); - if (i < 16) - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Supported page size of %iK", (1 << (i+12)) / 1024); - else - xhci_warn(xhci, "WARN: no supported page size\n"); - /* Use 4K pages, since that's common and the minimum the HC supports */ - xhci->page_shift = 12; - xhci->page_size = 1 << xhci->page_shift; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "HCD page size set to %iK", xhci->page_size / 1024); - - /* - * Program the Number of Device Slots Enabled field in the CONFIG - * register with the max value of slots the HC can handle. - */ - val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1)); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// xHC can handle at most %d device slots.", val); - val2 = readl(&xhci->op_regs->config_reg); - val |= (val2 & ~HCS_SLOTS_MASK); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Setting Max device slots reg = 0x%x.", val); - writel(val, &xhci->op_regs->config_reg); /* * xHCI section 5.4.6 - Device Context array must be * "physically contiguous and 64-byte (cache line) aligned". */ - xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, - flags); + xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, flags); if (!xhci->dcbaa) goto fail; + xhci->dcbaa->dma = dma; xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Device context base array address = 0x%pad (DMA), %p (virt)", - &xhci->dcbaa->dma, xhci->dcbaa); - xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); + "Device context base array address = 0x%pad (DMA), %p (virt)", + &xhci->dcbaa->dma, xhci->dcbaa); /* * Initialize the ring segment pool. The ring must be a contiguous @@ -2449,103 +2431,84 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * and our use of dma addresses in the trb_address_map radix tree needs * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need. 
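xhci_create_secondary_interrupter() grows from a bare segment count to (hcd, segs, imod_interval, intr_num): callers can now size the event ring ERST, set interrupt moderation at creation time, and optionally pin a specific interrupter index, with intr_num == 0 keeping the old pick-any-free-slot behaviour (interrupter 0 itself remains reserved for the primary). A hedged usage sketch for an offload client such as an audio DSP driver; the names and the moderation value are illustrative, and the declarations live in the xHCI driver headers:

/* sketch of a client call, not taken from an in-tree user */
struct xhci_interrupter *ir;

ir = xhci_create_secondary_interrupter(hcd, 2 /* ERST segments */,
				       160 /* imod_interval */,
				       2 /* pin interrupter index 2 */);
if (!ir)
	return -ENOSPC;	/* out of interrupters, or index 2 already taken */

/* later, on teardown: */
xhci_remove_secondary_interrupter(hcd, ir);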
*/ - if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH) + if (xhci->quirks & XHCI_TRB_OVERFETCH) + /* Buggy HC prefetches beyond segment bounds - allocate dummy space at the end */ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2); else xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size); + if (!xhci->segment_pool) + goto fail; /* See Table 46 and Note on Figure 55 */ - xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, - 2112, 64, xhci->page_size); - if (!xhci->segment_pool || !xhci->device_pool) + xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 2112, 64, + xhci->page_size); + if (!xhci->device_pool) goto fail; - /* Linear stream context arrays don't have any boundary restrictions, + /* + * Linear stream context arrays don't have any boundary restrictions, * and only need to be 16-byte aligned. */ - xhci->small_streams_pool = - dma_pool_create("xHCI 256 byte stream ctx arrays", - dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); - xhci->medium_streams_pool = - dma_pool_create("xHCI 1KB stream ctx arrays", - dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); - /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE - * will be allocated with dma_alloc_coherent() + xhci->small_streams_pool = dma_pool_create("xHCI 256 byte stream ctx arrays", + dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); + if (!xhci->small_streams_pool) + goto fail; + + /* + * Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE will be + * allocated with dma_alloc_coherent(). */ - if (!xhci->small_streams_pool || !xhci->medium_streams_pool) + xhci->medium_streams_pool = dma_pool_create("xHCI 1KB stream ctx arrays", + dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); + if (!xhci->medium_streams_pool) + goto fail; + + /* + * Refer to xHCI rev1_2 protocol 5.3.3: the max number of ports is 255. + * Refer to xHCI rev1_2 protocol 6.4.3.14: the port bandwidth buffer needs + * to be 16-byte aligned. + */ + xhci->port_bw_pool = dma_pool_create("xHCI 256 port bw ctx arrays", + dev, GET_PORT_BW_ARRAY_SIZE, 16, 0); + if (!xhci->port_bw_pool) goto fail; /* Set up the command ring to have one segment for now. */ - xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags); + xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags); if (!xhci->cmd_ring) goto fail; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Allocated command ring at %p", xhci->cmd_ring); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad", - &xhci->cmd_ring->first_seg->dma); - /* Set the address in the Command Ring Control register */ - val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); - val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | - (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | - xhci->cmd_ring->cycle_state; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Setting command ring address to 0x%016llx", val_64); - xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocated command ring at %p", xhci->cmd_ring); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad", + &xhci->cmd_ring->first_seg->dma); - /* Reserve one command ring TRB for disabling LPM. + /* + * Reserve one command ring TRB for disabling LPM. * Since the USB core grabs the shared usb_bus bandwidth mutex before * disabling LPM, we only need to reserve one TRB for all devices.
*/ xhci->cmd_ring_reserved_trbs++; - val = readl(&xhci->cap_regs->db_off); - val &= DBOFF_MASK; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Doorbell array is located at offset 0x%x from cap regs base addr", - val); - xhci->dba = (void __iomem *) xhci->cap_regs + val; - /* Allocate and set up primary interrupter 0 with an event ring. */ - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Allocating primary event ring"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocating primary event ring"); xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters), flags, dev_to_node(dev)); - - ir = xhci_alloc_interrupter(xhci, 0, flags); - if (!ir) + if (!xhci->interrupters) goto fail; - if (xhci_add_interrupter(xhci, ir, 0)) + xhci->interrupters[0] = xhci_alloc_interrupter(xhci, 0, flags); + if (!xhci->interrupters[0]) goto fail; - ir->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX; - - /* - * XXX: Might need to set the Interrupter Moderation Register to - * something other than the default (~1ms minimum between interrupts). - * See section 5.5.1.2. - */ - for (i = 0; i < MAX_HC_SLOTS; i++) - xhci->devs[i] = NULL; - if (scratchpad_alloc(xhci, flags)) goto fail; + if (xhci_setup_port_arrays(xhci, flags)) goto fail; - /* Enable USB 3.0 device notifications for function remote wake, which - * is necessary for allowing USB 3.0 devices to do remote wakeup from - * U3 (device suspend). - */ - temp = readl(&xhci->op_regs->dev_notification); - temp &= ~DEV_NOTE_MASK; - temp |= DEV_NOTE_FWAKE; - writel(temp, &xhci->op_regs->dev_notification); - return 0; fail: diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 3252e3d2d79c..208558cf822d 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -746,10 +746,10 @@ static int __maybe_unused xhci_mtk_suspend(struct device *dev) xhci_dbg(xhci, "%s: stop port polling\n", __func__); clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); - del_timer_sync(&hcd->rh_timer); + timer_delete_sync(&hcd->rh_timer); if (shared_hcd) { clear_bit(HCD_FLAG_POLL_RH, &shared_hcd->flags); - del_timer_sync(&shared_hcd->rh_timer); + timer_delete_sync(&shared_hcd->rh_timer); } ret = xhci_mtk_host_disable(mtk); @@ -853,7 +853,7 @@ MODULE_DEVICE_TABLE(of, mtk_xhci_of_match); static struct platform_driver mtk_xhci_driver = { .probe = xhci_mtk_probe, - .remove_new = xhci_mtk_remove, + .remove = xhci_mtk_remove, .driver = { .name = "xhci-mtk", .pm = DEV_PM_OPS, diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c index 87f1597a0e5a..257e4d79971f 100644 --- a/drivers/usb/host/xhci-mvebu.c +++ b/drivers/usb/host/xhci-mvebu.c @@ -73,13 +73,3 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) return 0; } - -int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - - /* Without reset on resume, the HC won't work at all */ - xhci->quirks |= XHCI_RESET_ON_RESUME; - - return 0; -} diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h index 3be021793cc8..9d26e22c4842 100644 --- a/drivers/usb/host/xhci-mvebu.h +++ b/drivers/usb/host/xhci-mvebu.h @@ -12,16 +12,10 @@ struct usb_hcd; #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU) int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd); -int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd); #else static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) { return 0; } - -static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) -{ - return 0; -} #endif #endif /* __LINUX_XHCI_MVEBU_H */ diff --git 
a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c index 93f8b355bc70..620f8f0febb8 100644 --- a/drivers/usb/host/xhci-pci-renesas.c +++ b/drivers/usb/host/xhci-pci-renesas.c @@ -6,7 +6,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "xhci.h" #include "xhci-trace.h" @@ -50,6 +50,8 @@ #define RENESAS_RETRY 10000 #define RENESAS_DELAY 10 +#define RENESAS_FW_NAME "renesas_usb_fw.mem" + static int renesas_fw_download_image(struct pci_dev *dev, const u32 *fw, size_t step, bool rom) { @@ -573,12 +575,10 @@ exit: return err; } -int renesas_xhci_check_request_fw(struct pci_dev *pdev, - const struct pci_device_id *id) +static int renesas_xhci_check_request_fw(struct pci_dev *pdev, + const struct pci_device_id *id) { - struct xhci_driver_data *driver_data = - (struct xhci_driver_data *)id->driver_data; - const char *fw_name = driver_data->firmware; + const char fw_name[] = RENESAS_FW_NAME; const struct firmware *fw; bool has_rom; int err; @@ -625,6 +625,41 @@ exit: release_firmware(fw); return err; } -EXPORT_SYMBOL_GPL(renesas_xhci_check_request_fw); +static int +xhci_pci_renesas_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + int retval; + + retval = renesas_xhci_check_request_fw(dev, id); + if (retval) + return retval; + + return xhci_pci_common_probe(dev, id); +} + +static const struct pci_device_id pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) }, + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) }, + { /* end: all zeroes */ } +}; +MODULE_DEVICE_TABLE(pci, pci_ids); + +static struct pci_driver xhci_renesas_pci_driver = { + .name = "xhci-pci-renesas", + .id_table = pci_ids, + + .probe = xhci_pci_renesas_probe, + .remove = xhci_pci_remove, + + .shutdown = usb_hcd_pci_shutdown, + .driver = { + .pm = pm_ptr(&usb_hcd_pci_pm_ops), + }, +}; +module_pci_driver(xhci_renesas_pci_driver); + +MODULE_DESCRIPTION("Renesas xHCI PCI Host Controller Driver"); +MODULE_FIRMWARE(RENESAS_FW_NAME); +MODULE_IMPORT_NS("xhci"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 93b697648018..00fac8b233d2 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -28,25 +28,40 @@ #define SPARSE_CNTL_ENABLE 0xC12C /* Device for a quirk */ -#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 -#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 +#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 +#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 0x1100 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 -#define PCI_VENDOR_ID_ETRON 0x1b6f -#define PCI_DEVICE_ID_EJ168 0x7023 +#define PCI_VENDOR_ID_ETRON 0x1b6f +#define PCI_DEVICE_ID_ETRON_EJ168 0x7023 +#define PCI_DEVICE_ID_ETRON_EJ188 0x7052 -#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 -#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 +#define PCI_DEVICE_ID_VIA_VL805 0x3483 + +#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 +#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 -#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 -#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 +#define 
PCI_DEVICE_ID_INTEL_APOLLO_LAKE_XHCI 0x5aa8 +#define PCI_DEVICE_ID_INTEL_DENVERTON_XHCI 0x19d0 +#define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13 +#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13 +#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_PCH_XHCI 0xa0ed +#define PCI_DEVICE_ID_INTEL_COMET_LAKE_XHCI 0xa3af +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed + +#define PCI_VENDOR_ID_PHYTIUM 0x1db7 +#define PCI_DEVICE_ID_PHYTIUM_XHCI 0xdc27 + +/* Thunderbolt */ +#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI 0x15b5 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI 0x15b6 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI 0x15c1 @@ -55,23 +70,28 @@ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0 -#define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13 -#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af -#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13 -#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138 -#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed -#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed +#define PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI 0x13ed +#define PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI 0x13ee +#define PCI_DEVICE_ID_AMD_STARSHIP_XHCI 0x148c +#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI 0x15d4 +#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI 0x15d5 +#define PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI 0x15e0 +#define PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI 0x15e1 +#define PCI_DEVICE_ID_AMD_RAVEN2_XHCI 0x15e5 #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc +#define PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI 0x7316 + #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 +#define PCI_DEVICE_ID_ASMEDIA_3042_XHCI 0x3042 #define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242 static const char hcd_name[] = "xhci_hcd"; @@ -89,6 +109,10 @@ static const struct xhci_driver_overrides xhci_pci_overrides __initconst = { .update_hub_device = xhci_pci_update_hub_device, }; +/* + * Primary Legacy and MSI IRQ are synced in suspend_common(). + * All MSI-X IRQs and secondary MSI IRQs should be synced here. + */ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) { struct usb_hcd *hcd = xhci_to_hcd(xhci); @@ -101,13 +125,12 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) } } -/* Free any IRQs and disable MSI-X */ +/* Legacy IRQ is freed by usb_remove_hcd() or usb_hcd_pci_shutdown() */ static void xhci_cleanup_msix(struct xhci_hcd *xhci) { struct usb_hcd *hcd = xhci_to_hcd(xhci); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - /* return if using legacy interrupt */ if (hcd->irq > 0) return; @@ -136,14 +159,11 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) hcd->irq = 0; /* - * calculate number of MSI-X vectors supported. - * - HCS_MAX_INTRS: the max number of interrupts the host can handle, - * with max number of interrupters based on the xhci HCSPARAMS1. - * - num_online_cpus: maximum MSI-X vectors per CPUs core. - * Add additional 1 vector to ensure always available interrupt. + * Calculate number of MSI/MSI-X vectors supported. 
+ * - max_interrupters: the max number of interrupts requested, capped to xhci HCSPARAMS1. + * - num_online_cpus: one vector per CPU core, with at least one overall. */ - xhci->nvecs = min(num_online_cpus() + 1, - HCS_MAX_INTRS(xhci->hcs_params1)); + xhci->nvecs = min(num_online_cpus() + 1, xhci->max_interrupters); /* TODO: Check with MSI Soc for sysdev */ xhci->nvecs = pci_alloc_irq_vectors(pdev, 1, xhci->nvecs, @@ -231,15 +251,6 @@ static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev) static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) { struct pci_dev *pdev = to_pci_dev(dev); - struct xhci_driver_data *driver_data; - const struct pci_device_id *id; - - id = pci_match_id(to_pci_driver(pdev->dev.driver)->id_table, pdev); - - if (id && id->driver_data) { - driver_data = (struct xhci_driver_data *)id->driver_data; - xhci->quirks |= driver_data->quirks; - } /* Look for vendor-specific quirks */ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && @@ -270,20 +281,30 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) "QUIRK: Fresco Logic revision %u " "has broken MSI implementation", pdev->revision); - xhci->quirks |= XHCI_TRUST_TX_LENGTH; } if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009) xhci->quirks |= XHCI_BROKEN_STREAMS; - if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && - pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100) - xhci->quirks |= XHCI_TRUST_TX_LENGTH; - if (pdev->vendor == PCI_VENDOR_ID_NEC) xhci->quirks |= XHCI_NEC_HOST; + if (pdev->vendor == PCI_VENDOR_ID_AMD && + (pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_STARSHIP_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI || + pdev->device == PCI_DEVICE_ID_AMD_RAVEN2_XHCI)) + xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9; + + if (pdev->vendor == PCI_VENDOR_ID_ATI && + pdev->device == PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI) + xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9; + if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96) xhci->quirks |= XHCI_AMD_0x96_HOST; @@ -307,11 +328,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_RESET_ON_RESUME; } - if (pdev->vendor == PCI_VENDOR_ID_AMD) { - xhci->quirks |= XHCI_TRUST_TX_LENGTH; - if (pdev->device == 0x43f7) - xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; - } + if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43f7) + xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if ((pdev->vendor == PCI_VENDOR_ID_AMD) && ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) || @@ -356,9 +374,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) { + pdev->device == PCI_DEVICE_ID_INTEL_APOLLO_LAKE_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_DENVERTON_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_COMET_LAKE_XHCI)) { xhci->quirks |= XHCI_PME_STUCK_QUIRK; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && @@ -367,18 +385,19
@@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) + pdev->device == PCI_DEVICE_ID_INTEL_APOLLO_LAKE_XHCI)) xhci->quirks |= XHCI_INTEL_USB_ROLE_SW; if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) + pdev->device == PCI_DEVICE_ID_INTEL_APOLLO_LAKE_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_DENVERTON_XHCI)) xhci->quirks |= XHCI_MISSING_CAS; if (pdev->vendor == PCI_VENDOR_ID_INTEL && - (pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI || + (pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_PCH_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI)) xhci->quirks |= XHCI_RESET_TO_DEFAULT; @@ -397,14 +416,16 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if (pdev->vendor == PCI_VENDOR_ID_ETRON && - pdev->device == PCI_DEVICE_ID_EJ168) { + (pdev->device == PCI_DEVICE_ID_ETRON_EJ168 || + pdev->device == PCI_DEVICE_ID_ETRON_EJ188)) { + xhci->quirks |= XHCI_ETRON_HOST; xhci->quirks |= XHCI_RESET_ON_RESUME; - xhci->quirks |= XHCI_TRUST_TX_LENGTH; xhci->quirks |= XHCI_BROKEN_STREAMS; + xhci->quirks |= XHCI_NO_SOFT_RETRY; } + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0014) { - xhci->quirks |= XHCI_TRUST_TX_LENGTH; xhci->quirks |= XHCI_ZERO_64B_REGS; } if (pdev->vendor == PCI_VENDOR_ID_RENESAS && @@ -415,13 +436,19 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; + if (pdev->vendor == PCI_VENDOR_ID_PHYTIUM && + pdev->device == PCI_DEVICE_ID_PHYTIUM_XHCI) + xhci->quirks |= XHCI_RESET_ON_RESUME; + /* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3432) xhci->quirks |= XHCI_BROKEN_STREAMS; - if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) + if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == PCI_DEVICE_ID_VIA_VL805) { xhci->quirks |= XHCI_LPM_SUPPORT; + xhci->quirks |= XHCI_TRB_OVERFETCH; + } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) { @@ -434,7 +461,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) { - xhci->quirks |= XHCI_TRUST_TX_LENGTH; xhci->quirks |= XHCI_NO_64BIT_SUPPORT; } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && @@ -447,6 +473,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == PCI_DEVICE_ID_ASMEDIA_3042_XHCI) + xhci->quirks |= XHCI_RESET_ON_RESUME; + if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; @@ -466,13 +496,17 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->device == 0x9202) { 
xhci->quirks |= XHCI_RESET_ON_RESUME; - xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; + xhci->quirks |= XHCI_TRB_OVERFETCH; } if (pdev->device == 0x9203) - xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; + xhci->quirks |= XHCI_TRB_OVERFETCH; } + if (pdev->vendor == PCI_VENDOR_ID_CDNS && + pdev->device == PCI_DEVICE_ID_CDNS_USBSSP) + xhci->quirks |= XHCI_CDNS_SCTX_QUIRK; + /* xHC spec requires PCI devices to support D3hot and D3cold */ if (xhci->hci_version >= 0x120) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; @@ -534,10 +568,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd) struct xhci_hcd *xhci; struct pci_dev *pdev = to_pci_dev(hcd->self.controller); int retval; + u8 sbrn; xhci = hcd_to_xhci(hcd); - if (!xhci->sbrn) - pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn); /* imod_interval is the interrupt moderation value in nanoseconds. */ xhci->imod_interval = 40000; @@ -552,7 +585,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd) if (xhci->quirks & XHCI_PME_STUCK_QUIRK) xhci_pme_acpi_rtd3_enable(pdev); - xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn); + pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &sbrn); + xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int)sbrn); /* Find any debug ports */ return xhci_pci_reinit(xhci, pdev); @@ -572,21 +606,13 @@ static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hd * We need to register our own PCI probe function (instead of the USB core's * function) in order to create a second roothub under xHCI. */ -static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) +int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id) { int retval; struct xhci_hcd *xhci; struct usb_hcd *hcd; - struct xhci_driver_data *driver_data; struct reset_control *reset; - driver_data = (struct xhci_driver_data *)id->driver_data; - if (driver_data && driver_data->quirks & XHCI_RENESAS_FW_QUIRK) { - retval = renesas_xhci_check_request_fw(dev, id); - if (retval) - return retval; - } - reset = devm_reset_control_get_optional_exclusive(&dev->dev, NULL); if (IS_ERR(reset)) return PTR_ERR(reset); @@ -635,12 +661,15 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) pm_runtime_put_noidle(&dev->dev); if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0) - pm_runtime_forbid(&dev->dev); + pm_runtime_get(&dev->dev); else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW) pm_runtime_allow(&dev->dev); dma_set_max_seg_size(&dev->dev, UINT_MAX); + if (device_property_read_bool(&dev->dev, "ti,pwron-active-high")) + pci_clear_and_set_config_dword(dev, 0xE0, 0, 1 << 22); + return 0; put_usb3_hcd: @@ -651,16 +680,37 @@ put_runtime_pm: pm_runtime_put_noidle(&dev->dev); return retval; } +EXPORT_SYMBOL_NS_GPL(xhci_pci_common_probe, "xhci"); -static void xhci_pci_remove(struct pci_dev *dev) +/* handled by xhci-pci-renesas if enabled */ +static const struct pci_device_id pci_ids_renesas[] = { + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) }, + { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) }, + { /* end: all zeroes */ } +}; + +static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + if (IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS) && + pci_match_id(pci_ids_renesas, dev)) + return -ENODEV; + + return xhci_pci_common_probe(dev, id); +} + +void xhci_pci_remove(struct pci_dev *dev) { struct xhci_hcd *xhci; + bool set_power_d3; xhci = hcd_to_xhci(pci_get_drvdata(dev)); + set_power_d3 = xhci->quirks & XHCI_SPURIOUS_WAKEUP; xhci->xhc_state |= XHCI_STATE_REMOVING; - if (xhci->quirks & 
XHCI_DEFAULT_PM_RUNTIME_ALLOW) + if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0) + pm_runtime_put(&dev->dev); + else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW) pm_runtime_forbid(&dev->dev); if (xhci->shared_hcd) { @@ -669,12 +719,13 @@ static void xhci_pci_remove(struct pci_dev *dev) xhci->shared_hcd = NULL; } + usb_hcd_pci_remove(dev); + /* Workaround for spurious wakeups at shutdown with HSW */ - if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) + if (set_power_d3) pci_set_power_state(dev, PCI_D3hot); - - usb_hcd_pci_remove(dev); } +EXPORT_SYMBOL_NS_GPL(xhci_pci_remove, "xhci"); /* * In some Intel xHCI controllers, in order to get D3 working, @@ -781,9 +832,10 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg) { - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - int retval = 0; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + bool power_lost = msg.event == PM_EVENT_RESTORE; + bool is_auto_resume = msg.event == PM_EVENT_AUTO_RESUME; reset_control_reset(xhci->reset); @@ -814,8 +866,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg) if (xhci->quirks & XHCI_PME_STUCK_QUIRK) xhci_pme_quirk(hcd); - retval = xhci_resume(xhci, msg); - return retval; + return xhci_resume(xhci, power_lost, is_auto_resume); } static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup) @@ -882,19 +933,8 @@ static void xhci_pci_shutdown(struct usb_hcd *hcd) /*-------------------------------------------------------------------------*/ -static const struct xhci_driver_data reneses_data = { - .quirks = XHCI_RENESAS_FW_QUIRK, - .firmware = "renesas_usb_fw.mem", -}; - /* PCI driver selection metadata; PCI hotplugging uses this */ static const struct pci_device_id pci_ids[] = { - { PCI_DEVICE(0x1912, 0x0014), - .driver_data = (unsigned long)&reneses_data, - }, - { PCI_DEVICE(0x1912, 0x0015), - .driver_data = (unsigned long)&reneses_data, - }, /* handle any USB 3.0 xHCI controller */ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0), }, @@ -902,14 +942,6 @@ static const struct pci_device_id pci_ids[] = { }; MODULE_DEVICE_TABLE(pci, pci_ids); -/* - * Without CONFIG_USB_XHCI_PCI_RENESAS renesas_xhci_check_request_fw() won't - * load firmware, so don't encumber the xhci-pci driver with it. 
- */ -#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS) -MODULE_FIRMWARE("renesas_usb_fw.mem"); -#endif - /* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver xhci_pci_driver = { .name = hcd_name, diff --git a/drivers/usb/host/xhci-pci.h b/drivers/usb/host/xhci-pci.h index cb9a8f331a44..e87c7d9d76b8 100644 --- a/drivers/usb/host/xhci-pci.h +++ b/drivers/usb/host/xhci-pci.h @@ -4,22 +4,7 @@ #ifndef XHCI_PCI_H #define XHCI_PCI_H -#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS) -int renesas_xhci_check_request_fw(struct pci_dev *dev, - const struct pci_device_id *id); - -#else -static int renesas_xhci_check_request_fw(struct pci_dev *dev, - const struct pci_device_id *id) -{ - return 0; -} - -#endif - -struct xhci_driver_data { - u64 quirks; - const char *firmware; -}; +int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id); +void xhci_pci_remove(struct pci_dev *dev); #endif diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 3d071b875308..c79d5ed48a08 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -106,7 +106,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = { }; static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { - .init_quirk = xhci_mvebu_a3700_init_quirk, + .quirks = XHCI_RESET_ON_RESUME, }; static const struct xhci_plat_priv xhci_plat_brcm = { @@ -256,8 +256,19 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s if (device_property_read_bool(tmpdev, "xhci-sg-trb-cache-size-quirk")) xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK; + if (device_property_read_bool(tmpdev, "write-64-hi-lo-quirk")) + xhci->quirks |= XHCI_WRITE_64_HI_LO; + + if (device_property_read_bool(tmpdev, "xhci-missing-cas-quirk")) + xhci->quirks |= XHCI_MISSING_CAS; + + if (device_property_read_bool(tmpdev, "xhci-skip-phy-init-quirk")) + xhci->quirks |= XHCI_SKIP_PHY_INIT; + device_property_read_u32(tmpdev, "imod-interval-ns", &xhci->imod_interval); + device_property_read_u16(tmpdev, "num-hc-interrupters", + &xhci->max_interrupters); } /* @@ -281,7 +292,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node); - if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)) + if ((priv && (priv->quirks & XHCI_SKIP_PHY_INIT)) || + (xhci->quirks & XHCI_SKIP_PHY_INIT)) hcd->skip_phy_initialization = 1; if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK)) @@ -316,10 +328,13 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s } usb3_hcd = xhci_get_usb3_hcd(xhci); - if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4) + if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4 && + !(xhci->quirks & XHCI_BROKEN_STREAMS)) usb3_hcd->can_do_streams = 1; if (xhci->shared_hcd) { + xhci->shared_hcd->rsrc_start = hcd->rsrc_start; + xhci->shared_hcd->rsrc_len = hcd->rsrc_len; ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); if (ret) goto put_usb3_hcd; @@ -467,9 +482,10 @@ static int xhci_plat_suspend(struct device *dev) return 0; } -static int xhci_plat_resume_common(struct device *dev, struct pm_message pmsg) +static int xhci_plat_resume_common(struct device *dev, bool power_lost) { struct usb_hcd *hcd = dev_get_drvdata(dev); + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); struct xhci_hcd *xhci = hcd_to_xhci(hcd); int ret; @@ -489,7 +505,7 @@ static int xhci_plat_resume_common(struct device *dev, struct pm_message pmsg) if (ret) goto disable_clks; - 
ret = xhci_resume(xhci, pmsg); + ret = xhci_resume(xhci, power_lost || priv->power_lost, false); if (ret) goto disable_clks; @@ -510,12 +526,12 @@ disable_clks: static int xhci_plat_resume(struct device *dev) { - return xhci_plat_resume_common(dev, PMSG_RESUME); + return xhci_plat_resume_common(dev, false); } static int xhci_plat_restore(struct device *dev) { - return xhci_plat_resume_common(dev, PMSG_RESTORE); + return xhci_plat_resume_common(dev, true); } static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev) @@ -536,7 +552,7 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev) struct usb_hcd *hcd = dev_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); - return xhci_resume(xhci, PMSG_AUTO_RESUME); + return xhci_resume(xhci, false, true); } const struct dev_pm_ops xhci_plat_pm_ops = { @@ -557,6 +573,7 @@ EXPORT_SYMBOL_GPL(xhci_plat_pm_ops); static const struct acpi_device_id usb_xhci_acpi_match[] = { /* XHCI-compliant USB Controller */ { "PNP0D10", }, + { "PNP0D15", }, { } }; MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); @@ -564,7 +581,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); static struct platform_driver usb_generic_xhci_driver = { .probe = xhci_generic_plat_probe, - .remove_new = xhci_plat_remove, + .remove = xhci_plat_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "xhci-hcd", diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h index 2d15386f2c50..fe4f95e690fa 100644 --- a/drivers/usb/host/xhci-plat.h +++ b/drivers/usb/host/xhci-plat.h @@ -8,11 +8,14 @@ #ifndef _XHCI_PLAT_H #define _XHCI_PLAT_H -#include "xhci.h" /* for hcd_to_xhci() */ +struct device; +struct platform_device; +struct usb_hcd; struct xhci_plat_priv { const char *firmware_name; unsigned long long quirks; + bool power_lost; void (*plat_start)(struct usb_hcd *); int (*init_quirk)(struct usb_hcd *); int (*suspend_quirk)(struct usb_hcd *); diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index ab9c5969e462..1cc082a3b793 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -214,8 +214,7 @@ static int xhci_rcar_resume_quirk(struct usb_hcd *hcd) */ #define SET_XHCI_PLAT_PRIV_FOR_RCAR(firmware) \ .firmware_name = firmware, \ - .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH | \ - XHCI_SLOW_SUSPEND, \ + .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND, \ .init_quirk = xhci_rcar_init_quirk, \ .plat_start = xhci_rcar_start, \ .resume_quirk = xhci_rcar_resume_quirk, @@ -229,8 +228,7 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = { }; static const struct xhci_plat_priv xhci_plat_renesas_rzv2m = { - .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH | - XHCI_SLOW_SUSPEND, + .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND, .init_quirk = xhci_rzv2m_init_quirk, .plat_start = xhci_rzv2m_start, }; @@ -276,7 +274,7 @@ static int xhci_renesas_probe(struct platform_device *pdev) static struct platform_driver usb_xhci_renesas_driver = { .probe = xhci_renesas_probe, - .remove_new = xhci_plat_remove, + .remove = xhci_plat_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "xhci-renesas-hcd", diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 52278afea94b..94c9c9271658 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -52,8 +52,10 @@ * endpoint rings; it generates events on the event ring for these. 
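 * (Cycle bit recap, illustrative: the producer writes each TRB with its
 * current cycle state, and a consumer stops dequeuing as soon as it reads a
 * TRB whose cycle bit no longer matches the consumer's own cycle state.)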
*/ +#include <linux/jiffies.h> #include <linux/scatterlist.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/dma-mapping.h> #include "xhci.h" #include "xhci-trace.h" @@ -145,10 +147,8 @@ static void trb_to_noop(union xhci_trb *trb, u32 noop_type) * TRB is in a new segment. This does not skip over link TRBs, and it does not * affect the ring dequeue or enqueue pointers. */ -static void next_trb(struct xhci_hcd *xhci, - struct xhci_ring *ring, - struct xhci_segment **seg, - union xhci_trb **trb) +static void next_trb(struct xhci_segment **seg, + union xhci_trb **trb) { if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) { *seg = (*seg)->next; @@ -169,13 +169,16 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) if (ring->type == TYPE_EVENT) { if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) { ring->dequeue++; - goto out; + return; } if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue)) ring->cycle_state ^= 1; ring->deq_seg = ring->deq_seg->next; ring->dequeue = ring->deq_seg->trbs; - goto out; + + trace_xhci_inc_deq(ring); + + return; } /* All other rings have link trbs */ @@ -190,18 +193,61 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) ring->deq_seg = ring->deq_seg->next; ring->dequeue = ring->deq_seg->trbs; + trace_xhci_inc_deq(ring); + if (link_trb_count++ > ring->num_segs) { xhci_warn(xhci, "Ring is an endless link TRB loop\n"); break; } } -out: - trace_xhci_inc_deq(ring); - return; } /* + * If enqueue points at a link TRB, follow links until an ordinary TRB is reached. + * Toggle the cycle bit of passed link TRBs and optionally chain them. + */ +static void inc_enq_past_link(struct xhci_hcd *xhci, struct xhci_ring *ring, u32 chain) +{ + unsigned int link_trb_count = 0; + + while (trb_is_link(ring->enqueue)) { + + /* + * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit + * set, but other sections talk about dealing with the chain bit set. This was + * fixed in the 0.96 specification errata, but we have to assume that all 0.95 + * xHCI hardware can't handle the chain bit being cleared on a link TRB. + * + * On 0.95 and some 0.96 HCs the chain bit is set once at segment initialization + * and never changed here. On all others, modify it as requested by the caller. + */ + if (!xhci_link_chain_quirk(xhci, ring->type)) { + ring->enqueue->link.control &= cpu_to_le32(~TRB_CHAIN); + ring->enqueue->link.control |= cpu_to_le32(chain); + } + + /* Give this link TRB to the hardware */ + wmb(); + ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); + + /* Toggle the cycle bit after the last ring segment. */ + if (link_trb_toggles_cycle(ring->enqueue)) + ring->cycle_state ^= 1; + + ring->enq_seg = ring->enq_seg->next; + ring->enqueue = ring->enq_seg->trbs; + + trace_xhci_inc_enq(ring); + + if (link_trb_count++ > ring->num_segs) { + xhci_warn(xhci, "Link TRB loop at enqueue\n"); + break; + } + } +} + +/* * See Cycle bit rules. SW is the consumer for the event ring only. * * If we've just enqueued a TRB that is in the middle of a TD (meaning the @@ -209,11 +255,6 @@ out: * If we've enqueued the last TRB in a TD, make sure the following link TRBs * have their chain bit cleared (so that each Link TRB is a separate TD). * - * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit - * set, but other sections talk about dealing with the chain bit set.
This was - * fixed in the 0.96 specification errata, but we have to assume that all 0.95 - * xHCI hardware can't handle the chain bit being cleared on a link TRB. - * * @more_trbs_coming: Will you enqueue more TRBs before calling * prepare_transfer()? */ @@ -221,8 +262,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool more_trbs_coming) { u32 chain; - union xhci_trb *next; - unsigned int link_trb_count = 0; chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN; @@ -231,50 +270,67 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, return; } - next = ++(ring->enqueue); - - /* Update the dequeue pointer further if that was a link TRB */ - while (trb_is_link(next)) { + ring->enqueue++; - /* - * If the caller doesn't plan on enqueueing more TDs before - * ringing the doorbell, then we don't want to give the link TRB - * to the hardware just yet. We'll give the link TRB back in - * prepare_ring() just before we enqueue the TD at the top of - * the ring. - */ - if (!chain && !more_trbs_coming) - break; + /* + * If we are in the middle of a TD or the caller plans to enqueue more + * TDs as one transfer (eg. control), traverse any link TRBs right now. + * Otherwise, enqueue can stay on a link until the next prepare_ring(). + * This avoids enqueue entering deq_seg and simplifies ring expansion. + */ + if (trb_is_link(ring->enqueue) && (chain || more_trbs_coming)) + inc_enq_past_link(xhci, ring, chain); +} - /* If we're not dealing with 0.95 hardware or isoc rings on - * AMD 0.96 host, carry over the chain bit of the previous TRB - * (which may mean the chain bit is cleared). - */ - if (!(ring->type == TYPE_ISOC && - (xhci->quirks & XHCI_AMD_0x96_HOST)) && - !xhci_link_trb_quirk(xhci)) { - next->link.control &= cpu_to_le32(~TRB_CHAIN); - next->link.control |= cpu_to_le32(chain); - } - /* Give this link TRB to the hardware */ - wmb(); - next->link.control ^= cpu_to_le32(TRB_CYCLE); +/* + * If the suspect DMA address is a TRB in this TD, this function returns that + * TRB's segment. Otherwise it returns 0. + */ +static struct xhci_segment *trb_in_td(struct xhci_td *td, dma_addr_t suspect_dma) +{ + dma_addr_t start_dma; + dma_addr_t end_seg_dma; + dma_addr_t end_trb_dma; + struct xhci_segment *cur_seg; - /* Toggle the cycle bit after the last ring segment. 
*/ - if (link_trb_toggles_cycle(next)) - ring->cycle_state ^= 1; + start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb); + cur_seg = td->start_seg; - ring->enq_seg = ring->enq_seg->next; - ring->enqueue = ring->enq_seg->trbs; - next = ring->enqueue; + do { + if (start_dma == 0) + return NULL; + /* We may get an event for a Link TRB in the middle of a TD */ + end_seg_dma = xhci_trb_virt_to_dma(cur_seg, + &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); + /* If the end TRB isn't in this segment, this is set to 0 */ + end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb); - if (link_trb_count++ > ring->num_segs) { - xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__); - break; + if (end_trb_dma > 0) { + /* The end TRB is in this segment, so suspect should be here */ + if (start_dma <= end_trb_dma) { + if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) + return cur_seg; + } else { + /* Case for one segment with + * a TD wrapped around to the top + */ + if ((suspect_dma >= start_dma && + suspect_dma <= end_seg_dma) || + (suspect_dma >= cur_seg->dma && + suspect_dma <= end_trb_dma)) + return cur_seg; + } + return NULL; } - } + /* Might still be somewhere in this segment */ + if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) + return cur_seg; + + cur_seg = cur_seg->next; + start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); + } while (cur_seg != td->start_seg); - trace_xhci_inc_enq(ring); + return NULL; } /* @@ -283,7 +339,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, * Only for transfer and command rings where driver is the producer, not for * event rings. */ -static unsigned int xhci_num_trbs_free(struct xhci_hcd *xhci, struct xhci_ring *ring) +static unsigned int xhci_num_trbs_free(struct xhci_ring *ring) { struct xhci_segment *enq_seg = ring->enq_seg; union xhci_trb *enq = ring->enqueue; @@ -308,7 +364,7 @@ static unsigned int xhci_num_trbs_free(struct xhci_hcd *xhci, struct xhci_ring * free += last_on_seg - enq; enq_seg = enq_seg->next; enq = enq_seg->trbs; - } while (i++ <= ring->num_segs); + } while (i++ < ring->num_segs); return free; } @@ -351,10 +407,8 @@ static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhc while (new_segs > 0) { seg = seg->next; if (seg == ring->deq_seg) { - xhci_dbg(xhci, "Ring expansion by %d segments needed\n", - new_segs); - xhci_dbg(xhci, "Adding %d trbs moves enq %d trbs into deq seg\n", - num_trbs, trbs_past_seg % TRBS_PER_SEGMENT); + xhci_dbg(xhci, "Adding %d trbs requires expanding ring by %d segments\n", + num_trbs, new_segs); return new_segs; } new_segs--; @@ -425,12 +479,13 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) && !(xhci->xhc_state & XHCI_STATE_DYING)) { xhci->current_cmd = cur_cmd; - xhci_mod_cmd_timer(xhci); + if (cur_cmd) + xhci_mod_cmd_timer(xhci); xhci_ring_cmd_db(xhci); } } -/* Must be called with xhci->lock held, releases and aquires lock back */ +/* Must be called with xhci->lock held, releases and acquires lock back */ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) { struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg; @@ -450,9 +505,9 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) * avoiding corrupting the command ring pointer in case the command ring * is stopped by the time the upper dword is written. 
*/ - next_trb(xhci, NULL, &new_seg, &new_deq); + next_trb(&new_seg, &new_deq); if (trb_is_link(new_deq)) - next_trb(xhci, NULL, &new_seg, &new_deq); + next_trb(&new_seg, &new_deq); crcr = xhci_trb_virt_to_dma(new_seg, new_deq); xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); @@ -463,9 +518,8 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) * In the future we should distinguish between -ENODEV and -ETIMEDOUT * and try to recover a -ETIMEDOUT with a host controller reset. */ - ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring, - CMD_RING_RUNNING, 0, 5 * 1000 * 1000, - XHCI_STATE_REMOVING); + ret = xhci_handshake(&xhci->op_regs->cmd_ring, + CMD_RING_RUNNING, 0, 5 * 1000 * 1000); if (ret < 0) { xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret); xhci_halt(xhci); @@ -644,7 +698,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, int new_cycle; dma_addr_t addr; u64 hw_dequeue; - bool cycle_found = false; + bool hw_dequeue_found = false; bool td_last_trb_found = false; u32 trb_sct = 0; int ret; @@ -656,52 +710,32 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, stream_id); return -ENODEV; } - /* - * A cancelled TD can complete with a stall if HW cached the trb. - * In this case driver can't find td, but if the ring is empty we - * can move the dequeue pointer to the current enqueue position. - * We shouldn't hit this anymore as cached cancelled TRBs are given back - * after clearing the cache, but be on the safe side and keep it anyway - */ - if (!td) { - if (list_empty(&ep_ring->td_list)) { - new_seg = ep_ring->enq_seg; - new_deq = ep_ring->enqueue; - new_cycle = ep_ring->cycle_state; - xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue"); - goto deq_found; - } else { - xhci_warn(xhci, "Can't find new dequeue state, missing td\n"); - return -EINVAL; - } - } hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id); new_seg = ep_ring->deq_seg; new_deq = ep_ring->dequeue; - new_cycle = hw_dequeue & 0x1; + new_cycle = le32_to_cpu(td->end_trb->generic.field[3]) & TRB_CYCLE; /* - * We want to find the pointer, segment and cycle state of the new trb - * (the one after current TD's last_trb). We know the cycle state at - * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are - * found. + * Walk the ring until both the next TRB and hw_dequeue are found (don't + * move hw_dequeue back if it went forward due to a HW bug). Cycle state + * is loaded from a known good TRB, track later toggles to maintain it. */ do { - if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq) + if (!hw_dequeue_found && xhci_trb_virt_to_dma(new_seg, new_deq) == (dma_addr_t)(hw_dequeue & ~0xf)) { - cycle_found = true; + hw_dequeue_found = true; if (td_last_trb_found) break; } - if (new_deq == td->last_trb) + if (new_deq == td->end_trb) td_last_trb_found = true; - if (cycle_found && trb_is_link(new_deq) && + if (td_last_trb_found && trb_is_link(new_deq) && link_trb_toggles_cycle(new_deq)) new_cycle ^= 0x1; - next_trb(xhci, ep_ring, &new_seg, &new_deq); + next_trb(&new_seg, &new_deq); /* Search wrapped around, bail out */ if (new_deq == ep->ring->dequeue) { @@ -709,9 +743,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, return -EINVAL; } - } while (!cycle_found || !td_last_trb_found); - -deq_found: + } while (!hw_dequeue_found || !td_last_trb_found); /* Don't update the ring cycle state for the producer (us). 
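 * (Aside: the new_cycle value computed above is only handed to the Set TR
 * Dequeue Pointer command below; ring->cycle_state keeps tracking the
 * enqueue side.)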
*/ addr = xhci_trb_virt_to_dma(new_seg, new_deq); @@ -740,7 +772,7 @@ deq_found: lower_32_bits(addr) | trb_sct | new_cycle, upper_32_bits(addr), STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) | - EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false); + EP_INDEX_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false); if (ret < 0) { xhci_free_command(xhci, cmd); return ret; @@ -765,30 +797,25 @@ deq_found: * (The last TRB actually points to the ring enqueue pointer, which is not part * of this TD.) This is used to remove partially enqueued isoc TDs from a ring. */ -static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, - struct xhci_td *td, bool flip_cycle) +static void td_to_noop(struct xhci_td *td, bool flip_cycle) { struct xhci_segment *seg = td->start_seg; - union xhci_trb *trb = td->first_trb; + union xhci_trb *trb = td->start_trb; while (1) { trb_to_noop(trb, TRB_TR_NOOP); /* flip cycle if asked to */ - if (flip_cycle && trb != td->first_trb && trb != td->last_trb) + if (flip_cycle && trb != td->start_trb && trb != td->end_trb) trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); - if (trb == td->last_trb) + if (trb == td->end_trb) break; - next_trb(xhci, ep_ring, &seg, &trb); + next_trb(&seg, &trb); } } -/* - * Must be called with xhci->lock held in interrupt context, - * releases and re-acquires xhci->lock - */ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, struct xhci_td *cur_td, int status) { @@ -828,7 +855,7 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, DMA_FROM_DEVICE); - /* for in tranfers we need to copy the data from bounce to sg */ + /* for in transfers we need to copy the data from bounce to sg */ if (urb->num_sgs) { len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, seg->bounce_len, seg->bounce_offs); @@ -843,8 +870,8 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, seg->bounce_offs = 0; } -static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, - struct xhci_ring *ep_ring, int status) +static void xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, + struct xhci_ring *ep_ring, int status) { struct urb *urb = NULL; @@ -887,10 +914,18 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td, status = 0; xhci_giveback_urb_in_irq(xhci, td, status); } - - return 0; } +/* Give back previous TD and move on to the next TD. */ +static void xhci_dequeue_td(struct xhci_hcd *xhci, struct xhci_td *td, struct xhci_ring *ring, + u32 status) +{ + ring->dequeue = td->end_trb; + ring->deq_seg = td->end_seg; + inc_deq(xhci, ring); + + xhci_td_cleanup(xhci, td, ring, status); +} /* Complete the cancelled URBs we unlinked from td_list. */ static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep) @@ -1001,13 +1036,20 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) unsigned int slot_id = ep->vdev->slot_id; int err; + /* + * This is not going to work if the hardware is changing its dequeue + * pointers as we look at them. Completion handler will call us later. 
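+ *
+ * When that command completes, xhci_handle_cmd_set_deq() re-runs this
+ * invalidation for any deferred or newly cancelled TDs (see the
+ * cancelled_td_list check in its completion handler below).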
+ */ + if (ep->ep_state & SET_DEQ_PENDING) + return 0; + xhci = ep->xhci; list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p", (unsigned long long)xhci_trb_virt_to_dma( - td->start_seg, td->first_trb), + td->start_seg, td->start_trb), td->urb->stream_id, td->urb); list_del_init(&td->td_list); ring = xhci_urb_to_transfer_ring(xhci, td->urb); @@ -1026,26 +1068,39 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) td->urb->stream_id); hw_deq &= ~0xf; - if (td->cancel_status == TD_HALTED || - trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) { + if (td->cancel_status == TD_HALTED || trb_in_td(td, hw_deq)) { switch (td->cancel_status) { case TD_CLEARED: /* TD is already no-op */ case TD_CLEARING_CACHE: /* set TR deq command already queued */ break; case TD_DIRTY: /* TD is cached, clear it */ case TD_HALTED: + case TD_CLEARING_CACHE_DEFERRED: + if (cached_td) { + if (cached_td->urb->stream_id != td->urb->stream_id) { + /* Multiple streams case, defer move dq */ + xhci_dbg(xhci, + "Move dq deferred: stream %u URB %p\n", + td->urb->stream_id, td->urb); + td->cancel_status = TD_CLEARING_CACHE_DEFERRED; + break; + } + + /* Should never happen, but clear the TD if it does */ + xhci_warn(xhci, + "Found multiple active URBs %p and %p in stream %u?\n", + td->urb, cached_td->urb, + td->urb->stream_id); + td_to_noop(cached_td, false); + cached_td->cancel_status = TD_CLEARED; + } + td_to_noop(td, false); td->cancel_status = TD_CLEARING_CACHE; - if (cached_td) - /* FIXME stream case, several stopped rings */ - xhci_dbg(xhci, - "Move dq past stream %u URB %p instead of stream %u URB %p\n", - td->urb->stream_id, td->urb, - cached_td->urb->stream_id, cached_td->urb); cached_td = td; break; } } else { - td_to_noop(xhci, ring, td, false); + td_to_noop(td, false); td->cancel_status = TD_CLEARED; } } @@ -1060,11 +1115,17 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) if (err) { /* Failed to move past cached td, just set cached TDs to no-op */ list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { - if (td->cancel_status != TD_CLEARING_CACHE) + /* + * Deferred TDs need to have the deq pointer set after the above command + * completes, so if that failed we just give up on all of them (and + * complain loudly since this could cause issues due to caching). + */ + if (td->cancel_status != TD_CLEARING_CACHE && + td->cancel_status != TD_CLEARING_CACHE_DEFERRED) continue; - xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n", - td->urb); - td_to_noop(xhci, ring, td, false); + xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n", + td->urb); + td_to_noop(td, false); td->cancel_status = TD_CLEARED; } } @@ -1072,6 +1133,19 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) } /* + * Erase queued TDs from transfer ring(s) and give back those the xHC didn't + * stop on. If necessary, queue commands to move the xHC off cancelled TDs it + * stopped on. Those will be given back later when the commands complete. + * + * Call under xhci->lock on a stopped endpoint. + */ +void xhci_process_cancelled_tds(struct xhci_virt_ep *ep) +{ + xhci_invalidate_cancelled_tds(ep); + xhci_giveback_invalidated_tds(ep); +} + +/* * Returns the TD the endpoint ring halted on. * Only call for non-running rings without streams. 
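 * (Illustrative note: with streams each stream ID has its own ring and its
 * own hardware dequeue pointer, so this single-ring lookup does not apply.)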
*/ @@ -1084,8 +1158,7 @@ static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep) hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0); hw_deq &= ~0xf; td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list); - if (trb_in_td(ep->xhci, td->start_seg, td->first_trb, - td->last_trb, hw_deq, false)) + if (trb_in_td(td, hw_deq)) return td; } return NULL; @@ -1145,7 +1218,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, */ switch (GET_EP_CTX_STATE(ep_ctx)) { case EP_STATE_HALTED: - xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n"); + xhci_dbg(xhci, "Stop ep completion raced with stall\n"); + /* + * If the halt happened before Stop Endpoint failed, its transfer event + * should have already been handled and Reset Endpoint should be pending. + */ + if (ep->ep_state & EP_HALTED) + goto reset_done; + if (ep->ep_state & EP_HAS_STREAMS) { reset_type = EP_SOFT_RESET; } else { @@ -1156,22 +1236,45 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, } /* reset ep, reset handler cleans up cancelled tds */ err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type); + xhci_dbg(xhci, "Stop ep completion resetting ep, status %d\n", err); if (err) break; +reset_done: + /* Reset EP handler will clean up cancelled TDs */ ep->ep_state &= ~EP_STOP_CMD_PENDING; return; case EP_STATE_STOPPED: /* - * NEC uPD720200 sometimes sets this state and fails with - * Context Error while continuing to process TRBs. - * Be conservative and trust EP_CTX_STATE on other chips. + * Per xHCI 4.6.9, Stop Endpoint command on a Stopped + * EP is a Context State Error, and EP stays Stopped. + * + * But maybe it failed on Halted, and somebody ran Reset + * Endpoint later. EP state is now Stopped and EP_HALTED + * still set because Reset EP handler will run after us. */ - if (!(xhci->quirks & XHCI_NEC_HOST)) + if (ep->ep_state & EP_HALTED) break; + /* + * On some HCs EP state remains Stopped for some tens of + * us to a few ms or more after a doorbell ring, and any + * new Stop Endpoint fails without aborting the restart. + * This handler may run quickly enough to still see this + * Stopped state, but it will soon change to Running. + * + * Assume this bug on unexpected Stop Endpoint failures. + * Keep retrying until the EP starts and stops again. + */ fallthrough; case EP_STATE_RUNNING: /* Race, HW handled stop ep cmd before ep was running */ - xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n"); + xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n", + GET_EP_CTX_STATE(ep_ctx)); + /* + * Don't retry forever if we guessed wrong or a defective HC never starts + * the EP or says 'Running' but fails the command. We must give back TDs. + */ + if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100))) + break; command = xhci_alloc_command(xhci, false, GFP_ATOMIC); if (!command) { @@ -1296,43 +1399,6 @@ void xhci_hc_died(struct xhci_hcd *xhci) usb_hc_died(xhci_to_hcd(xhci)); } -static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci, - struct xhci_virt_device *dev, - struct xhci_ring *ep_ring, - unsigned int ep_index) -{ - union xhci_trb *dequeue_temp; - - dequeue_temp = ep_ring->dequeue; - - /* If we get two back-to-back stalls, and the first stalled transfer - * ends just before a link TRB, the dequeue pointer will be left on - * the link TRB by the code in the while loop. So we have to update - * the dequeue pointer one segment further, or we'll jump off - * the segment into la-la-land. 
- */ - if (trb_is_link(ep_ring->dequeue)) { - ep_ring->deq_seg = ep_ring->deq_seg->next; - ep_ring->dequeue = ep_ring->deq_seg->trbs; - } - - while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) { - /* We have more usable TRBs */ - ep_ring->dequeue++; - if (trb_is_link(ep_ring->dequeue)) { - if (ep_ring->dequeue == - dev->eps[ep_index].queued_deq_ptr) - break; - ep_ring->deq_seg = ep_ring->deq_seg->next; - ep_ring->dequeue = ep_ring->deq_seg->trbs; - } - if (ep_ring->dequeue == dequeue_temp) { - xhci_dbg(xhci, "Unable to find new dequeue pointer\n"); - break; - } - } -} - /* * When we get a completion for a Set Transfer Ring Dequeue Pointer command, * we need to clear the set deq pending flag in the endpoint ring state, so that @@ -1349,6 +1415,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, struct xhci_virt_ep *ep; struct xhci_ep_ctx *ep_ctx; struct xhci_slot_ctx *slot_ctx; + struct xhci_stream_ctx *stream_ctx; struct xhci_td *td, *tmp_td; ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); @@ -1370,6 +1437,11 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, trace_xhci_handle_cmd_set_deq(slot_ctx); trace_xhci_handle_cmd_set_deq_ep(ep_ctx); + if (ep->ep_state & EP_HAS_STREAMS) { + stream_ctx = &ep->stream_info->stream_ctx_array[stream_id]; + trace_xhci_handle_cmd_set_deq_stream(ep->stream_info, stream_id); + } + if (cmd_comp_code != COMP_SUCCESS) { unsigned int ep_state; unsigned int slot_state; @@ -1406,9 +1478,21 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, u64 deq; /* 4.6.10 deq ptr is written to the stream ctx for streams */ if (ep->ep_state & EP_HAS_STREAMS) { - struct xhci_stream_ctx *ctx = - &ep->stream_info->stream_ctx_array[stream_id]; - deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK; + deq = le64_to_cpu(stream_ctx->stream_ring) & SCTX_DEQ_MASK; + + /* + * Cadence xHCI controllers store some endpoint state + * information within Rsvd0 fields of Stream Endpoint + * context. This field is not cleared during Set TR + * Dequeue Pointer command which causes XDMA to skip + * over transfer ring and leads to data loss on stream + * pipe. + * To fix this issue driver must clear Rsvd0 field. + */ + if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) { + stream_ctx->reserved[0] = 0; + stream_ctx->reserved[1] = 0; + } } else { deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; } @@ -1419,8 +1503,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, /* Update the ring's dequeue segment and dequeue pointer * to reflect the new position. 
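The loop being deleted here walked the ring TRB by TRB to find the new dequeue position; the replacement in the next hunk simply adopts the position that was recorded when the Set TR Dequeue command was queued. A compact sketch of that record-then-adopt pattern, with hypothetical types:

#include <stddef.h>
#include <stdio.h>

struct ring_pos {
	void *seg;	/* segment holding the TRB */
	void *trb;	/* TRB within that segment */
};

struct ep_state {
	struct ring_pos queued;	/* saved when Set TR Deq is submitted */
	struct ring_pos deq;	/* ring's live dequeue position */
};

static void set_tr_deq_submit(struct ep_state *ep, void *seg, void *trb)
{
	ep->queued.seg = seg;	/* remember exactly where we asked to go */
	ep->queued.trb = trb;
}

static void set_tr_deq_complete(struct ep_state *ep)
{
	ep->deq = ep->queued;	/* no search: the saved target is authoritative */
	ep->queued.seg = NULL;
	ep->queued.trb = NULL;
}

int main(void)
{
	struct ep_state ep = { 0 };
	int seg, trb;

	set_tr_deq_submit(&ep, &seg, &trb);
	set_tr_deq_complete(&ep);
	printf("dequeue adopted: %d\n", ep.deq.trb == (void *)&trb);
	return 0;
}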
*/ - update_ring_for_set_deq_completion(xhci, ep->vdev, - ep_ring, ep_index); + ep_ring->deq_seg = ep->queued_deq_seg; + ep_ring->dequeue = ep->queued_deq_ptr; } else { xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n"); xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", @@ -1445,8 +1529,21 @@ cleanup: ep->ep_state &= ~SET_DEQ_PENDING; ep->queued_deq_seg = NULL; ep->queued_deq_ptr = NULL; - /* Restart any rings with pending URBs */ - ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + + /* Check for deferred or newly cancelled TDs */ + if (!list_empty(&ep->cancelled_td_list)) { + xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n", + __func__); + xhci_invalidate_cancelled_tds(ep); + /* Try to restart the endpoint if all is done */ + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + /* Start giving back any TDs invalidated above */ + xhci_giveback_invalidated_tds(ep); + } else { + /* Restart any rings with pending URBs */ + xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__); + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + } } static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, @@ -1483,8 +1580,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } -static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, - struct xhci_command *command, u32 cmd_comp_code) +static void xhci_handle_cmd_enable_slot(int slot_id, struct xhci_command *command, + u32 cmd_comp_code) { if (cmd_comp_code == COMP_SUCCESS) command->slot_id = slot_id; @@ -1509,8 +1606,7 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) xhci_free_device_endpoint_resources(xhci, virt_dev, true); } -static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, - u32 cmd_comp_code) +static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id) { struct xhci_virt_device *virt_dev; struct xhci_input_control_ctx *ctrl_ctx; @@ -1585,12 +1681,13 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, NEC_FW_MINOR(le32_to_cpu(event->status))); } -static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status) +static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 comp_code, u32 comp_param) { list_del(&cmd->cmd_list); if (cmd->completion) { - cmd->status = status; + cmd->status = comp_code; + cmd->comp_param = comp_param; complete(cmd->completion); } else { kfree(cmd); @@ -1602,7 +1699,7 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci) struct xhci_command *cur_cmd, *tmp_cmd; xhci->current_cmd = NULL; list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list) - xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED); + xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED, 0); } void xhci_handle_command_timeout(struct work_struct *work) @@ -1687,6 +1784,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, struct xhci_event_cmd *event) { unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); + u32 status = le32_to_cpu(event->status); u64 cmd_dma; dma_addr_t cmd_dequeue_dma; u32 cmd_comp_code; @@ -1702,7 +1800,15 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cmd_dma = le64_to_cpu(event->cmd_trb); cmd_trb = xhci->cmd_ring->dequeue; - trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic); + trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic, 
cmd_dma); + + cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); + + /* If CMD ring stopped we own the trbs between enqueue and dequeue */ + if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) { + complete_all(&xhci->cmd_ring_stop_completion); + return; + } cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, cmd_trb); @@ -1720,14 +1826,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cancel_delayed_work(&xhci->cmd_timer); - cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); - - /* If CMD ring stopped we own the trbs between enqueue and dequeue */ - if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) { - complete_all(&xhci->cmd_ring_stop_completion); - return; - } - if (cmd->command_trb != xhci->cmd_ring->dequeue) { xhci_err(xhci, "Command completion event does not match command\n"); @@ -1752,14 +1850,14 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); switch (cmd_type) { case TRB_ENABLE_SLOT: - xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code); + xhci_handle_cmd_enable_slot(slot_id, cmd, cmd_comp_code); break; case TRB_DISABLE_SLOT: xhci_handle_cmd_disable_slot(xhci, slot_id); break; case TRB_CONFIG_EP: if (!cmd->completion) - xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code); + xhci_handle_cmd_config_ep(xhci, slot_id); break; case TRB_EVAL_CONTEXT: break; @@ -1799,6 +1897,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, case TRB_NEC_GET_FW: xhci_handle_cmd_nec_get_fw(xhci, event); break; + case TRB_GET_BW: + break; default: /* Skip over unknown commands on the event ring */ xhci_info(xhci, "INFO unknown command type %d\n", cmd_type); @@ -1815,7 +1915,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, } event_handled: - xhci_complete_del_and_free_cmd(cmd, cmd_comp_code); + xhci_complete_del_and_free_cmd(cmd, cmd_comp_code, COMP_PARAM(status)); inc_deq(xhci, xhci->cmd_ring); } @@ -1877,9 +1977,7 @@ static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci) } while (!(pll_lock_check & 0x1) && --retry_count); } -static void handle_port_status(struct xhci_hcd *xhci, - struct xhci_interrupter *ir, - union xhci_trb *event) +static void handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event) { struct usb_hcd *hcd; u32 port_id; @@ -2050,73 +2148,6 @@ cleanup: spin_lock(&xhci->lock); } -/* - * This TD is defined by the TRBs starting at start_trb in start_seg and ending - * at end_trb, which may be in another segment. If the suspect DMA address is a - * TRB in this TD, this function returns that TRB's segment. Otherwise it - * returns 0. 
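For reference, the containment test that this removed helper performed (and that the new TD-based trb_in_td() still has to answer) is plain interval arithmetic on DMA addresses; the only subtlety is a TD that wraps within a segment. A single-segment model with made-up addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Does 'suspect' fall inside a TD occupying [start, end] of one segment?
 * If the TD wrapped past the segment end, it is the union of two ranges.
 * The driver walks real segment lists; addresses here are illustrative.
 */
static bool dma_in_td(uint64_t start, uint64_t end,
		      uint64_t seg_start, uint64_t seg_end, uint64_t suspect)
{
	if (start <= end)	/* TD did not wrap */
		return suspect >= start && suspect <= end;

	return (suspect >= start && suspect <= seg_end) ||
	       (suspect >= seg_start && suspect <= end);
}

int main(void)
{
	/* segment of 256 16-byte TRBs at 0x1000; TD wraps from 0x1f00 to 0x1040 */
	printf("%d\n", dma_in_td(0x1f00, 0x1040, 0x1000, 0x1ff0, 0x1010)); /* 1 */
	printf("%d\n", dma_in_td(0x1f00, 0x1040, 0x1000, 0x1ff0, 0x1800)); /* 0 */
	return 0;
}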
- */ -struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, - struct xhci_segment *start_seg, - union xhci_trb *start_trb, - union xhci_trb *end_trb, - dma_addr_t suspect_dma, - bool debug) -{ - dma_addr_t start_dma; - dma_addr_t end_seg_dma; - dma_addr_t end_trb_dma; - struct xhci_segment *cur_seg; - - start_dma = xhci_trb_virt_to_dma(start_seg, start_trb); - cur_seg = start_seg; - - do { - if (start_dma == 0) - return NULL; - /* We may get an event for a Link TRB in the middle of a TD */ - end_seg_dma = xhci_trb_virt_to_dma(cur_seg, - &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); - /* If the end TRB isn't in this segment, this is set to 0 */ - end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb); - - if (debug) - xhci_warn(xhci, - "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n", - (unsigned long long)suspect_dma, - (unsigned long long)start_dma, - (unsigned long long)end_trb_dma, - (unsigned long long)cur_seg->dma, - (unsigned long long)end_seg_dma); - - if (end_trb_dma > 0) { - /* The end TRB is in this segment, so suspect should be here */ - if (start_dma <= end_trb_dma) { - if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) - return cur_seg; - } else { - /* Case for one segment with - * a TD wrapped around to the top - */ - if ((suspect_dma >= start_dma && - suspect_dma <= end_seg_dma) || - (suspect_dma >= cur_seg->dma && - suspect_dma <= end_trb_dma)) - return cur_seg; - } - return NULL; - } else { - /* Might still be somewhere in this segment */ - if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) - return cur_seg; - } - cur_seg = cur_seg->next; - start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); - } while (cur_seg != start_seg); - - return NULL; -} - static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td, struct xhci_virt_ep *ep) { @@ -2134,30 +2165,34 @@ static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td, } } -/* Check if an error has halted the endpoint ring. The class driver will - * cleanup the halt for a non-default control endpoint if we indicate a stall. - * However, a babble and other errors also halt the endpoint ring, and the class - * driver won't clear the halt in that case, so we need to issue a Set Transfer - * Ring Dequeue Pointer command manually. +/* + * Check if xhci internal endpoint state has gone to a "halt" state due to an + * error or stall, including default control pipe protocol stall. + * The internal halt needs to be cleared with a reset endpoint command. + * + * External device side is also halted in functional stall cases. Class driver + * will clear the device halt with a CLEAR_FEATURE(ENDPOINT_HALT) request later. */ -static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, - struct xhci_ep_ctx *ep_ctx, - unsigned int trb_comp_code) +static bool xhci_halted_host_endpoint(struct xhci_ep_ctx *ep_ctx, unsigned int comp_code) { - /* TRB completion codes that may require a manual halt cleanup */ - if (trb_comp_code == COMP_USB_TRANSACTION_ERROR || - trb_comp_code == COMP_BABBLE_DETECTED_ERROR || - trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR) - /* The 0.95 spec says a babbling control endpoint - * is not halted. The 0.96 spec says it is. Some HW - * claims to be 0.95 compliant, but it halts the control - * endpoint anyway. Check if a babble halted the - * endpoint. 
+ /* Stall halts both internal and device side endpoint */ + if (comp_code == COMP_STALL_ERROR) + return true; + + /* TRB completion codes that may require internal halt cleanup */ + if (comp_code == COMP_USB_TRANSACTION_ERROR || + comp_code == COMP_BABBLE_DETECTED_ERROR || + comp_code == COMP_SPLIT_TRANSACTION_ERROR) + /* + * The 0.95 spec says a babbling control endpoint is not halted. + * The 0.96 spec says it is. Some HW claims to be 0.95 + * compliant, but it halts the control endpoint anyway. + * Check endpoint context if endpoint is halted. */ if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED) - return 1; + return true; - return 0; + return false; } int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) @@ -2174,9 +2209,9 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) return 0; } -static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, - struct xhci_ring *ep_ring, struct xhci_td *td, - u32 trb_comp_code) +static void finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + struct xhci_ring *ep_ring, struct xhci_td *td, + u32 trb_comp_code) { struct xhci_ep_ctx *ep_ctx; @@ -2191,7 +2226,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, * stopped TDs. A stopped TD may be restarted, so don't update * the ring dequeue pointer or take this TD off any lists yet. */ - return 0; + return; case COMP_USB_TRANSACTION_ERROR: case COMP_BABBLE_DETECTED_ERROR: case COMP_SPLIT_TRANSACTION_ERROR: @@ -2216,8 +2251,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, !list_empty(&td->cancelled_td_list)) { xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n", (unsigned long long)xhci_trb_virt_to_dma( - td->start_seg, td->first_trb)); - return 0; + td->start_seg, td->start_trb)); + return; } /* endpoint not halted, don't reset it */ break; @@ -2225,7 +2260,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, /* Almost same procedure as for STALL_ERROR below */ xhci_clear_hub_tt_buffer(xhci, td, ep); xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET); - return 0; + return; case COMP_STALL_ERROR: /* * xhci internal endpoint state will go to a "halt" state for @@ -2242,28 +2277,22 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET); - return 0; /* xhci_handle_halted_endpoint marked td cancelled */ + return; /* xhci_handle_halted_endpoint marked td cancelled */ default: break; } - /* Update ring dequeue pointer */ - ep_ring->dequeue = td->last_trb; - ep_ring->deq_seg = td->last_trb_seg; - inc_deq(xhci, ep_ring); - - return xhci_td_cleanup(xhci, td, ep_ring, td->status); + xhci_dequeue_td(xhci, td, ep_ring, td->status); } -/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */ -static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, - union xhci_trb *stop_trb) +/* sum trb lengths from the first trb up to stop_trb, _excluding_ stop_trb */ +static u32 sum_trb_lengths(struct xhci_td *td, union xhci_trb *stop_trb) { u32 sum; - union xhci_trb *trb = ring->dequeue; - struct xhci_segment *seg = ring->deq_seg; + union xhci_trb *trb = td->start_trb; + struct xhci_segment *seg = td->start_seg; - for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) { + for (sum = 0; trb != stop_trb; next_trb(&seg, &trb)) { if (!trb_is_noop(trb) && !trb_is_link(trb)) sum += TRB_LEN(le32_to_cpu(trb->generic.field[2])); } @@ -2273,9 +2302,9 @@ static int 
sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, /* * Process control tds, update urb status and actual_length. */ -static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, - struct xhci_ring *ep_ring, struct xhci_td *td, - union xhci_trb *ep_trb, struct xhci_transfer_event *event) +static void process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + struct xhci_ring *ep_ring, struct xhci_td *td, + union xhci_trb *ep_trb, struct xhci_transfer_event *event) { struct xhci_ep_ctx *ep_ctx; u32 trb_comp_code; @@ -2327,8 +2356,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, case COMP_STOPPED_LENGTH_INVALID: goto finish_td; default: - if (!xhci_requires_manual_halt_cleanup(xhci, - ep_ctx, trb_comp_code)) + if (!xhci_halted_host_endpoint(ep_ctx, trb_comp_code)) break; xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", trb_comp_code, ep->ep_index); @@ -2355,7 +2383,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, td->urb_length_set = true; td->urb->actual_length = requested - remaining; xhci_dbg(xhci, "Waiting for status stage event\n"); - return 0; + return; } /* at status stage */ @@ -2363,15 +2391,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, td->urb->actual_length = requested; finish_td: - return finish_td(xhci, ep, ep_ring, td, trb_comp_code); + finish_td(xhci, ep, ep_ring, td, trb_comp_code); } /* * Process isochronous tds, update urb packet status and actual_length. */ -static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, - struct xhci_ring *ep_ring, struct xhci_td *td, - union xhci_trb *ep_trb, struct xhci_transfer_event *event) +static void process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + struct xhci_ring *ep_ring, struct xhci_td *td, + union xhci_trb *ep_trb, struct xhci_transfer_event *event) { struct urb_priv *urb_priv; int idx; @@ -2399,8 +2427,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, break; if (remaining) { frame->status = short_framestatus; - if (xhci->quirks & XHCI_TRUST_TX_LENGTH) - sum_trbs_for_length = true; + sum_trbs_for_length = true; break; } frame->status = 0; @@ -2417,7 +2444,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, fallthrough; case COMP_ISOCH_BUFFER_OVERRUN: frame->status = -EOVERFLOW; - if (ep_trb != td->last_trb) + if (ep_trb != td->end_trb) + td->error_mid_td = true; + break; + case COMP_MISSED_SERVICE_ERROR: + frame->status = -EXDEV; + sum_trbs_for_length = true; + if (ep_trb != td->end_trb) td->error_mid_td = true; break; case COMP_INCOMPATIBLE_DEVICE_ERROR: @@ -2427,19 +2460,21 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, case COMP_USB_TRANSACTION_ERROR: frame->status = -EPROTO; sum_trbs_for_length = true; - if (ep_trb != td->last_trb) + if (ep_trb != td->end_trb) td->error_mid_td = true; break; case COMP_STOPPED: sum_trbs_for_length = true; break; case COMP_STOPPED_SHORT_PACKET: - /* field normally containing residue now contains tranferred */ + /* field normally containing residue now contains transferred */ frame->status = short_framestatus; requested = remaining; break; case COMP_STOPPED_LENGTH_INVALID: - requested = 0; + /* exclude stopped trb with invalid length from length sum */ + sum_trbs_for_length = true; + ep_trb_len = 0; remaining = 0; break; default: @@ -2452,7 +2487,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, goto 
finish_td; if (sum_trbs_for_length) - frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) + + frame->actual_length = sum_trb_lengths(td, ep_trb) + ep_trb_len - remaining; else frame->actual_length = requested; @@ -2461,17 +2496,16 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, finish_td: /* Don't give back TD yet if we encountered an error mid TD */ - if (td->error_mid_td && ep_trb != td->last_trb) { + if (td->error_mid_td && ep_trb != td->end_trb) { xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n"); td->urb_length_set = true; - return 0; + return; } - - return finish_td(xhci, ep, ep_ring, td, trb_comp_code); + finish_td(xhci, ep, ep_ring, td, trb_comp_code); } -static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, - struct xhci_virt_ep *ep, int status) +static void skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, + struct xhci_virt_ep *ep, int status) { struct urb_priv *urb_priv; struct usb_iso_packet_descriptor *frame; @@ -2487,20 +2521,15 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, /* calc actual length */ frame->actual_length = 0; - /* Update ring dequeue pointer */ - ep->ring->dequeue = td->last_trb; - ep->ring->deq_seg = td->last_trb_seg; - inc_deq(xhci, ep->ring); - - return xhci_td_cleanup(xhci, td, ep->ring, status); + xhci_dequeue_td(xhci, td, ep->ring, status); } /* * Process bulk and interrupt tds, update urb status and actual_length. */ -static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, - struct xhci_ring *ep_ring, struct xhci_td *td, - union xhci_trb *ep_trb, struct xhci_transfer_event *event) +static void process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + struct xhci_ring *ep_ring, struct xhci_td *td, + union xhci_trb *ep_trb, struct xhci_transfer_event *event) { struct xhci_slot_ctx *slot_ctx; u32 trb_comp_code; @@ -2516,7 +2545,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, case COMP_SUCCESS: ep->err_count = 0; /* handle success with untransferred data as short packet */ - if (ep_trb != td->last_trb || remaining) { + if (ep_trb != td->end_trb || remaining) { xhci_warn(xhci, "WARN Successful completion on short TX\n"); xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", td->urb->ep->desc.bEndpointAddress, @@ -2525,9 +2554,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, td->status = 0; break; case COMP_SHORT_PACKET: - xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", - td->urb->ep->desc.bEndpointAddress, - requested, remaining); td->status = 0; break; case COMP_STOPPED_SHORT_PACKET: @@ -2535,9 +2561,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, goto finish_td; case COMP_STOPPED_LENGTH_INVALID: /* stopped on ep trb with invalid length, exclude it */ - ep_trb_len = 0; - remaining = 0; - break; + td->urb->actual_length = sum_trb_lengths(td, ep_trb); + goto finish_td; case COMP_USB_TRANSACTION_ERROR: if (xhci->quirks & XHCI_NO_SOFT_RETRY || (ep->err_count++ > MAX_SOFT_RETRY) || @@ -2547,17 +2572,17 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, td->status = 0; xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET); - return 0; + return; default: /* do nothing */ break; } - if (ep_trb == td->last_trb) + if (ep_trb == td->end_trb) td->urb->actual_length = requested - remaining; else td->urb->actual_length = - sum_trb_lengths(xhci, 
ep_ring, ep_trb) + + sum_trb_lengths(td, ep_trb) + ep_trb_len - remaining; finish_td: if (remaining > requested) { @@ -2566,7 +2591,50 @@ finish_td: td->urb->actual_length = 0; } - return finish_td(xhci, ep, ep_ring, td, trb_comp_code); + finish_td(xhci, ep, ep_ring, td, trb_comp_code); +} + +/* Transfer events which don't point to a transfer TRB, see xhci 4.17.4 */ +static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + u32 trb_comp_code) +{ + switch (trb_comp_code) { + case COMP_STALL_ERROR: + case COMP_USB_TRANSACTION_ERROR: + case COMP_INVALID_STREAM_TYPE_ERROR: + case COMP_INVALID_STREAM_ID_ERROR: + xhci_dbg(xhci, "Stream transaction error ep %u no id\n", ep->ep_index); + if (ep->err_count++ > MAX_SOFT_RETRY) + xhci_handle_halted_endpoint(xhci, ep, NULL, EP_HARD_RESET); + else + xhci_handle_halted_endpoint(xhci, ep, NULL, EP_SOFT_RESET); + break; + case COMP_RING_UNDERRUN: + case COMP_RING_OVERRUN: + case COMP_STOPPED_LENGTH_INVALID: + break; + default: + xhci_err(xhci, "Transfer event %u for unknown stream ring slot %u ep %u\n", + trb_comp_code, ep->vdev->slot_id, ep->ep_index); + return -ENODEV; + } + return 0; +} + +static bool xhci_spurious_success_tx_event(struct xhci_hcd *xhci, + struct xhci_ring *ring) +{ + switch (ring->old_trb_comp_code) { + case COMP_SHORT_PACKET: + return xhci->quirks & XHCI_SPURIOUS_SUCCESS; + case COMP_USB_TRANSACTION_ERROR: + case COMP_BABBLE_DETECTED_ERROR: + case COMP_ISOCH_BUFFER_OVERRUN: + return xhci->quirks & XHCI_ETRON_HOST && + ring->type == TYPE_ISOC; + default: + return false; + } } /* @@ -2589,8 +2657,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, int status = -EINPROGRESS; struct xhci_ep_ctx *ep_ctx; u32 trb_comp_code; - int td_num = 0; - bool handling_skipped_tds = false; + bool ring_xrun_event = false; slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; @@ -2613,36 +2680,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, goto err_out; } - /* Some transfer events don't always point to a trb, see xhci 4.17.4 */ - if (!ep_ring) { - switch (trb_comp_code) { - case COMP_STALL_ERROR: - case COMP_USB_TRANSACTION_ERROR: - case COMP_INVALID_STREAM_TYPE_ERROR: - case COMP_INVALID_STREAM_ID_ERROR: - xhci_dbg(xhci, "Stream transaction error ep %u no id\n", - ep_index); - if (ep->err_count++ > MAX_SOFT_RETRY) - xhci_handle_halted_endpoint(xhci, ep, NULL, - EP_HARD_RESET); - else - xhci_handle_halted_endpoint(xhci, ep, NULL, - EP_SOFT_RESET); - goto cleanup; - case COMP_RING_UNDERRUN: - case COMP_RING_OVERRUN: - case COMP_STOPPED_LENGTH_INVALID: - goto cleanup; - default: - xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n", - slot_id, ep_index); - goto err_out; - } - } - - /* Count current td numbers if ep->skip is set */ - if (ep->skip) - td_num += list_count_nodes(&ep_ring->td_list); + if (!ep_ring) + return handle_transferless_tx_event(xhci, ep, trb_comp_code); /* Look for common error cases */ switch (trb_comp_code) { @@ -2650,15 +2689,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, * transfer type */ case COMP_SUCCESS: - if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) - break; - if (xhci->quirks & XHCI_TRUST_TX_LENGTH || - ep_ring->last_td_was_short) + if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { trb_comp_code = COMP_SHORT_PACKET; - else - xhci_warn_ratelimited(xhci, - "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n", - slot_id, ep_index); + 
xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td comp code %d\n", + slot_id, ep_index, ep_ring->old_trb_comp_code); + } break; case COMP_SHORT_PACKET: break; @@ -2728,21 +2763,13 @@ static int handle_tx_event(struct xhci_hcd *xhci, * a Ring Overrun Event for IN Isoch endpoint or Ring * Underrun Event for OUT Isoch endpoint. */ - xhci_dbg(xhci, "underrun event on endpoint\n"); - if (!list_empty(&ep_ring->td_list)) - xhci_dbg(xhci, "Underrun Event for slot %d ep %d " - "still with TDs queued?\n", - TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), - ep_index); - goto cleanup; + xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index); + ring_xrun_event = true; + break; case COMP_RING_OVERRUN: - xhci_dbg(xhci, "overrun event on endpoint\n"); - if (!list_empty(&ep_ring->td_list)) - xhci_dbg(xhci, "Overrun Event for slot %d ep %d " - "still with TDs queued?\n", - TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), - ep_index); - goto cleanup; + xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index); + ring_xrun_event = true; + break; case COMP_MISSED_SERVICE_ERROR: /* * When encounter missed service error, one or more isoc tds * may be flushed by xHC. */ ep->skip = true; xhci_dbg(xhci, - "Miss service interval error for slot %u ep %u, set skip flag\n", - slot_id, ep_index); - goto cleanup; + "Miss service interval error for slot %u ep %u, set skip flag%s\n", + slot_id, ep_index, ep_trb_dma ? ", skip now" : ""); + break; case COMP_NO_PING_RESPONSE_ERROR: ep->skip = true; xhci_dbg(xhci, "No Ping response error for slot %u ep %u, Skip one Isoc TD\n", slot_id, ep_index); - goto cleanup; + return 0; case COMP_INCOMPATIBLE_DEVICE_ERROR: /* needs disable slot command to recover */ @@ -2777,134 +2804,122 @@ static int handle_tx_event(struct xhci_hcd *xhci, xhci_warn(xhci, "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n", trb_comp_code, slot_id, ep_index); - goto cleanup; + if (ep->skip) + break; + return 0; } - do { - /* This TRB should be in the TD at the head of this ring's - * TD list. - */ - if (list_empty(&ep_ring->td_list)) { - /* - * Don't print wanings if it's due to a stopped endpoint - * generating an extra completion event if the device - * was suspended. Or, a event for the last TRB of a - * short TD we already got a short event for. - * The short TD is already removed from the TD list. - */ + /* + * xhci 4.10.2 states isoc endpoints should continue + * processing the next TD if there was an error mid TD. + * So hosts like NEC don't generate an event for the last + * isoc TRB even if the IOC flag is set. + * xhci 4.9.1 states that if there are errors in multi-TRB + * TDs xHC should generate an error for that TRB, and if xHC + * proceeds to the next TD it should generate an event for + * any TRB with IOC flag on the way. Other hosts follow this. + * + * We wait for the final IOC event, but if we get an event + * anywhere outside this TD, just give it back already. + */ + td = list_first_entry_or_null(&ep_ring->td_list, struct xhci_td, td_list); - if (!(trb_comp_code == COMP_STOPPED || - trb_comp_code == COMP_STOPPED_LENGTH_INVALID || - ep_ring->last_td_was_short)) { - xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", - TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), - ep_index); - } - if (ep->skip) { - ep->skip = false; - xhci_dbg(xhci, "td_list is empty while skip flag set.
Clear skip flag for slot %u ep %u.\n", - slot_id, ep_index); - } - if (trb_comp_code == COMP_STALL_ERROR || - xhci_requires_manual_halt_cleanup(xhci, ep_ctx, - trb_comp_code)) { - xhci_handle_halted_endpoint(xhci, ep, NULL, - EP_HARD_RESET); - } - goto cleanup; - } + if (td && td->error_mid_td && !trb_in_td(td, ep_trb_dma)) { + xhci_dbg(xhci, "Missing TD completion event after mid TD error\n"); + xhci_dequeue_td(xhci, td, ep_ring, td->status); + } - /* We've skipped all the TDs on the ep ring when ep->skip set */ - if (ep->skip && td_num == 0) { - ep->skip = false; - xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n", - slot_id, ep_index); - goto cleanup; + /* If the TRB pointer is NULL, missed TDs will be skipped on the next event */ + if (trb_comp_code == COMP_MISSED_SERVICE_ERROR && !ep_trb_dma) + return 0; + + if (list_empty(&ep_ring->td_list)) { + /* + * Don't print warnings if ring is empty due to a stopped endpoint generating an + * extra completion event if the device was suspended. Or, an event for the last TRB + * of a short TD we already got a short event for. The short TD is already removed + * from the TD list. + */ + if (trb_comp_code != COMP_STOPPED && + trb_comp_code != COMP_STOPPED_LENGTH_INVALID && + !ring_xrun_event && + !xhci_spurious_success_tx_event(xhci, ep_ring)) { + xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n", + slot_id, ep_index); } + ep->skip = false; + goto check_endpoint_halted; + } + + do { td = list_first_entry(&ep_ring->td_list, struct xhci_td, td_list); - if (ep->skip) - td_num--; /* Is this a TRB in the currently executing TD? */ - ep_seg = trb_in_td(xhci, td->start_seg, td->first_trb, - td->last_trb, ep_trb_dma, false); - - /* - * Skip the Force Stopped Event. The event_trb(event_dma) of FSE - * is not in the current TD pointed by ep_ring->dequeue because - * that the hardware dequeue pointer still at the previous TRB - * of the current TD. The previous TRB maybe a Link TD or the - * last TRB of the previous TD. The command completion handle - * will take care the rest. - */ - if (!ep_seg && (trb_comp_code == COMP_STOPPED || - trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) { - goto cleanup; - } + ep_seg = trb_in_td(td, ep_trb_dma); if (!ep_seg) { if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { + /* this event is unlikely to match any TD, don't skip them all */ + if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID) + return 0; + skip_isoc_td(xhci, td, ep, status); + + if (!list_empty(&ep_ring->td_list)) { + if (ring_xrun_event) { + /* + * If we are here, we are on an xHCI 1.0 host with no + * idea how many TDs were missed or where the xrun + * occurred. New TDs may have been added after the + * xrun, so skip only one TD to be safe. + */ + xhci_dbg(xhci, "Skipped one TD for slot %u ep %u", + slot_id, ep_index); + return 0; + } + continue; + } + + xhci_dbg(xhci, "All TDs skipped for slot %u ep %u. Clear skip flag.\n", + slot_id, ep_index); + ep->skip = false; + td = NULL; + goto check_endpoint_halted; } + /* TD was queued after xrun, maybe xrun was on a link, don't panic yet */ + if (ring_xrun_event) + return 0; + /* + * Skip the Force Stopped Event. The 'ep_trb' of FSE is not in the current + * TD pointed by 'ep_ring->dequeue' because the hardware dequeue + * pointer is still at the previous TRB of the current TD. The previous TRB + * may be a Link TRB or the last TRB of the previous TD.
The command + * completion handler will take care of the rest. */ - if (trb_comp_code == COMP_STOPPED || - trb_comp_code == COMP_STOPPED_LENGTH_INVALID) { - goto cleanup; + if (trb_comp_code == COMP_STOPPED || + trb_comp_code == COMP_STOPPED_LENGTH_INVALID) { + return 0; } /* - * xhci 4.10.2 states isoc endpoints should continue - * processing the next TD if there was an error mid TD. - * So host like NEC don't generate an event for the last - * isoc TRB even if the IOC flag is set. - * xhci 4.9.1 states that if there are errors in mult-TRB - * TDs xHC should generate an error for that TRB, and if xHC - * proceeds to the next TD it should genete an event for - * any TRB with IOC flag on the way. Other host follow this. - * So this event might be for the next TD. + * Some hosts give a spurious success event after a short + * transfer or error on last TRB. Ignore it. */ - if (td->error_mid_td && - !list_is_last(&td->td_list, &ep_ring->td_list)) { - struct xhci_td *td_next = list_next_entry(td, td_list); - - ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb, - td_next->last_trb, ep_trb_dma, false); - if (ep_seg) { - /* give back previous TD, start handling new */ - xhci_dbg(xhci, "Missing TD completion event after mid TD error\n"); - ep_ring->dequeue = td->last_trb; - ep_ring->deq_seg = td->last_trb_seg; - inc_deq(xhci, ep_ring); - xhci_td_cleanup(xhci, td, ep_ring, td->status); - td = td_next; - } + if (xhci_spurious_success_tx_event(xhci, ep_ring)) { + xhci_dbg(xhci, "Spurious event dma %pad, comp_code %u after %u\n", + &ep_trb_dma, trb_comp_code, ep_ring->old_trb_comp_code); + ep_ring->old_trb_comp_code = 0; + return 0; } - if (!ep_seg) { - /* HC is busted, give up! */ - xhci_err(xhci, - "ERROR Transfer event TRB DMA ptr not " - "part of current TD ep_index %d " - "comp_code %u\n", ep_index, - trb_comp_code); - trb_in_td(xhci, td->start_seg, td->first_trb, - td->last_trb, ep_trb_dma, true); - return -ESHUTDOWN; - } + /* HC is busted, give up! */ + goto debug_finding_td; } - if (trb_comp_code == COMP_SHORT_PACKET) - ep_ring->last_td_was_short = true; - else - ep_ring->last_td_was_short = false; if (ep->skip) { xhci_dbg(xhci,
* Process them as short transfer until reach the td pointed by * the event. */ - } while (handling_skipped_tds); + } while (ep->skip); + + ep_ring->old_trb_comp_code = trb_comp_code; + + /* Get out if a TD was queued at enqueue after the xrun occurred */ + if (ring_xrun_event) + return 0; + + ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)]; + trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb, ep_trb_dma); + /* + * No-op TRB could trigger interrupts in a case where a URB was killed + * and a STALL_ERROR happens right after the endpoint ring stopped. + * Reset the halted endpoint. Otherwise, the endpoint remains stalled + * indefinitely. + */ + + if (trb_is_noop(ep_trb)) + goto check_endpoint_halted; + + td->status = status; + + /* update the urb's actual_length and give back to the core */ + if (usb_endpoint_xfer_control(&td->urb->ep->desc)) + process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event); + else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) + process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event); + else + process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event); return 0; +check_endpoint_halted: + if (xhci_halted_host_endpoint(ep_ctx, trb_comp_code)) + xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET); + + return 0; + +debug_finding_td: + xhci_err(xhci, "Event dma %pad for ep %d status %d not part of TD at %016llx - %016llx\n", + &ep_trb_dma, ep_index, trb_comp_code, + (unsigned long long)xhci_trb_virt_to_dma(td->start_seg, td->start_trb), + (unsigned long long)xhci_trb_virt_to_dma(td->end_seg, td->end_trb)); + + return -ESHUTDOWN; + err_out: xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", (unsigned long long) xhci_trb_virt_to_dma( @@ -2981,7 +3001,9 @@ static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter { u32 trb_type; - trace_xhci_handle_event(ir->event_ring, &event->generic); + trace_xhci_handle_event(ir->event_ring, &event->generic, + xhci_trb_virt_to_dma(ir->event_ring->deq_seg, + ir->event_ring->dequeue)); /* * Barrier between reading the TRB_CYCLE (valid) flag before, and any @@ -2996,7 +3018,7 @@ static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter handle_cmd_completion(xhci, &event->event_cmd); break; case TRB_PORT_STATUS: - handle_port_status(xhci, ir, event); + handle_port_status(xhci, event); break; case TRB_TRANSFER: handle_tx_event(xhci, ir, &event->trans_event); @@ -3026,9 +3048,9 @@ static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter * - When all events have finished * - To avoid "Event Ring Full Error" condition */ -static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, - struct xhci_interrupter *ir, - bool clear_ehb) +void xhci_update_erst_dequeue(struct xhci_hcd *xhci, + struct xhci_interrupter *ir, + bool clear_ehb) { u64 temp_64; dma_addr_t deq; @@ -3056,15 +3078,17 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, } /* Clear the interrupt pending bit for a specific interrupter. 
*/ -static void xhci_clear_interrupt_pending(struct xhci_hcd *xhci, - struct xhci_interrupter *ir) +static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir) { if (!ir->ip_autoclear) { - u32 irq_pending; + u32 iman; + + iman = readl(&ir->ir_set->iman); + iman |= IMAN_IP; + writel(iman, &ir->ir_set->iman); - irq_pending = readl(&ir->ir_set->irq_pending); - irq_pending |= IMAN_IP; - writel(irq_pending, &ir->ir_set->irq_pending); + /* Read operation to guarantee the write has been flushed from posted buffers */ + readl(&ir->ir_set->iman); } } @@ -3072,13 +3096,14 @@ static void xhci_clear_interrupt_pending(struct xhci_hcd *xhci, * Handle all OS-owned events on an interrupter event ring. It may drop * and reaquire xhci->lock between event processing. */ -static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir) +static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir, + bool skip_events) { int event_loop = 0; - int err; + int err = 0; u64 temp; - xhci_clear_interrupt_pending(xhci, ir); + xhci_clear_interrupt_pending(ir); /* Event ring hasn't been allocated yet. */ if (!ir->event_ring || !ir->event_ring->dequeue) { @@ -3098,7 +3123,8 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir /* Process all OS owned event TRBs on this event ring */ while (unhandled_event_trb(ir->event_ring)) { - err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue); + if (!skip_events) + err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue); /* * If half a segment of events have been handled in one go then @@ -3126,6 +3152,37 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir } /* + * Move the event ring dequeue pointer to skip events kept in the secondary + * event ring. This is used to ensure that pending events in the ring are + * acknowledged, so the xHCI HCD can properly enter suspend/resume. The + * secondary ring is typically maintained by an external component. + */ +void xhci_skip_sec_intr_events(struct xhci_hcd *xhci, + struct xhci_ring *ring, struct xhci_interrupter *ir) +{ + union xhci_trb *current_trb; + u64 erdp_reg; + dma_addr_t deq; + + /* disable irq, ack pending interrupt and ack all pending events */ + xhci_disable_interrupter(xhci, ir); + + /* last acked event trb is in erdp reg */ + erdp_reg = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); + deq = (dma_addr_t)(erdp_reg & ERST_PTR_MASK); + if (!deq) { + xhci_err(xhci, "event ring handling not required\n"); + return; + } + + current_trb = ir->event_ring->dequeue; + /* read cycle state of the last acked trb to find out CCS */ + ring->cycle_state = le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE; + + xhci_handle_events(xhci, ir, true); +} + +/* * xHCI spec says we can get an interrupt, and if the HC has an error condition, * we might get bad data out of the event ring. Section 4.10.2.7 has a list of * indicators of an event TRB error, but we check the status *first* to be safe. 
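The IMAN handling above is the usual MMIO idiom for a write-1-to-clear bit: read the register, set IP, write it back, then read once more so the posted write is flushed before the handler returns. A user-space model of those semantics (fake register, no real MMIO):

#include <stdint.h>
#include <stdio.h>

#define IMAN_IP (1u << 0)	/* interrupt pending, write-1-to-clear */
#define IMAN_IE (1u << 1)	/* interrupt enable, plain read/write */

static uint32_t fake_iman = IMAN_IP | IMAN_IE;

static uint32_t reg_read(void) { return fake_iman; }

static void reg_write(uint32_t v)
{
	uint32_t ip = fake_iman & IMAN_IP;

	if (v & IMAN_IP)	/* writing 1 clears pending */
		ip = 0;
	fake_iman = (v & ~IMAN_IP) | ip;
}

int main(void)
{
	reg_write(reg_read() | IMAN_IP);	/* ack the interrupt */
	(void)reg_read();			/* flush the posted write */
	printf("IP after ack: %u\n", reg_read() & IMAN_IP);	/* 0 */
	return 0;
}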
@@ -3133,7 +3190,7 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir irqreturn_t xhci_irq(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); - irqreturn_t ret = IRQ_NONE; + irqreturn_t ret = IRQ_HANDLED; u32 status; spin_lock(&xhci->lock); @@ -3141,12 +3198,13 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) status = readl(&xhci->op_regs->status); if (status == ~(u32)0) { xhci_hc_died(xhci); - ret = IRQ_HANDLED; goto out; } - if (!(status & STS_EINT)) + if (!(status & STS_EINT)) { + ret = IRQ_NONE; goto out; + } if (status & STS_HCE) { xhci_warn(xhci, "WARNING: Host Controller Error\n"); @@ -3156,7 +3214,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) if (status & STS_FATAL) { xhci_warn(xhci, "WARNING: Host System Error\n"); xhci_halt(xhci); - ret = IRQ_HANDLED; goto out; } @@ -3167,10 +3224,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) */ status |= STS_EINT; writel(status, &xhci->op_regs->status); - ret = IRQ_HANDLED; /* This is the handler of the primary interrupter */ - xhci_handle_events(xhci, xhci->interrupters[0]); + xhci_handle_events(xhci, xhci->interrupters[0], false); out: spin_unlock(&xhci->lock); @@ -3206,7 +3262,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, wmb(); trb->field[3] = cpu_to_le32(field4); - trace_xhci_queue_trb(ring, trb); + trace_xhci_queue_trb(ring, trb, + xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue)); inc_enq(xhci, ring, more_trbs_coming); } @@ -3218,7 +3275,6 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) { - unsigned int link_trb_count = 0; unsigned int new_segs = 0; /* Make sure the endpoint has been added to xHC schedule */ @@ -3252,7 +3308,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, if (ep_ring != xhci->cmd_ring) { new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs); - } else if (xhci_num_trbs_free(xhci, ep_ring) <= num_trbs) { + } else if (xhci_num_trbs_free(ep_ring) <= num_trbs) { xhci_err(xhci, "Do not support expand command ring\n"); return -ENOMEM; } @@ -3266,35 +3322,9 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, } } - while (trb_is_link(ep_ring->enqueue)) { - /* If we're not dealing with 0.95 hardware or isoc rings - * on AMD 0.96 host, clear the chain bit. - */ - if (!xhci_link_trb_quirk(xhci) && - !(ep_ring->type == TYPE_ISOC && - (xhci->quirks & XHCI_AMD_0x96_HOST))) - ep_ring->enqueue->link.control &= - cpu_to_le32(~TRB_CHAIN); - else - ep_ring->enqueue->link.control |= - cpu_to_le32(TRB_CHAIN); - - wmb(); - ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); - - /* Toggle the cycle bit after the last ring segment. 
*/ - if (link_trb_toggles_cycle(ep_ring->enqueue)) - ep_ring->cycle_state ^= 1; - - ep_ring->enq_seg = ep_ring->enq_seg->next; - ep_ring->enqueue = ep_ring->enq_seg->trbs; - - /* prevent infinite loop if all first trbs are link trbs */ - if (link_trb_count++ > ep_ring->num_segs) { - xhci_warn(xhci, "Ring is an endless link TRB loop\n"); - return -EINVAL; - } - } + /* Ensure that new TRBs won't overwrite a link */ + if (trb_is_link(ep_ring->enqueue)) + inc_enq_past_link(xhci, ep_ring, 0); if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) { xhci_warn(xhci, "Missing link TRB at end of ring segment\n"); @@ -3348,7 +3378,7 @@ static int prepare_transfer(struct xhci_hcd *xhci, /* Add this TD to the tail of the endpoint ring's TD list */ list_add_tail(&td->td_list, &ep_ring->td_list); td->start_seg = ep_ring->enq_seg; - td->first_trb = ep_ring->enqueue; + td->start_trb = ep_ring->enqueue; return 0; } @@ -3427,8 +3457,7 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); } -static void check_interval(struct xhci_hcd *xhci, struct urb *urb, - struct xhci_ep_ctx *ep_ctx) +static void check_interval(struct urb *urb, struct xhci_ep_ctx *ep_ctx) { int xhci_interval; int ep_interval; @@ -3447,8 +3476,8 @@ static void check_interval(struct xhci_hcd *xhci, struct urb *urb, if (xhci_interval != ep_interval) { dev_dbg_ratelimited(&urb->dev->dev, "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n", - ep_interval, ep_interval == 1 ? "" : "s", - xhci_interval, xhci_interval == 1 ? "" : "s"); + ep_interval, str_plural(ep_interval), + xhci_interval, str_plural(xhci_interval)); urb->interval = xhci_interval; /* Convert back to frames for LS/FS devices */ if (urb->dev->speed == USB_SPEED_LOW || @@ -3469,7 +3498,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct xhci_ep_ctx *ep_ctx; ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index); - check_interval(xhci, urb, ep_ctx); + check_interval(urb, ep_ctx); return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); } @@ -3688,8 +3717,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, field &= ~TRB_CHAIN; field |= TRB_IOC; more_trbs_coming = false; - td->last_trb = ring->enqueue; - td->last_trb_seg = ring->enq_seg; + td->end_trb = ring->enqueue; + td->end_seg = ring->enq_seg; if (xhci_urb_suitable_for_idt(urb)) { memcpy(&send_addr, urb->transfer_buffer, trb_buff_len); @@ -3715,7 +3744,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, upper_32_bits(send_addr), length_field, field); - td->num_trbs++; addr += trb_buff_len; sent_len = trb_buff_len; @@ -3738,11 +3766,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, urb->stream_id, 1, urb, 1, mem_flags); - urb_priv->td[1].last_trb = ring->enqueue; - urb_priv->td[1].last_trb_seg = ring->enq_seg; + urb_priv->td[1].end_trb = ring->enqueue; + urb_priv->td[1].end_seg = ring->enq_seg; field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC; queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field); - urb_priv->td[1].num_trbs++; } check_trb_math(urb, enqd_len); @@ -3776,6 +3803,20 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, if (!urb->setup_packet) return -EINVAL; + if ((xhci->quirks & XHCI_ETRON_HOST) && + urb->dev->speed >= USB_SPEED_SUPER) { + /* + * If next available TRB is the Link TRB in the ring segment then + * enqueue a No Op 
TRB, this can prevent the Setup and Data Stage + * TRB from being broken by the Link TRB. + */ + if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue + 1)) { + field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state; + queue_trb(xhci, ep_ring, false, 0, 0, + TRB_INTR_TARGET(0), field); + } + } + /* 1 TRB for setup, 1 for status */ num_trbs = 2; /* @@ -3793,7 +3834,6 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, urb_priv = urb->hcpriv; td = &urb_priv->td[0]; - td->num_trbs = num_trbs; /* * Don't give the first TRB to the hardware (by toggling the cycle bit) @@ -3865,8 +3905,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } /* Save the DMA address of the last TRB in the TD */ - td->last_trb = ep_ring->enqueue; - td->last_trb_seg = ep_ring->enq_seg; + td->end_trb = ep_ring->enqueue; + td->end_seg = ep_ring->enq_seg; /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ /* If the device sent data, the status stage is an OUT transfer */ @@ -3991,10 +4031,6 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci, start_frame_id = (start_frame_id >> 3) & 0x7ff; end_frame_id = (end_frame_id >> 3) & 0x7ff; - xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n", - __func__, index, readl(&xhci->run_regs->microframe_index), - start_frame_id, end_frame_id, start_frame); - if (start_frame_id < end_frame_id) { if (start_frame > end_frame_id || start_frame < start_frame_id) @@ -4114,7 +4150,6 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, goto cleanup; } td = &urb_priv->td[i]; - td->num_trbs = trbs_per_td; /* use SIA as default, if frame id is used overwrite it */ sia_frame_id = TRB_SIA; if (!(urb->transfer_flags & URB_ISO_ASAP) && @@ -4156,8 +4191,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, field |= TRB_CHAIN; } else { more_trbs_coming = false; - td->last_trb = ep_ring->enqueue; - td->last_trb_seg = ep_ring->enq_seg; + td->end_trb = ep_ring->enqueue; + td->end_seg = ep_ring->enq_seg; field |= TRB_IOC; if (trb_block_event_intr(xhci, num_tds, i, ir)) field |= TRB_BEI; @@ -4223,14 +4258,14 @@ cleanup: /* Use the first TD as a temporary variable to turn the TDs we've queued * into No-ops with a software-owned cycle bit. That way the hardware * won't accidentally start executing bogus TDs when we partially - * overwrite them. td->first_trb and td->start_seg are already set. + * overwrite them. td->start_trb and td->start_seg are already set. */ - urb_priv->td[0].last_trb = ep_ring->enqueue; + urb_priv->td[0].end_trb = ep_ring->enqueue; /* Every TRB except the first & last will have its cycle bit flipped. */ - td_to_noop(xhci, ep_ring, &urb_priv->td[0], true); + td_to_noop(&urb_priv->td[0], true); /* Reset the ring enqueue back to the first TRB and its cycle bit. */ - ep_ring->enqueue = urb_priv->td[0].first_trb; + ep_ring->enqueue = urb_priv->td[0].start_trb; ep_ring->enq_seg = urb_priv->td[0].start_seg; ep_ring->cycle_state = start_cycle; usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); @@ -4278,7 +4313,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, * Check interval value. This should be done before we start to * calculate the start frame value. */ - check_interval(xhci, urb, ep_ctx); + check_interval(urb, ep_ctx); /* Calculate the start frame and put it in urb->start_frame.
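check_interval() above normalizes both intervals to microframes before comparing, then silently adopts the xHCI value; for LS/FS devices the result is converted back to frames. The unit arithmetic in isolation (values are illustrative; the shift mirrors how the endpoint context encodes intervals as a power-of-two exponent):

#include <stdio.h>

/* endpoint context stores the interval as a power-of-two microframe exponent */
static int ep_ctx_interval_to_uframes(int exponent)
{
	return 1 << exponent;	/* 1 microframe = 125 us */
}

int main(void)
{
	int urb_interval_frames = 2;			/* what the class driver asked for */
	int urb_uframes = urb_interval_frames * 8;	/* FS: 1 frame = 8 microframes */
	int xhci_uframes = ep_ctx_interval_to_uframes(5); /* 32 microframes */

	if (urb_uframes != xhci_uframes) {
		urb_uframes = xhci_uframes;		/* trust the endpoint context */
		printf("interval adjusted to %d frame(s)\n", urb_uframes / 8);
	}
	return 0;
}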
*/ if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { @@ -4413,6 +4448,17 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, command_must_succeed); } +/* Queue a get root hub port bandwidth command TRB */ +int xhci_queue_get_port_bw(struct xhci_hcd *xhci, + struct xhci_command *cmd, dma_addr_t in_ctx_ptr, + u8 dev_speed, bool command_must_succeed) +{ + return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), + upper_32_bits(in_ctx_ptr), 0, + TRB_TYPE(TRB_GET_BW) | DEV_SPEED_FOR_TRB(dev_speed), + command_must_succeed); +} + /* Queue an evaluate context command TRB */ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) @@ -4431,7 +4477,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, int slot_id, unsigned int ep_index, int suspend) { u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); - u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); + u32 trb_ep_index = EP_INDEX_FOR_TRB(ep_index); u32 type = TRB_TYPE(TRB_STOP_RING); u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend); @@ -4444,7 +4490,7 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, enum xhci_ep_reset_type reset_type) { u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); - u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); + u32 trb_ep_index = EP_INDEX_FOR_TRB(ep_index); u32 type = TRB_TYPE(TRB_RESET_EP); if (reset_type == EP_SOFT_RESET) diff --git a/drivers/usb/host/xhci-rzv2m.c b/drivers/usb/host/xhci-rzv2m.c index ec65b24eafa8..4f59867d7117 100644 --- a/drivers/usb/host/xhci-rzv2m.c +++ b/drivers/usb/host/xhci-rzv2m.c @@ -6,6 +6,7 @@ */ #include <linux/usb/rzv2m_usb3drd.h> +#include "xhci.h" #include "xhci-plat.h" #include "xhci-rzv2m.h" diff --git a/drivers/usb/host/xhci-sideband.c b/drivers/usb/host/xhci-sideband.c new file mode 100644 index 000000000000..d49f9886dd84 --- /dev/null +++ b/drivers/usb/host/xhci-sideband.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * xHCI host controller sideband support + * + * Copyright (c) 2023-2025, Intel Corporation. + * + * Author: Mathias Nyman + */ + +#include <linux/usb/xhci-sideband.h> +#include <linux/dma-direct.h> + +#include "xhci.h" + +/* sideband internal helpers */ +static struct sg_table * +xhci_ring_to_sgtable(struct xhci_sideband *sb, struct xhci_ring *ring) +{ + struct xhci_segment *seg; + struct sg_table *sgt; + unsigned int n_pages; + struct page **pages; + struct device *dev; + size_t sz; + int i; + + dev = xhci_to_hcd(sb->xhci)->self.sysdev; + sz = ring->num_segs * TRB_SEGMENT_SIZE; + n_pages = PAGE_ALIGN(sz) >> PAGE_SHIFT; + pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL); + if (!pages) + return NULL; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + kvfree(pages); + return NULL; + } + + seg = ring->first_seg; + if (!seg) + goto err; + /* + * Rings can potentially have multiple segments, create an array that + * carries page references to allocated segments. Utilize the + * sg_alloc_table_from_pages() to create the sg table, and to ensure + * that page links are created. + */ + for (i = 0; i < ring->num_segs; i++) { + dma_get_sgtable(dev, sgt, seg->trbs, seg->dma, + TRB_SEGMENT_SIZE); + pages[i] = sg_page(sgt->sgl); + sg_free_table(sgt); + seg = seg->next; + } + + if (sg_alloc_table_from_pages(sgt, pages, n_pages, 0, sz, GFP_KERNEL)) + goto err; + + /* + * Save first segment dma address to sg dma_address field for the sideband + * client to have access to the IOVA of the ring. 
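One consequence of this assignment: a sideband client can read the transfer ring's IOVA straight off the returned table's first entry instead of doing its own mapping. A hedged client-side sketch (the helper name is invented; only sg_dma_address() and the overloaded first entry come from the code above):

#include <linux/scatterlist.h>

/* IOVA of ring segment 0, as stashed by xhci_ring_to_sgtable() above */
static dma_addr_t sideband_ring_base(struct sg_table *sgt)
{
	return sg_dma_address(sgt->sgl);
}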
+ */ + sg_dma_address(sgt->sgl) = ring->first_seg->dma; + + return sgt; + +err: + kvfree(pages); + kfree(sgt); + + return NULL; +} + +static void +__xhci_sideband_remove_endpoint(struct xhci_sideband *sb, struct xhci_virt_ep *ep) +{ + /* + * Issue a stop endpoint command when an endpoint is removed. + * The stop ep cmd handler will handle the ring cleanup. + */ + xhci_stop_endpoint_sync(sb->xhci, ep, 0, GFP_KERNEL); + + ep->sideband = NULL; + sb->eps[ep->ep_index] = NULL; +} + +/* sideband api functions */ + +/** + * xhci_sideband_notify_ep_ring_free - notify client of xfer ring free + * @sb: sideband instance for this usb device + * @ep_index: usb endpoint index + * + * Notifies the xHCI sideband client driver of an xHCI transfer ring free + * routine. This will allow for the client to ensure that all transfers + * are completed. + * + * The callback should be synchronous, as the ring free happens after. + */ +void xhci_sideband_notify_ep_ring_free(struct xhci_sideband *sb, + unsigned int ep_index) +{ + struct xhci_sideband_event evt; + + evt.type = XHCI_SIDEBAND_XFER_RING_FREE; + evt.evt_data = &ep_index; + + if (sb->notify_client) + sb->notify_client(sb->intf, &evt); +} +EXPORT_SYMBOL_GPL(xhci_sideband_notify_ep_ring_free); + +/** + * xhci_sideband_add_endpoint - add endpoint to sideband access list + * @sb: sideband instance for this usb device + * @host_ep: usb host endpoint + * + * Adds an endpoint to the list of sideband accessed endpoints for this usb + * device. + * After an endpoint is added the sideband client can get the endpoint transfer + * ring buffer by calling xhci_sideband_get_endpoint_buffer() + * + * Return: 0 on success, negative error otherwise. + */ +int +xhci_sideband_add_endpoint(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep) +{ + struct xhci_virt_ep *ep; + unsigned int ep_index; + + mutex_lock(&sb->mutex); + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = &sb->vdev->eps[ep_index]; + + if (ep->ep_state & EP_HAS_STREAMS) { + mutex_unlock(&sb->mutex); + return -EINVAL; + } + + /* + * Note, we don't know the DMA mask of the audio DSP device, if it's + * smaller than for xhci it won't be able to access the endpoint ring + * buffer. This could be solved by not allowing the audio class driver + * to add the endpoint the normal way, but instead offload it immediately, + * and let this function add the endpoint and allocate the ring buffer + * with the smallest common DMA mask. + */ + if (sb->eps[ep_index] || ep->sideband) { + mutex_unlock(&sb->mutex); + return -EBUSY; + } + + ep->sideband = sb; + sb->eps[ep_index] = ep; + mutex_unlock(&sb->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(xhci_sideband_add_endpoint); + +/** + * xhci_sideband_remove_endpoint - remove endpoint from sideband access list + * @sb: sideband instance for this usb device + * @host_ep: usb host endpoint + * + * Removes an endpoint from the list of sideband accessed endpoints for this usb + * device. + * The sideband client should no longer touch the endpoint transfer buffer after + * calling this. + * + * Return: 0 on success, negative error otherwise.
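Taken together, the add/get/remove calls suggest a client flow like the following sketch; claim_ep_ring() is a hypothetical helper, and per the kernel-doc above the caller owns and must free the returned sg_table:

#include <linux/err.h>
#include <linux/usb.h>
#include <linux/usb/xhci-sideband.h>

static struct sg_table *claim_ep_ring(struct xhci_sideband *sb,
				      struct usb_host_endpoint *host_ep)
{
	struct sg_table *sgt;
	int ret;

	ret = xhci_sideband_add_endpoint(sb, host_ep);
	if (ret)	/* -EBUSY if another sideband already owns it */
		return ERR_PTR(ret);

	sgt = xhci_sideband_get_endpoint_buffer(sb, host_ep);
	if (!sgt) {
		xhci_sideband_remove_endpoint(sb, host_ep);
		return ERR_PTR(-ENOMEM);
	}
	return sgt;
}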
+ */ +int +xhci_sideband_remove_endpoint(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep) +{ + struct xhci_virt_ep *ep; + unsigned int ep_index; + + mutex_lock(&sb->mutex); + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = sb->eps[ep_index]; + + if (!ep || !ep->sideband || ep->sideband != sb) { + mutex_unlock(&sb->mutex); + return -ENODEV; + } + + __xhci_sideband_remove_endpoint(sb, ep); + xhci_initialize_ring_info(ep->ring); + mutex_unlock(&sb->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(xhci_sideband_remove_endpoint); + +int +xhci_sideband_stop_endpoint(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep) +{ + struct xhci_virt_ep *ep; + unsigned int ep_index; + + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = sb->eps[ep_index]; + + if (!ep || !ep->sideband || ep->sideband != sb) + return -EINVAL; + + return xhci_stop_endpoint_sync(sb->xhci, ep, 0, GFP_KERNEL); +} +EXPORT_SYMBOL_GPL(xhci_sideband_stop_endpoint); + +/** + * xhci_sideband_get_endpoint_buffer - gets the endpoint transfer buffer address + * @sb: sideband instance for this usb device + * @host_ep: usb host endpoint + * + * Returns the address of the endpoint buffer where the xHC controller reads + * queued transfer TRBs from. This is the starting address of the ring buffer + * where the sideband client should write TRBs to. + * + * Caller needs to free the returned sg_table. + * + * Return: struct sg_table * if successful. NULL otherwise. + */ +struct sg_table * +xhci_sideband_get_endpoint_buffer(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep) +{ + struct xhci_virt_ep *ep; + unsigned int ep_index; + + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = sb->eps[ep_index]; + + if (!ep || !ep->ring || !ep->sideband || ep->sideband != sb) + return NULL; + + return xhci_ring_to_sgtable(sb, ep->ring); +} +EXPORT_SYMBOL_GPL(xhci_sideband_get_endpoint_buffer); + +/** + * xhci_sideband_get_event_buffer - return the event buffer for this device + * @sb: sideband instance for this usb device + * + * If a secondary xhci interrupter is set up for this usb device then this + * function returns the address of the event buffer where xHC writes + * the transfer completion events. + * + * Caller needs to free the returned sg_table. + * + * Return: struct sg_table * if successful. NULL otherwise. + */ +struct sg_table * +xhci_sideband_get_event_buffer(struct xhci_sideband *sb) +{ + if (!sb || !sb->ir) + return NULL; + + return xhci_ring_to_sgtable(sb, sb->ir->event_ring); +} +EXPORT_SYMBOL_GPL(xhci_sideband_get_event_buffer); + +/** + * xhci_sideband_create_interrupter - creates a new interrupter for this sideband + * @sb: sideband instance for this usb device + * @num_seg: number of event ring segments to allocate + * @ip_autoclear: IP autoclearing support such as MSI implemented + * @imod_interval: interrupt moderation interval in nanoseconds + * @intr_num: requested interrupter number + * + * Sets up an xhci interrupter that can be used for this sideband accessed usb + * device. Transfer events for this device can be routed to this interrupter's + * event ring by setting the 'Interrupter Target' field correctly when queueing + * the transfer TRBs.
+ * Once this interrupter is created, the interrupter target ID can be obtained + * by calling xhci_sideband_interrupter_id(). + * + * Returns 0 on success, negative error otherwise + */ +int +xhci_sideband_create_interrupter(struct xhci_sideband *sb, int num_seg, + bool ip_autoclear, u32 imod_interval, int intr_num) +{ + int ret = 0; + + if (!sb || !sb->xhci) + return -ENODEV; + + mutex_lock(&sb->mutex); + if (sb->ir) { + ret = -EBUSY; + goto out; + } + + sb->ir = xhci_create_secondary_interrupter(xhci_to_hcd(sb->xhci), + num_seg, imod_interval, + intr_num); + if (!sb->ir) { + ret = -ENOMEM; + goto out; + } + + sb->ir->ip_autoclear = ip_autoclear; + +out: + mutex_unlock(&sb->mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(xhci_sideband_create_interrupter); + +/** + * xhci_sideband_remove_interrupter - remove the interrupter from a sideband + * @sb: sideband instance for this usb device + * + * Removes a registered interrupter from a sideband. This allows other + * sideband users to utilize this interrupter. + */ +void +xhci_sideband_remove_interrupter(struct xhci_sideband *sb) +{ + if (!sb || !sb->ir) + return; + + mutex_lock(&sb->mutex); + xhci_remove_secondary_interrupter(xhci_to_hcd(sb->xhci), sb->ir); + + sb->ir = NULL; + mutex_unlock(&sb->mutex); +} +EXPORT_SYMBOL_GPL(xhci_sideband_remove_interrupter); + +/** + * xhci_sideband_interrupter_id - return the interrupter target id + * @sb: sideband instance for this usb device + * + * If a secondary xhci interrupter is set up for this usb device then this + * function returns the ID used by the interrupter. The sideband client + * needs to write this ID to the 'Interrupter Target' field of the transfer TRBs + * it queues on the endpoint's transfer ring to ensure transfer completion events + * are written by the xHC to the correct interrupter event ring. + * + * Returns interrupter id on success, negative error otherwise + */ +int +xhci_sideband_interrupter_id(struct xhci_sideband *sb) +{ + if (!sb || !sb->ir) + return -ENODEV; + + return sb->ir->intr_num; +} +EXPORT_SYMBOL_GPL(xhci_sideband_interrupter_id); + +/** + * xhci_sideband_register - register a sideband for a usb device + * @intf: usb interface associated with the sideband device + * @type: sideband type; only XHCI_SIDEBAND_VENDOR is currently supported + * @notify_client: callback used to notify the client of sideband events + * + * Allows clients to utilize XHCI interrupters and fetch transfer and event + * ring parameters for executing data transfers. + * + * Return: pointer to a new xhci_sideband instance if successful. NULL otherwise. + */ +struct xhci_sideband * +xhci_sideband_register(struct usb_interface *intf, enum xhci_sideband_type type, + int (*notify_client)(struct usb_interface *intf, + struct xhci_sideband_event *evt)) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_hcd *hcd = bus_to_hcd(udev->bus); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_virt_device *vdev; + struct xhci_sideband *sb; + + /* + * Make sure the usb device is connected to a xhci controller. Fail + * registration if the type is anything other than XHCI_SIDEBAND_VENDOR, + * as this is the only type that is currently supported by xhci-sideband.
+ */ + if (!udev->slot_id || type != XHCI_SIDEBAND_VENDOR) + return NULL; + + sb = kzalloc_node(sizeof(*sb), GFP_KERNEL, dev_to_node(hcd->self.sysdev)); + if (!sb) + return NULL; + + mutex_init(&sb->mutex); + + /* check this device isn't already controlled via sideband */ + spin_lock_irq(&xhci->lock); + + vdev = xhci->devs[udev->slot_id]; + + if (!vdev || vdev->sideband) { + xhci_warn(xhci, "XHCI sideband for slot %d already in use\n", + udev->slot_id); + spin_unlock_irq(&xhci->lock); + kfree(sb); + return NULL; + } + + sb->xhci = xhci; + sb->vdev = vdev; + sb->intf = intf; + sb->type = type; + sb->notify_client = notify_client; + vdev->sideband = sb; + + spin_unlock_irq(&xhci->lock); + + return sb; +} +EXPORT_SYMBOL_GPL(xhci_sideband_register); + +/** + * xhci_sideband_unregister - unregister sideband access to a usb device + * @sb: sideband instance to be unregistered + * + * Unregisters sideband access to a usb device and frees the sideband + * instance. + * After this the endpoint and interrupter event buffers should no longer + * be accessed via sideband. The xhci driver can now take over handling + * the buffers. + */ +void +xhci_sideband_unregister(struct xhci_sideband *sb) +{ + struct xhci_hcd *xhci; + int i; + + if (!sb) + return; + + xhci = sb->xhci; + + mutex_lock(&sb->mutex); + for (i = 0; i < EP_CTX_PER_DEV; i++) + if (sb->eps[i]) + __xhci_sideband_remove_endpoint(sb, sb->eps[i]); + mutex_unlock(&sb->mutex); + + xhci_sideband_remove_interrupter(sb); + + spin_lock_irq(&xhci->lock); + sb->xhci = NULL; + sb->vdev->sideband = NULL; + spin_unlock_irq(&xhci->lock); + + kfree(sb); +} +EXPORT_SYMBOL_GPL(xhci_sideband_unregister); +MODULE_DESCRIPTION("xHCI sideband driver for secondary interrupter management"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 6246d5ad1468..0c7af44d4dae 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -26,6 +26,7 @@ #include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/usb/otg.h> #include <linux/usb/phy.h> #include <linux/usb/role.h> @@ -724,7 +725,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra, if (err < 0) { dev_err(dev, "failed to %s LFPS detection on USB3#%u: %d\n", - enable ? "enable" : "disable", port, err); + str_enable_disable(enable), port, err); rsp.cmd = MBOX_CMD_NAK; } else { rsp.cmd = MBOX_CMD_ACK; @@ -1349,7 +1350,7 @@ static void tegra_xhci_id_work(struct work_struct *work) u32 status; int ret; - dev_dbg(tegra->dev, "host mode %s\n", tegra->host_mode ? 
"on" : "off"); + dev_dbg(tegra->dev, "host mode %s\n", str_on_off(tegra->host_mode)); mutex_lock(&tegra->lock); @@ -1363,6 +1364,7 @@ static void tegra_xhci_id_work(struct work_struct *work) tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl, tegra->otg_usb2_port); + pm_runtime_get_sync(tegra->dev); if (tegra->host_mode) { /* switch to host mode */ if (tegra->otg_usb3_port >= 0) { @@ -1392,6 +1394,7 @@ static void tegra_xhci_id_work(struct work_struct *work) } tegra_xhci_set_port_power(tegra, true, true); + pm_runtime_mark_last_busy(tegra->dev); } else { if (tegra->otg_usb3_port >= 0) @@ -1399,6 +1402,7 @@ static void tegra_xhci_id_work(struct work_struct *work) tegra_xhci_set_port_power(tegra, true, false); } + pm_runtime_put_autosuspend(tegra->dev); } #if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP) @@ -1667,7 +1671,7 @@ static int tegra_xusb_probe(struct platform_device *pdev) goto put_padctl; } - if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) { + if (!of_property_present(pdev->dev.of_node, "power-domains")) { tegra->host_rst = devm_reset_control_get(&pdev->dev, "xusb_host"); if (IS_ERR(tegra->host_rst)) { @@ -2161,11 +2165,11 @@ static void tegra_xhci_program_utmi_power_lp0_exit(struct tegra_xusb *tegra) } } -static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime) +static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool is_auto_resume) { struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd); struct device *dev = tegra->dev; - bool wakeup = runtime ? true : device_may_wakeup(dev); + bool wakeup = is_auto_resume ? true : device_may_wakeup(dev); unsigned int i; int err; u32 usbcmd; @@ -2183,7 +2187,7 @@ static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime) goto out; } - for (i = 0; i < tegra->num_usb_phys; i++) { + for (i = 0; i < xhci->usb2_rhub.num_ports; i++) { if (!xhci->usb2_rhub.ports[i]) continue; portsc = readl(xhci->usb2_rhub.ports[i]->addr); @@ -2231,11 +2235,11 @@ out: return err; } -static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool runtime) +static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool is_auto_resume) { struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd); struct device *dev = tegra->dev; - bool wakeup = runtime ? true : device_may_wakeup(dev); + bool wakeup = is_auto_resume ? true : device_may_wakeup(dev); unsigned int i; u32 usbcmd; int err; @@ -2286,7 +2290,7 @@ static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool runtime) if (wakeup) tegra_xhci_disable_phy_sleepwalk(tegra); - err = xhci_resume(xhci, runtime ? 
PMSG_AUTO_RESUME : PMSG_RESUME); + err = xhci_resume(xhci, false, is_auto_resume); if (err < 0) { dev_err(tegra->dev, "failed to resume XHCI: %d\n", err); goto disable_phy; @@ -2664,7 +2668,7 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_of_match); static struct platform_driver tegra_xusb_driver = { .probe = tegra_xusb_probe, - .remove_new = tegra_xusb_remove, + .remove = tegra_xusb_remove, .shutdown = tegra_xusb_shutdown, .driver = { .name = "tegra-xusb", diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h index 1740000d54c2..bfb5c5c17012 100644 --- a/drivers/usb/host/xhci-trace.h +++ b/drivers/usb/host/xhci-trace.h @@ -108,9 +108,10 @@ DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx, ); DECLARE_EVENT_CLASS(xhci_log_trb, - TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb), - TP_ARGS(ring, trb), + TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma), + TP_ARGS(ring, trb, dma), TP_STRUCT__entry( + __field(dma_addr_t, dma) __field(u32, type) __field(u32, field0) __field(u32, field1) @@ -118,51 +119,54 @@ DECLARE_EVENT_CLASS(xhci_log_trb, __field(u32, field3) ), TP_fast_assign( + __entry->dma = dma; __entry->type = ring->type; __entry->field0 = le32_to_cpu(trb->field[0]); __entry->field1 = le32_to_cpu(trb->field[1]); __entry->field2 = le32_to_cpu(trb->field[2]); __entry->field3 = le32_to_cpu(trb->field[3]); ), - TP_printk("%s: %s", xhci_ring_type_string(__entry->type), + TP_printk("%s: @%pad %s", + xhci_ring_type_string(__entry->type), &__entry->dma, xhci_decode_trb(__get_buf(XHCI_MSG_MAX), XHCI_MSG_MAX, __entry->field0, __entry->field1, __entry->field2, __entry->field3) ) ); DEFINE_EVENT(xhci_log_trb, xhci_handle_event, - TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb), - TP_ARGS(ring, trb) + TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma), + TP_ARGS(ring, trb, dma) ); DEFINE_EVENT(xhci_log_trb, xhci_handle_command, - TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb), - TP_ARGS(ring, trb) + TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma), + TP_ARGS(ring, trb, dma) ); DEFINE_EVENT(xhci_log_trb, xhci_handle_transfer, - TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb), - TP_ARGS(ring, trb) + TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma), + TP_ARGS(ring, trb, dma) ); DEFINE_EVENT(xhci_log_trb, xhci_queue_trb, - TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb), - TP_ARGS(ring, trb) + TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma), + TP_ARGS(ring, trb, dma) + ); DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_event, - TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb), - TP_ARGS(ring, trb) + TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma), + TP_ARGS(ring, trb, dma) ); DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_transfer, - TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb), - TP_ARGS(ring, trb) + TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma), + TP_ARGS(ring, trb, dma) ); DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue, - TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb), - TP_ARGS(ring, trb) + TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma), + TP_ARGS(ring, trb, dma) ); DECLARE_EVENT_CLASS(xhci_log_free_virt_dev, @@ -172,8 +176,7 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev, __field(void *, vdev) __field(unsigned long long, out_ctx) 
__field(unsigned long long, in_ctx) - __field(int, hcd_portnum) - __field(int, hw_portnum) + __field(int, slot_id) __field(u16, current_mel) ), @@ -181,13 +184,12 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev, __entry->vdev = vdev; __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma; __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma; - __entry->hcd_portnum = (int) vdev->rhub_port->hcd_portnum; - __entry->hw_portnum = (int) vdev->rhub_port->hw_portnum; + __entry->slot_id = (int) vdev->slot_id; __entry->current_mel = (u16) vdev->current_mel; ), - TP_printk("vdev %p ctx %llx | %llx hcd_portnum %d hw_portnum %d current_mel %d", - __entry->vdev, __entry->in_ctx, __entry->out_ctx, - __entry->hcd_portnum, __entry->hw_portnum, __entry->current_mel + TP_printk("vdev %p slot %d ctx %llx | %llx current_mel %d", + __entry->vdev, __entry->slot_id, __entry->in_ctx, + __entry->out_ctx, __entry->current_mel ) ); @@ -252,6 +254,7 @@ DECLARE_EVENT_CLASS(xhci_log_urb, TP_PROTO(struct urb *urb), TP_ARGS(urb), TP_STRUCT__entry( + __string(devname, dev_name(&urb->dev->dev)) __field(void *, urb) __field(unsigned int, pipe) __field(unsigned int, stream) @@ -267,6 +270,7 @@ DECLARE_EVENT_CLASS(xhci_log_urb, __field(int, slot_id) ), TP_fast_assign( + __assign_str(devname); __entry->urb = urb; __entry->pipe = urb->pipe; __entry->stream = urb->stream_id; @@ -281,7 +285,8 @@ DECLARE_EVENT_CLASS(xhci_log_urb, __entry->type = usb_endpoint_type(&urb->ep->desc); __entry->slot_id = urb->dev->slot_id; ), - TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x", + TP_printk("%s ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x", + __get_str(devname), __entry->epnum, __entry->dir_in ? "in" : "out", __print_symbolic(__entry->type, { USB_ENDPOINT_XFER_INT, "intr" }, @@ -309,6 +314,37 @@ DEFINE_EVENT(xhci_log_urb, xhci_urb_dequeue, TP_ARGS(urb) ); +DECLARE_EVENT_CLASS(xhci_log_stream_ctx, + TP_PROTO(struct xhci_stream_info *info, unsigned int stream_id), + TP_ARGS(info, stream_id), + TP_STRUCT__entry( + __field(unsigned int, stream_id) + __field(u64, stream_ring) + __field(dma_addr_t, ctx_array_dma) + + ), + TP_fast_assign( + __entry->stream_id = stream_id; + __entry->stream_ring = le64_to_cpu(info->stream_ctx_array[stream_id].stream_ring); + __entry->ctx_array_dma = info->ctx_array_dma + stream_id * 16; + + ), + TP_printk("stream %u ctx @%pad: SCT %llu deq %llx", __entry->stream_id, + &__entry->ctx_array_dma, CTX_TO_SCT(__entry->stream_ring), + __entry->stream_ring + ) +); + +DEFINE_EVENT(xhci_log_stream_ctx, xhci_alloc_stream_info_ctx, + TP_PROTO(struct xhci_stream_info *info, unsigned int stream_id), + TP_ARGS(info, stream_id) +); + +DEFINE_EVENT(xhci_log_stream_ctx, xhci_handle_cmd_set_deq_stream, + TP_PROTO(struct xhci_stream_info *info, unsigned int stream_id), + TP_ARGS(info, stream_id) +); + DECLARE_EVENT_CLASS(xhci_log_ep_ctx, TP_PROTO(struct xhci_ep_ctx *ctx), TP_ARGS(ctx), @@ -453,8 +489,6 @@ DECLARE_EVENT_CLASS(xhci_log_ring, __field(void *, ring) __field(dma_addr_t, enq) __field(dma_addr_t, deq) - __field(dma_addr_t, enq_seg) - __field(dma_addr_t, deq_seg) __field(unsigned int, num_segs) __field(unsigned int, stream_id) __field(unsigned int, cycle_state) @@ -465,17 +499,15 @@ DECLARE_EVENT_CLASS(xhci_log_ring, __entry->type = ring->type; __entry->num_segs = ring->num_segs; __entry->stream_id = ring->stream_id; - __entry->enq_seg = ring->enq_seg->dma; - __entry->deq_seg = ring->deq_seg->dma; __entry->cycle_state = ring->cycle_state; 
__entry->bounce_buf_len = ring->bounce_buf_len; __entry->enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue); __entry->deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); ), - TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d bounce %d cycle %d", + TP_printk("%s %p: enq %pad deq %pad segs %d stream %d bounce %d cycle %d", xhci_ring_type_string(__entry->type), __entry->ring, - &__entry->enq, &__entry->enq_seg, - &__entry->deq, &__entry->deq_seg, + &__entry->enq, + &__entry->deq, __entry->num_segs, __entry->stream_id, __entry->bounce_buf_len, diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 8579603edaff..8a819e853288 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -8,6 +8,7 @@ * Some code borrowed from the Linux EHCI driver. */ +#include <linux/jiffies.h> #include <linux/pci.h> #include <linux/iommu.h> #include <linux/iopoll.h> @@ -16,8 +17,10 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/dmi.h> #include <linux/dma-mapping.h> +#include <linux/usb/xhci-sideband.h> #include "xhci.h" #include "xhci-trace.h" @@ -40,15 +43,15 @@ MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default"); static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) { - struct xhci_segment *seg = ring->first_seg; + struct xhci_segment *seg; if (!td || !td->start_seg) return false; - do { + + xhci_for_each_ring_seg(ring->first_seg, seg) { if (seg == td->start_seg) return true; - seg = seg->next; - } while (seg && seg != ring->first_seg); + } return false; } @@ -82,29 +85,6 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us) } /* - * xhci_handshake_check_state - same as xhci_handshake but takes an additional - * exit_state parameter, and bails out with an error immediately when xhc_state - * has exit_state flag set. - */ -int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr, - u32 mask, u32 done, int usec, unsigned int exit_state) -{ - u32 result; - int ret; - - ret = readl_poll_timeout_atomic(ptr, result, - (result & mask) == done || - result == U32_MAX || - xhci->xhc_state & exit_state, - 1, usec); - - if (result == U32_MAX || xhci->xhc_state & exit_state) - return -ENODEV; - - return ret; -} - -/* * Disable interrupts and begin the xHCI halting process. 
*/ void xhci_quiesce(struct xhci_hcd *xhci) @@ -224,8 +204,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) if (xhci->quirks & XHCI_INTEL_HOST) udelay(1000); - ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command, - CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING); + ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us); if (ret) return ret; @@ -320,45 +299,56 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci) xhci_info(xhci, "Fault detected\n"); } -static int xhci_enable_interrupter(struct xhci_interrupter *ir) +int xhci_enable_interrupter(struct xhci_interrupter *ir) { u32 iman; if (!ir || !ir->ir_set) return -EINVAL; - iman = readl(&ir->ir_set->irq_pending); - writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending); + iman = readl(&ir->ir_set->iman); + iman |= IMAN_IE; + writel(iman, &ir->ir_set->iman); + /* Read operation to guarantee the write has been flushed from posted buffers */ + readl(&ir->ir_set->iman); return 0; } -static int xhci_disable_interrupter(struct xhci_interrupter *ir) +int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) { u32 iman; if (!ir || !ir->ir_set) return -EINVAL; - iman = readl(&ir->ir_set->irq_pending); - writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending); + iman = readl(&ir->ir_set->iman); + iman &= ~IMAN_IE; + writel(iman, &ir->ir_set->iman); + + iman = readl(&ir->ir_set->iman); + if (iman & IMAN_IP) + xhci_dbg(xhci, "%s: Interrupt pending\n", __func__); return 0; } /* interrupt moderation interval imod_interval in nanoseconds */ -static int xhci_set_interrupter_moderation(struct xhci_interrupter *ir, - u32 imod_interval) +int xhci_set_interrupter_moderation(struct xhci_interrupter *ir, + u32 imod_interval) { u32 imod; - if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250) + if (!ir || !ir->ir_set) return -EINVAL; - imod = readl(&ir->ir_set->irq_control); - imod &= ~ER_IRQ_INTERVAL_MASK; - imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK; - writel(imod, &ir->ir_set->irq_control); + /* IMODI value in IMOD register is in 250ns increments */ + imod_interval = umin(imod_interval / 250, IMODI_MASK); + + imod = readl(&ir->ir_set->imod); + imod &= ~IMODI_MASK; + imod |= imod_interval; + writel(imod, &ir->ir_set->imod); return 0; } @@ -371,7 +361,7 @@ static void compliance_mode_recovery(struct timer_list *t) u32 temp; int i; - xhci = from_timer(xhci, t, comp_mode_recovery_timer); + xhci = timer_container_of(xhci, t, comp_mode_recovery_timer); rhub = &xhci->usb3_rhub; hcd = rhub->hcd; @@ -458,6 +448,82 @@ static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1)); } +static void xhci_hcd_page_size(struct xhci_hcd *xhci) +{ + u32 page_size; + + page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK; + if (!is_power_of_2(page_size)) { + xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size); + /* Fallback to 4K page size, since that's common */ + page_size = 1; + } + + xhci->page_size = page_size << 12; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK", + xhci->page_size >> 10); +} + +static void xhci_enable_max_dev_slots(struct xhci_hcd *xhci) +{ + u32 config_reg; + u32 max_slots; + + max_slots = HCS_MAX_SLOTS(xhci->hcs_params1); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHC can handle at most %d device slots", + max_slots); + + config_reg = readl(&xhci->op_regs->config_reg); + config_reg &= ~HCS_SLOTS_MASK; + config_reg |= max_slots; + + xhci_dbg_trace(xhci, 
trace_xhci_dbg_init, "Setting Max device slots reg = 0x%x", + config_reg); + writel(config_reg, &xhci->op_regs->config_reg); +} + +static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) +{ + dma_addr_t deq_dma; + u64 crcr; + + deq_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue); + deq_dma &= CMD_RING_PTR_MASK; + + crcr = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + crcr &= ~CMD_RING_PTR_MASK; + crcr |= deq_dma; + + crcr &= ~CMD_RING_CYCLE; + crcr |= xhci->cmd_ring->cycle_state; + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting command ring address to 0x%llx", crcr); + xhci_write_64(xhci, crcr, &xhci->op_regs->cmd_ring); +} + +static void xhci_set_doorbell_ptr(struct xhci_hcd *xhci) +{ + u32 offset; + + offset = readl(&xhci->cap_regs->db_off) & DBOFF_MASK; + xhci->dba = (void __iomem *)xhci->cap_regs + offset; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Doorbell array is located at offset 0x%x from cap regs base addr", offset); +} + +/* + * Enable USB 3.0 device notifications for function remote wake, which is necessary + * for allowing USB 3.0 devices to do remote wakeup from U3 (device suspend). + */ +static void xhci_set_dev_notifications(struct xhci_hcd *xhci) +{ + u32 dev_notf; + + dev_notf = readl(&xhci->op_regs->dev_notification); + dev_notf &= ~DEV_NOTE_MASK; + dev_notf |= DEV_NOTE_FWAKE; + writel(dev_notf, &xhci->op_regs->dev_notification); +} /* * Initialize memory for HCD and xHC (one-time init). @@ -471,18 +537,37 @@ static int xhci_init(struct usb_hcd *hcd) struct xhci_hcd *xhci = hcd_to_xhci(hcd); int retval; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Starting %s", __func__); spin_lock_init(&xhci->lock); - if (xhci->hci_version == 0x95 && link_quirk) { - xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, - "QUIRK: Not clearing Link TRB chain bits."); - xhci->quirks |= XHCI_LINK_TRB_QUIRK; - } else { - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "xHCI doesn't need link TRB QUIRK"); - } + + INIT_LIST_HEAD(&xhci->cmd_list); + INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); + init_completion(&xhci->cmd_ring_stop_completion); + xhci_hcd_page_size(xhci); + memset(xhci->devs, 0, MAX_HC_SLOTS * sizeof(*xhci->devs)); + retval = xhci_mem_init(xhci, GFP_KERNEL); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); + if (retval) + return retval; + + /* Set the Number of Device Slots Enabled to the maximum supported value */ + xhci_enable_max_dev_slots(xhci); + + /* Set the address in the Command Ring Control register */ + xhci_set_cmd_ring_deq(xhci); + + /* Set Device Context Base Address Array pointer */ + xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr); + + /* Set Doorbell array pointer */ + xhci_set_doorbell_ptr(xhci); + + /* Set USB 3.0 device notifications for function remote wake */ + xhci_set_dev_notifications(xhci); + + /* Initialize the Primary interrupter */ + xhci_add_interrupter(xhci, 0); + xhci->interrupters[0]->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX; /* Initializing Compliance Mode Recovery Data If Needed */ if (xhci_compliance_mode_recovery_timer_quirk_check()) { @@ -490,7 +575,8 @@ static int xhci_init(struct usb_hcd *hcd) compliance_mode_recovery_timer_init(xhci); } - return retval; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished %s", __func__); + return 0; } /*-------------------------------------------------------------------------*/ @@ -632,7 +718,7 @@ void xhci_stop(struct usb_hcd *hcd) /* Deleting Compliance Mode 
Recovery Timer */ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && (!(xhci_all_ports_seen_u0(xhci)))) { - del_timer_sync(&xhci->comp_mode_recovery_timer); + timer_delete_sync(&xhci->comp_mode_recovery_timer); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "%s: compliance mode recovery timer deleted", __func__); @@ -645,7 +731,7 @@ void xhci_stop(struct usb_hcd *hcd) "// Disabling event ring interrupts"); temp = readl(&xhci->op_regs->status); writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); - xhci_disable_interrupter(ir); + xhci_disable_interrupter(xhci, ir); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); xhci_mem_cleanup(xhci); @@ -677,11 +763,11 @@ void xhci_shutdown(struct usb_hcd *hcd) xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", __func__, hcd->self.busnum); clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); - del_timer_sync(&hcd->rh_timer); + timer_delete_sync(&hcd->rh_timer); if (xhci->shared_hcd) { clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); - del_timer_sync(&xhci->shared_hcd->rh_timer); + timer_delete_sync(&xhci->shared_hcd->rh_timer); } spin_lock_irq(&xhci->lock); @@ -724,8 +810,8 @@ static void xhci_save_registers(struct xhci_hcd *xhci) ir->s3_erst_size = readl(&ir->ir_set->erst_size); ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); - ir->s3_irq_pending = readl(&ir->ir_set->irq_pending); - ir->s3_irq_control = readl(&ir->ir_set->irq_control); + ir->s3_iman = readl(&ir->ir_set->iman); + ir->s3_imod = readl(&ir->ir_set->imod); } } @@ -748,28 +834,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci) writel(ir->s3_erst_size, &ir->ir_set->erst_size); xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base); xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue); - writel(ir->s3_irq_pending, &ir->ir_set->irq_pending); - writel(ir->s3_irq_control, &ir->ir_set->irq_control); + writel(ir->s3_iman, &ir->ir_set->iman); + writel(ir->s3_imod, &ir->ir_set->imod); } } -static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) -{ - u64 val_64; - - /* step 2: initialize command ring buffer */ - val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); - val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | - (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, - xhci->cmd_ring->dequeue) & - (u64) ~CMD_RING_RSVD_BITS) | - xhci->cmd_ring->cycle_state; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Setting command ring address to 0x%llx", - (long unsigned long) val_64); - xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); -} - /* * The whole command ring must be cleared to zero when we suspend the host. * @@ -785,16 +854,14 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) struct xhci_segment *seg; ring = xhci->cmd_ring; - seg = ring->deq_seg; - do { - memset(seg->trbs, 0, - sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1)); - seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= - cpu_to_le32(~TRB_CYCLE); - seg = seg->next; - } while (seg != ring->deq_seg); - - xhci_initialize_ring_info(ring, 1); + xhci_for_each_ring_seg(ring->first_seg, seg) { + /* erase all TRBs before the link */ + memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1)); + /* clear link cycle bit */ + seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE); + } + + xhci_initialize_ring_info(ring); /* * Reset the hardware dequeue pointer. 
* Yes, this will need to be re-written after resume, but we're paranoid @@ -915,10 +982,10 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", __func__, hcd->self.busnum); clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); - del_timer_sync(&hcd->rh_timer); + timer_delete_sync(&hcd->rh_timer); if (xhci->shared_hcd) { clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); - del_timer_sync(&xhci->shared_hcd->rh_timer); + timer_delete_sync(&xhci->shared_hcd->rh_timer); } if (xhci->quirks & XHCI_SUSPEND_DELAY) @@ -985,7 +1052,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) */ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && (!(xhci_all_ports_seen_u0(xhci)))) { - del_timer_sync(&xhci->comp_mode_recovery_timer); + timer_delete_sync(&xhci->comp_mode_recovery_timer); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "%s: compliance mode recovery timer deleted", __func__); @@ -1001,16 +1068,14 @@ EXPORT_SYMBOL_GPL(xhci_suspend); * This is called when the machine transition from S3/S4 mode. * */ -int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) +int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume) { - bool hibernated = (msg.event == PM_EVENT_RESTORE); u32 command, temp = 0; struct usb_hcd *hcd = xhci_to_hcd(xhci); int retval = 0; bool comp_timer_running = false; bool pending_portevent = false; bool suspended_usb3_devs = false; - bool reinit_xhc = false; if (!hcd->state) return 0; @@ -1029,10 +1094,10 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) spin_lock_irq(&xhci->lock); - if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) - reinit_xhc = true; + if (xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) + power_lost = true; - if (!reinit_xhc) { + if (!power_lost) { /* * Some controllers might lose power during suspend, so wait * for controller not ready bit to clear, just as in xHC init. 
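A minimal sketch of how a glue driver might map its dev_pm_ops onto the new xhci_resume() flags now that the pm_message_t argument is gone; the foo_* names and drvdata layout are hypothetical, but the flag semantics mirror the hunks above (a hibernation restore means power was lost, and a runtime resume is an auto-resume):

/* Sketch only, not part of the diff. */
static int foo_xhci_restore(struct device *dev)		/* hibernation */
{
	struct xhci_hcd *xhci = dev_get_drvdata(dev);	/* assumed layout */

	return xhci_resume(xhci, true, false);	/* power lost across S4 */
}

static int foo_xhci_resume(struct device *dev)		/* system sleep */
{
	struct xhci_hcd *xhci = dev_get_drvdata(dev);

	/* RESET_ON_RESUME and broken_suspend still force reinit internally */
	return xhci_resume(xhci, false, false);
}

static int foo_xhci_runtime_resume(struct device *dev)
{
	struct xhci_hcd *xhci = dev_get_drvdata(dev);

	return xhci_resume(xhci, false, true);	/* auto-resume */
}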
@@ -1072,15 +1137,15 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) /* re-initialize the HC on Restore Error, or Host Controller Error */ if ((temp & (STS_SRE | STS_HCE)) && !(xhci->xhc_state & XHCI_STATE_REMOVING)) { - reinit_xhc = true; - if (!xhci->broken_suspend) + if (!power_lost) xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); + power_lost = true; } - if (reinit_xhc) { + if (power_lost) { if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !(xhci_all_ports_seen_u0(xhci))) { - del_timer_sync(&xhci->comp_mode_recovery_timer); + timer_delete_sync(&xhci->comp_mode_recovery_timer); xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "Compliance Mode Recovery Timer deleted!"); } @@ -1093,7 +1158,10 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) xhci_dbg(xhci, "Stop HCD\n"); xhci_halt(xhci); xhci_zero_64b_regs(xhci); - retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); + if (xhci->xhc_state & XHCI_STATE_REMOVING) + retval = -ENODEV; + else + retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); spin_unlock_irq(&xhci->lock); if (retval) return retval; @@ -1101,7 +1169,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) xhci_dbg(xhci, "// Disabling event ring interrupts\n"); temp = readl(&xhci->op_regs->status); writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); - xhci_disable_interrupter(xhci->interrupters[0]); + xhci_disable_interrupter(xhci, xhci->interrupters[0]); xhci_dbg(xhci, "cleaning up memory\n"); xhci_mem_cleanup(xhci); @@ -1125,10 +1193,20 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) xhci_dbg(xhci, "Start the secondary HCD\n"); retval = xhci_run(xhci->shared_hcd); } - + if (retval) + return retval; + /* + * Resume roothubs unconditionally as PORTSC change bits are not + * immediately visible after xHC reset + */ hcd->state = HC_STATE_SUSPENDED; - if (xhci->shared_hcd) + + if (xhci->shared_hcd) { xhci->shared_hcd->state = HC_STATE_SUSPENDED; + usb_hcd_resume_root_hub(xhci->shared_hcd); + } + usb_hcd_resume_root_hub(hcd); + goto done; } @@ -1152,7 +1230,6 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) xhci_dbc_resume(xhci); - done: if (retval == 0) { /* * Resume roothubs only if there are pending events. @@ -1166,8 +1243,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) pending_portevent = xhci_pending_portevent(xhci); - if (suspended_usb3_devs && !pending_portevent && - msg.event == PM_EVENT_AUTO_RESUME) { + if (suspended_usb3_devs && !pending_portevent && is_auto_resume) { msleep(120); pending_portevent = xhci_pending_portevent(xhci); } @@ -1178,6 +1254,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) usb_hcd_resume_root_hub(hcd); } } +done: /* * If system is subject to the Quirk, Compliance Mode Timer needs to * be re-initialized Always after a system resume. Ports are subject @@ -1359,6 +1436,7 @@ static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and * HCDs. Find the index for an endpoint given its descriptor. Use the return * value to right shift 1 for the bitmask. + * @desc: USB endpoint descriptor to determine index for * * Index = (epnum * 2) + direction - 1, * where direction = 0 for OUT, 1 for IN. 
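A worked example of the endpoint index formula documented above (illustrative only, not part of the diff):

/*
 * Index = (epnum * 2) + direction - 1, direction = 0 for OUT, 1 for IN:
 *
 *   ep1 OUT (bEndpointAddress 0x01): (1 * 2) + 0 - 1 = 1
 *   ep1 IN  (bEndpointAddress 0x81): (1 * 2) + 1 - 1 = 2
 *   ep2 OUT (bEndpointAddress 0x02): (2 * 2) + 0 - 1 = 3
 *   ep2 IN  (bEndpointAddress 0x82): (2 * 2) + 1 - 1 = 4
 *
 * Control endpoints are bidirectional and use index epnum * 2, so the
 * default control endpoint ep0 lands at index 0.
 */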
@@ -1746,7 +1824,7 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) urb->ep->desc.bEndpointAddress, (unsigned long long) xhci_trb_virt_to_dma( urb_priv->td[i].start_seg, - urb_priv->td[i].first_trb)); + urb_priv->td[i].start_trb)); for (; i < urb_priv->num_tds; i++) { td = &urb_priv->td[i]; @@ -1758,15 +1836,27 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) } } - /* Queue a stop endpoint command, but only if this is - * the first cancellation to be handled. - */ - if (!(ep->ep_state & EP_STOP_CMD_PENDING)) { + /* These completion handlers will sort out cancelled TDs for us */ + if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) { + xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n", + urb->dev->slot_id, ep_index, ep->ep_state); + goto done; + } + + /* In this case no commands are pending but the endpoint is stopped */ + if (ep->ep_state & EP_CLEARING_TT) { + /* and cancelled TDs can be given back right away */ + xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n", + urb->dev->slot_id, ep_index, ep->ep_state); + xhci_process_cancelled_tds(ep); + } else { + /* Otherwise, queue a new Stop Endpoint command */ command = xhci_alloc_command(xhci, false, GFP_ATOMIC); if (!command) { ret = -ENOMEM; goto done; } + ep->stop_time = jiffies; ep->ep_state |= EP_STOP_CMD_PENDING; xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, ep_index, 0); @@ -2784,6 +2874,51 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, return -ENOMEM; } +/* + * Synchronous XHCI stop endpoint helper. Issues the stop endpoint command and + * waits for the command completion before returning. This does not call + * xhci_handle_cmd_stop_ep(), which has additional handling for 'context error' + * cases, along with transfer ring cleanup. + * + * xhci_stop_endpoint_sync() is intended to be utilized by clients that manage + * their own transfer ring, such as offload situations. + */ +int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend, + gfp_t gfp_flags) +{ + struct xhci_command *command; + unsigned long flags; + int ret; + + command = xhci_alloc_command(xhci, true, gfp_flags); + if (!command) + return -ENOMEM; + + spin_lock_irqsave(&xhci->lock, flags); + ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id, + ep->ep_index, suspend); + if (ret < 0) { + spin_unlock_irqrestore(&xhci->lock, flags); + goto out; + } + + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + wait_for_completion(command->completion); + + /* No handling for COMP_CONTEXT_STATE_ERROR done at command completion*/ + if (command->status == COMP_COMMAND_ABORTED || + command->status == COMP_COMMAND_RING_STOPPED) { + xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); + ret = -ETIME; + } +out: + xhci_free_command(xhci, command); + + return ret; +} +EXPORT_SYMBOL_GPL(xhci_stop_endpoint_sync); /* Issue a configure endpoint command or evaluate context command * and wait for it to finish. 
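xhci_stop_endpoint_sync() above, like xhci_get_port_bandwidth() in the next hunk, follows the driver's usual synchronous command pattern; distilled to a sketch (assuming the caller checks ret and holds no locks on entry):

/*
 * Sketch of the synchronous command pattern: allocate a command that
 * carries a completion, queue it under xhci->lock, ring the host
 * controller doorbell, then sleep until the command-completion event
 * handler signals the completion.
 */
cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);	/* true: with completion */
if (!cmd)
	return -ENOMEM;
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, ep_index, 0);
if (!ret)
	xhci_ring_cmd_db(xhci);			/* doorbell 0 = command ring */
spin_unlock_irqrestore(&xhci->lock, flags);
if (!ret)
	wait_for_completion(cmd->completion);
xhci_free_command(xhci, cmd);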
@@ -2827,7 +2962,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, xhci->num_active_eps); return -ENOMEM; } - if ((xhci->quirks & XHCI_SW_BW_CHECKING) && + if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change && xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) xhci_free_host_resources(xhci, ctrl_ctx); @@ -3032,6 +3167,42 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) } EXPORT_SYMBOL_GPL(xhci_reset_bandwidth); +/* Get the available bandwidth of the ports under the xhci roothub */ +int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, + u8 dev_speed) +{ + struct xhci_command *cmd; + unsigned long flags; + int ret; + + if (!ctx || !xhci) + return -EINVAL; + + cmd = xhci_alloc_command(xhci, true, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->in_ctx = ctx; + + /* get xhci port bandwidth, refer to xhci rev1_2 protocol 4.6.15 */ + spin_lock_irqsave(&xhci->lock, flags); + + ret = xhci_queue_get_port_bw(xhci, cmd, ctx->dma, dev_speed, 0); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + goto err_out; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + wait_for_completion(cmd->completion); +err_out: + kfree(cmd->completion); + kfree(cmd); + + return ret; +} + static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, struct xhci_container_ctx *in_ctx, struct xhci_container_ctx *out_ctx, @@ -3682,6 +3853,8 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, xhci->num_active_eps); } +static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev); + /* * This submits a Reset Device Command, which will set the device state to 0, * set the device address to 0, and disable all the endpoints except the default @@ -3752,6 +3925,23 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, SLOT_STATE_DISABLED) return 0; + if (xhci->quirks & XHCI_ETRON_HOST) { + /* + * Obtaining a new device slot to inform the xHCI host that + * the USB device has been reset. + */ + ret = xhci_disable_slot(xhci, udev->slot_id); + xhci_free_virt_device(xhci, udev->slot_id); + if (!ret) { + ret = xhci_alloc_dev(hcd, udev); + if (ret == 1) + ret = 0; + else + ret = -EINVAL; + } + return ret; + } + trace_xhci_discover_or_reset_device(slot_ctx); xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); @@ -3835,6 +4025,8 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, } if (ep->ring) { + if (ep->sideband) + xhci_sideband_notify_ep_ring_free(ep->sideband, i); xhci_debugfs_remove_endpoint(xhci, virt_dev, i); xhci_free_endpoint_ring(xhci, virt_dev, i); } @@ -4190,8 +4382,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, mutex_unlock(&xhci->mutex); ret = xhci_disable_slot(xhci, udev->slot_id); xhci_free_virt_device(xhci, udev->slot_id); - if (!ret) - xhci_alloc_dev(hcd, udev); + if (!ret) { + if (xhci_alloc_dev(hcd, udev) == 1) + xhci_setup_addressable_virt_dev(xhci, udev); + } kfree(command->completion); kfree(command); return -EPROTO; @@ -4447,7 +4641,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, hlpm_addr = ports[port_num]->addr + PORTHLPMC; xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", - enable ? 
"enable" : "disable", port_num + 1); + str_enable_disable(enable), port_num + 1); if (enable) { /* Host supports BESL timeout instead of HIRD */ @@ -4507,35 +4701,27 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, return 0; } -/* check if a usb2 port supports a given extened capability protocol - * only USB2 ports extended protocol capability values are cached. - * Return 1 if capability is supported - */ -static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, - unsigned capability) -{ - u32 port_offset, port_count; - int i; - - for (i = 0; i < xhci->num_ext_caps; i++) { - if (xhci->ext_caps[i] & capability) { - /* port offsets starts at 1 */ - port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; - port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); - if (port >= port_offset && - port < port_offset + port_count) - return 1; - } - } - return 0; -} - static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); - int portnum = udev->portnum - 1; + struct xhci_port *port; + u32 capability; - if (hcd->speed >= HCD_USB3 || !udev->lpm_capable) + /* Check if USB3 device at root port is tunneled over USB4 */ + if (hcd->speed >= HCD_USB3 && !udev->parent->parent) { + port = xhci->usb3_rhub.ports[udev->portnum - 1]; + + udev->tunnel_mode = xhci_port_is_tunneled(xhci, port); + if (udev->tunnel_mode == USB_LINK_UNKNOWN) + dev_dbg(&udev->dev, "link tunnel state unknown\n"); + else if (udev->tunnel_mode == USB_LINK_TUNNELED) + dev_dbg(&udev->dev, "tunneled over USB4 link\n"); + else if (udev->tunnel_mode == USB_LINK_NATIVE) + dev_dbg(&udev->dev, "native USB 3.x link\n"); + return 0; + } + + if (hcd->speed >= HCD_USB3 || !udev->lpm_capable || !xhci->hw_lpm_support) return 0; /* we only support lpm for non-hub device connected to root hub yet */ @@ -4543,14 +4729,14 @@ static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) udev->descriptor.bDeviceClass == USB_CLASS_HUB) return 0; - if (xhci->hw_lpm_support == 1 && - xhci_check_usb2_port_capability( - xhci, portnum, XHCI_HLC)) { + port = xhci->usb2_rhub.ports[udev->portnum - 1]; + capability = port->port_cap->protocol_caps; + + if (capability & XHCI_HLC) { udev->usb2_hw_lpm_capable = 1; udev->l1_params.timeout = XHCI_L1_TIMEOUT; udev->l1_params.besl = XHCI_DEFAULT_BESL; - if (xhci_check_usb2_port_capability(xhci, portnum, - XHCI_BLC)) + if (capability & XHCI_BLC) udev->usb2_hw_lpm_besl_capable = 1; } @@ -4686,8 +4872,8 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, */ if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) return timeout_ns; - dev_dbg(&udev->dev, "Hub-initiated U1 disabled " - "due to long timeout %llu ms\n", timeout_ns); + dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %lluus\n", + timeout_ns); return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); } @@ -4744,8 +4930,8 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, */ if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) return timeout_ns; - dev_dbg(&udev->dev, "Hub-initiated U2 disabled " - "due to long timeout %llu ms\n", timeout_ns); + dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %lluus\n", + timeout_ns * 256); return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); } @@ -5247,6 +5433,11 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) if (xhci->hci_version > 0x96) xhci->quirks |= XHCI_SPURIOUS_SUCCESS; + if (xhci->hci_version == 0x95 && link_quirk) { + xhci_dbg(xhci, "QUIRK: Not clearing Link TRB 
chain bits"); + xhci->quirks |= XHCI_LINK_TRB_QUIRK; + } + /* Make sure the HC is halted. */ retval = xhci_halt(xhci); if (retval) diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 6f4bf98a6282..a20f4e7cd43a 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/usb/hcd.h> #include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/io-64-nonatomic-hi-lo.h> /* Code sharing between pci-quirks and xhci hcd */ #include "xhci-ext-caps.h" @@ -151,10 +152,6 @@ struct xhci_op_regs { #define XHCI_RESET_LONG_USEC (10 * 1000 * 1000) #define XHCI_RESET_SHORT_USEC (250 * 1000) -/* IMAN - Interrupt Management Register */ -#define IMAN_IE (1 << 1) -#define IMAN_IP (1 << 0) - /* USBSTS - USB status - status bitmasks */ /* HC not running - set to 1 when run/stop bit is cleared. */ #define STS_HALT XHCI_STS_HALT @@ -183,23 +180,22 @@ struct xhci_op_regs { * notification type that matches a bit set in this bit field. */ #define DEV_NOTE_MASK (0xffff) -#define ENABLE_DEV_NOTE(x) (1 << (x)) /* Most of the device notification types should only be used for debug. * SW does need to pay attention to function wake notifications. */ -#define DEV_NOTE_FWAKE ENABLE_DEV_NOTE(1) +#define DEV_NOTE_FWAKE (1 << 1) /* CRCR - Command Ring Control Register - cmd_ring bitmasks */ -/* bit 0 is the command ring cycle state */ +/* bit 0 - Cycle bit indicates the ownership of the command ring */ +#define CMD_RING_CYCLE (1 << 0) /* stop ring operation after completion of the currently executing command */ #define CMD_RING_PAUSE (1 << 1) /* stop ring immediately - abort the currently executing command */ #define CMD_RING_ABORT (1 << 2) /* true: command ring is running */ #define CMD_RING_RUNNING (1 << 3) -/* bits 4:5 reserved and should be preserved */ -/* Command Ring pointer - bit mask for the lower 32 bits. */ -#define CMD_RING_RSVD_BITS (0x3f) +/* bits 63:6 - Command Ring pointer */ +#define CMD_RING_PTR_MASK GENMASK_ULL(63, 6) /* CONFIG - Configure Register - config_reg bitmasks */ /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ @@ -210,15 +206,17 @@ struct xhci_op_regs { #define CONFIG_CIE (1 << 9) /* bits 10:31 - reserved and should be preserved */ +/* bits 15:0 - HCD page shift bit */ +#define XHCI_PAGE_SIZE_MASK 0xffff + /** - * struct xhci_intr_reg - Interrupt Register Set - * @irq_pending: IMAN - Interrupt Management Register. Used to enable + * struct xhci_intr_reg - Interrupt Register Set, v1.2 section 5.5.2. + * @iman: IMAN - Interrupt Management Register. Used to enable * interrupts and check for pending interrupts. - * @irq_control: IMOD - Interrupt Moderation Register. - * Used to throttle interrupts. - * @erst_size: Number of segments in the Event Ring Segment Table (ERST). - * @erst_base: ERST base address. - * @erst_dequeue: Event ring dequeue pointer. + * @imod: IMOD - Interrupt Moderation Register. Used to throttle interrupts. + * @erst_size: ERSTSZ - Number of segments in the Event Ring Segment Table (ERST). + * @erst_base: ERSTBA - Event ring segment table base address. + * @erst_dequeue: ERDP - Event ring dequeue pointer. * * Each interrupter (defined by a MSI-X vector) has an event ring and an Event * Ring Segment Table (ERST) associated with it. The event ring is comprised of @@ -228,48 +226,51 @@ struct xhci_op_regs { * updates the dequeue pointer. 
*/ struct xhci_intr_reg { - __le32 irq_pending; - __le32 irq_control; + __le32 iman; + __le32 imod; __le32 erst_size; __le32 rsvd; __le64 erst_base; __le64 erst_dequeue; }; -/* irq_pending bitmasks */ -#define ER_IRQ_PENDING(p) ((p) & 0x1) -/* bits 2:31 need to be preserved */ -/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */ -#define ER_IRQ_CLEAR(p) ((p) & 0xfffffffe) -#define ER_IRQ_ENABLE(p) ((ER_IRQ_CLEAR(p)) | 0x2) -#define ER_IRQ_DISABLE(p) ((ER_IRQ_CLEAR(p)) & ~(0x2)) - -/* irq_control bitmasks */ -/* Minimum interval between interrupts (in 250ns intervals). The interval - * between interrupts will be longer if there are no events on the event ring. - * Default is 4000 (1 ms). +/* iman bitmasks */ +/* bit 0 - Interrupt Pending (IP), whether there is an interrupt pending. Write-1-to-clear. */ +#define IMAN_IP (1 << 0) +/* bit 1 - Interrupt Enable (IE), whether the interrupter is capable of generating an interrupt */ +#define IMAN_IE (1 << 1) + +/* imod bitmasks */ +/* + * bits 15:0 - Interrupt Moderation Interval, the minimum interval between interrupts + * (in 250ns intervals). The interval between interrupts will be longer if there are no + * events on the event ring. Default is 4000 (1 ms). */ -#define ER_IRQ_INTERVAL_MASK (0xffff) -/* Counter used to count down the time to the next interrupt - HW use only */ -#define ER_IRQ_COUNTER_MASK (0xffff << 16) +#define IMODI_MASK (0xffff) +/* bits 31:16 - Interrupt Moderation Counter, used to count down the time to the next interrupt */ +#define IMODC_MASK (0xffff << 16) /* erst_size bitmasks */ -/* Preserve bits 16:31 of erst_size */ -#define ERST_SIZE_MASK (0xffff << 16) +/* bits 15:0 - Event Ring Segment Table Size, number of ERST entries */ +#define ERST_SIZE_MASK (0xffff) /* erst_base bitmasks */ -#define ERST_BASE_RSVDP (GENMASK_ULL(5, 0)) +/* bits 63:6 - Event Ring Segment Table Base Address Register */ +#define ERST_BASE_ADDRESS_MASK GENMASK_ULL(63, 6) /* erst_dequeue bitmasks */ -/* Dequeue ERST Segment Index (DESI) - Segment number (or alias) - * where the current dequeue pointer lies. This is an optional HW hint. +/* + * bits 2:0 - Dequeue ERST Segment Index (DESI), is the segment number (or alias) where the + * current dequeue pointer lies. This is an optional HW hint. */ #define ERST_DESI_MASK (0x7) -/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by +/* + * bit 3 - Event Handler Busy (EHB), whether the event ring is scheduled to be serviced by * a work queue (or delayed service routine)? */ #define ERST_EHB (1 << 3) -#define ERST_PTR_MASK (GENMASK_ULL(63, 4)) +/* bits 63:4 - Event Ring Dequeue Pointer */ +#define ERST_PTR_MASK GENMASK_ULL(63, 4) /** * struct xhci_run_regs @@ -528,6 +529,7 @@ struct xhci_command { /* Input context for changing device state */ struct xhci_container_ctx *in_ctx; u32 status; + u32 comp_param; int slot_id; /* If completion is null, no one is waiting on this command * and the structure can be freed after the command completes. 
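To make the IMODI units above concrete, a small sketch of programming the moderation interval, mirroring xhci_set_interrupter_moderation() from the xhci.c hunks (the 5000ns request is an arbitrary example value):

/*
 * IMODI counts in 250ns increments; the 16-bit field saturates at
 * IMODI_MASK (0xffff, roughly 16.4ms). A 5000ns request becomes 20
 * increments.
 */
u32 imod = readl(&ir->ir_set->imod);

imod &= ~IMODI_MASK;
imod |= umin(5000 / 250, IMODI_MASK);	/* = 20 */
writel(imod, &ir->ir_set->imod);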
@@ -553,6 +555,7 @@ struct xhci_stream_ctx { /* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */ #define SCT_FOR_CTX(p) (((p) & 0x7) << 1) +#define CTX_TO_SCT(p) (((p) >> 1) & 0x7) /* Secondary stream array type, dequeue pointer is to a transfer ring */ #define SCT_SEC_TR 0 /* Primary stream array type, dequeue pointer is to a transfer ring */ @@ -583,6 +586,7 @@ struct xhci_stream_info { #define SMALL_STREAM_ARRAY_SIZE 256 #define MEDIUM_STREAM_ARRAY_SIZE 1024 +#define GET_PORT_BW_ARRAY_SIZE 256 /* Some Intel xHCI host controllers need software to keep track of the bus * bandwidth. Keep track of endpoint info here. Each root port is allocated @@ -689,10 +693,13 @@ struct xhci_virt_ep { /* Bandwidth checking storage */ struct xhci_bw_info bw_info; struct list_head bw_endpoint_list; + unsigned long stop_time; /* Isoch Frame ID checking storage */ int next_frame_id; /* Use new Isoch TRB layout needed for extended TBC support */ bool use_extended_tbc; + /* set if this endpoint is controlled via sideband access */ + struct xhci_sideband *sideband; }; enum xhci_overhead_type { @@ -755,6 +762,8 @@ struct xhci_virt_device { u16 current_mel; /* Used for the debugfs interfaces. */ void *debugfs_private; + /* set if this device is controlled via sideband access */ + struct xhci_sideband *sideband; }; /* @@ -805,13 +814,19 @@ struct xhci_transfer_event { __le32 flags; }; +/* Transfer event flags bitfield, also for select command completion events */ +#define TRB_TO_SLOT_ID(p) (((p) >> 24) & 0xff) +#define SLOT_ID_FOR_TRB(p) (((p) & 0xff) << 24) + +#define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f) /* Endpoint ID 1 - 31 */ +#define EP_ID_FOR_TRB(p) (((p) & 0x1f) << 16) + +#define TRB_TO_EP_INDEX(p) (TRB_TO_EP_ID(p) - 1) /* Endpoint index 0 - 30 */ +#define EP_INDEX_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16) + /* Transfer event TRB length bit mask */ -/* bits 0:23 */ #define EVENT_TRB_LEN(p) ((p) & 0xffffff) -/** Transfer Event bit fields **/ -#define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f) - /* Completion Code - only applicable for some types of TRBs */ #define COMP_CODE_MASK (0xff << 24) #define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24) @@ -950,7 +965,8 @@ struct xhci_event_cmd { __le32 flags; }; -/* flags bitmasks */ +/* status bitmasks */ +#define COMP_PARAM(p) ((p) & 0xffffff) /* Command Completion Parameter */ /* Address device - disable SetAddress */ #define TRB_BSR (1<<9) @@ -987,13 +1003,11 @@ enum xhci_setup_dev { /* bits 16:23 are the virtual function ID */ /* bits 24:31 are the slot ID */ -#define TRB_TO_SLOT_ID(p) (((p) & (0xff<<24)) >> 24) -#define SLOT_ID_FOR_TRB(p) (((p) & 0xff) << 24) -/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */ -#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1) -#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16) +/* bits 19:16 are the dev speed */ +#define DEV_SPEED_FOR_TRB(p) ((p) << 16) +/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */ #define SUSPEND_PORT_FOR_TRB(p) (((p) & 1) << 23) #define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23) #define LAST_EP_INDEX 30 @@ -1001,7 +1015,7 @@ enum xhci_setup_dev { /* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */ #define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16)) #define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16) -#define SCT_FOR_TRB(p) (((p) << 1) & 0x7) +#define SCT_FOR_TRB(p) (((p) & 0x7) << 1) /* Link TRB specific fields */ #define TRB_TC (1<<1) @@ -1023,9 +1037,6 @@ enum xhci_setup_dev { /* Interrupter Target - which MSI-X vector to
target the completion event at */ #define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22) #define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff) -/* Total burst count field, Rsvdz on xhci 1.1 with Extended TBC enabled (ETE) */ -#define TRB_TBC(p) (((p) & 0x3) << 7) -#define TRB_TLBPC(p) (((p) & 0xf) << 16) /* Cycle bit - indicates TRB ownership by HC or HCD */ #define TRB_CYCLE (1<<0) @@ -1059,6 +1070,12 @@ enum xhci_setup_dev { /* Isochronous TRB specific fields */ #define TRB_SIA (1<<31) #define TRB_FRAME_ID(p) (((p) & 0x7ff) << 20) +#define GET_FRAME_ID(p) (((p) >> 20) & 0x7ff) +/* Total burst count field, Rsvdz on xhci 1.1 with Extended TBC enabled (ETE) */ +#define TRB_TBC(p) (((p) & 0x3) << 7) +#define GET_TBC(p) (((p) >> 7) & 0x3) +#define TRB_TLBPC(p) (((p) & 0xf) << 16) +#define GET_TLBPC(p) (((p) >> 16) & 0xf) /* TRB cache size for xHC with TRB cache */ #define TRB_CACHE_SIZE_HS 8 @@ -1259,6 +1276,9 @@ static inline const char *xhci_trb_type_string(u8 type) #define AVOID_BEI_INTERVAL_MIN 8 #define AVOID_BEI_INTERVAL_MAX 32 +#define xhci_for_each_ring_seg(head, seg) \ + for (seg = head; seg != NULL; seg = (seg->next != head ? seg->next : NULL)) + struct xhci_segment { union xhci_trb *trbs; /* private to HCD */ @@ -1276,6 +1296,7 @@ enum xhci_cancelled_td_status { TD_DIRTY = 0, TD_HALTED, TD_CLEARING_CACHE, + TD_CLEARING_CACHE_DEFERRED, TD_CLEARED, }; @@ -1286,14 +1307,13 @@ struct xhci_td { enum xhci_cancelled_td_status cancel_status; struct urb *urb; struct xhci_segment *start_seg; - union xhci_trb *first_trb; - union xhci_trb *last_trb; - struct xhci_segment *last_trb_seg; + union xhci_trb *start_trb; + struct xhci_segment *end_seg; + union xhci_trb *end_trb; struct xhci_segment *bounce_seg; /* actual_length of the URB has already been set */ bool urb_length_set; bool error_mid_td; - unsigned int num_trbs; }; /* @@ -1359,7 +1379,7 @@ struct xhci_ring { unsigned int num_trbs_free; /* used only by xhci DbC */ unsigned int bounce_buf_len; enum xhci_ring_type type; - bool last_td_was_short; + u32 old_trb_comp_code; struct radix_tree_root *trb_address_map; }; @@ -1376,8 +1396,6 @@ struct xhci_erst { unsigned int num_entries; /* xhci->event_ring keeps track of segment dma addresses */ dma_addr_t erst_dma_addr; - /* Num entries the ERST can contain */ - unsigned int erst_size; }; struct xhci_scratchpad { @@ -1392,8 +1410,8 @@ struct urb_priv { struct xhci_td td[] __counted_by(num_tds); }; -/* Reasonable limit for number of Event Ring segments (spec allows 32k) */ -#define ERST_MAX_SEGS 2 +/* Number of Event Ring segments to allocate, when amount is not specified. 
+#define ERST_DEFAULT_SEGS 2 /* Poll every 60 seconds */ #define POLL_TIMEOUT 60 /* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */ @@ -1434,8 +1452,8 @@ struct xhci_interrupter { bool ip_autoclear; u32 isoc_bei_interval; /* For interrupter registers save and restore over suspend/resume */ - u32 s3_irq_pending; - u32 s3_irq_control; + u32 s3_iman; + u32 s3_imod; u32 s3_erst_size; u64 s3_erst_base; u64 s3_erst_dequeue; @@ -1451,6 +1469,7 @@ struct xhci_port_cap { u8 psi_uid_count; u8 maj_rev; u8 min_rev; + u32 protocol_caps; }; struct xhci_port { @@ -1499,19 +1518,11 @@ struct xhci_hcd { spinlock_t lock; /* packed release number */ - u8 sbrn; u16 hci_version; - u8 max_slots; u16 max_interrupters; - u8 max_ports; - u8 isoc_threshold; /* imod_interval in ns (I * 250ns) */ u32 imod_interval; - int event_ring_max; - /* 4KB min, 128MB max */ - int page_size; - /* Valid values are 12 to 20, inclusive */ - int page_shift; + u32 page_size; /* MSI-X/MSI vectors */ int nvecs; /* optional clocks */ @@ -1548,6 +1559,7 @@ struct xhci_hcd { struct dma_pool *device_pool; struct dma_pool *segment_pool; struct dma_pool *small_streams_pool; + struct dma_pool *port_bw_pool; struct dma_pool *medium_streams_pool; /* Host controller watchdog timer structures */ @@ -1589,7 +1601,7 @@ struct xhci_hcd { #define XHCI_RESET_ON_RESUME BIT_ULL(7) #define XHCI_SW_BW_CHECKING BIT_ULL(8) #define XHCI_AMD_0x96_HOST BIT_ULL(9) -#define XHCI_TRUST_TX_LENGTH BIT_ULL(10) +#define XHCI_TRUST_TX_LENGTH BIT_ULL(10) /* Deprecated */ #define XHCI_LPM_SUPPORT BIT_ULL(11) #define XHCI_INTEL_HOST BIT_ULL(12) #define XHCI_SPURIOUS_REBOOT BIT_ULL(13) @@ -1617,7 +1629,7 @@ struct xhci_hcd { #define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) -#define XHCI_RENESAS_FW_QUIRK BIT_ULL(36) +/* Reserved. It was XHCI_RENESAS_FW_QUIRK */ #define XHCI_SKIP_PHY_INIT BIT_ULL(37) #define XHCI_DISABLE_SPARSE BIT_ULL(38) #define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39) @@ -1626,8 +1638,12 @@ struct xhci_hcd { #define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42) #define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43) #define XHCI_RESET_TO_DEFAULT BIT_ULL(44) -#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45) +#define XHCI_TRB_OVERFETCH BIT_ULL(45) #define XHCI_ZHAOXIN_HOST BIT_ULL(46) +#define XHCI_WRITE_64_HI_LO BIT_ULL(47) +#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48) +#define XHCI_ETRON_HOST BIT_ULL(49) +#define XHCI_LIMIT_ENDPOINT_INTERVAL_9 BIT_ULL(50) unsigned int num_active_eps; unsigned int limit_active_eps; @@ -1640,9 +1656,6 @@ struct xhci_hcd { unsigned broken_suspend:1; /* Indicates that omitting hcd is supported if root hub has no ports */ unsigned allow_single_roothub:1; - /* cached usb2 extened protocol capabilites */ - u32 *ext_caps; - unsigned int num_ext_caps; /* cached extended protocol port capabilities */ struct xhci_port_cap *port_caps; unsigned int num_port_caps; @@ -1729,8 +1742,6 @@ static inline bool xhci_has_one_roothub(struct xhci_hcd *xhci) dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args) #define xhci_warn(xhci, fmt, args...) \ dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args) -#define xhci_warn_ratelimited(xhci, fmt, args...) \ dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args) #define xhci_info(xhci, fmt, args...) \ dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
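On the u32 page_size change above: the xHCI PAGESIZE register advertises a supported page size of 2^(n+12) bytes via bit n, which is why the removed page_shift field was documented as valid from 12 to 20; the shift/size pair can therefore collapse into a single byte count. A hedged, standalone sketch of the decode (xHCI spec 5.4.3 semantics, not necessarily the patch's exact code; the 4K fallback is an assumption):

#include <assert.h>
#include <stdint.h>

static uint32_t xhci_page_size_bytes(uint32_t pagesize_reg)
{
	uint32_t bits = pagesize_reg & 0xffff;	/* bits 31:16 are reserved */

	/* treat zero or multiple bits set as bogus and fall back to 4K */
	if (bits == 0 || (bits & (bits - 1)) != 0)
		return 4096;

	return bits << 12;	/* bit n set -> 2^(n+12) bytes */
}

int main(void)
{
	assert(xhci_page_size_bytes(0x0001) == 4096);	/* the common case */
	assert(xhci_page_size_bytes(0x0008) == 32768);	/* bit 3 -> 2^15 */
	assert(xhci_page_size_bytes(0x0000) == 4096);	/* bogus register */
	return 0;
}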
@@ -1754,9 +1765,21 @@ static inline void xhci_write_64(struct xhci_hcd *xhci, lo_hi_writeq(val, regs); } -static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) + +/* + * Reportedly, some chapters of the v0.95 spec said that a Link TRB always has its chain bit set. + * Other chapters and later specs say that it should only be set if the link is inside a TD + * which continues from the end of one segment to the next segment. + * + * Some 0.95 hardware was found to misbehave if any link TRB doesn't have the chain bit set. + * + * 0.96 hardware from AMD and NEC was found to ignore unchained isochronous link TRBs when + * "resynchronizing the pipe" after a Missed Service Error. + */ +static inline bool xhci_link_chain_quirk(struct xhci_hcd *xhci, enum xhci_ring_type type) { - return xhci->quirks & XHCI_LINK_TRB_QUIRK; + return (xhci->quirks & XHCI_LINK_TRB_QUIRK) || + (type == TYPE_ISOC && (xhci->quirks & (XHCI_AMD_0x96_HOST | XHCI_NEC_HOST))); } /* xHCI debugging */ @@ -1794,14 +1817,12 @@ void xhci_slot_copy(struct xhci_hcd *xhci, int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *udev, struct usb_host_endpoint *ep, gfp_t mem_flags); -struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, - unsigned int num_segs, unsigned int cycle_state, +struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, enum xhci_ring_type type, unsigned int max_packet, gfp_t flags); void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring); int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, unsigned int num_trbs, gfp_t flags); -void xhci_initialize_ring_info(struct xhci_ring *ring, - unsigned int cycle_state); +void xhci_initialize_ring_info(struct xhci_ring *ring); void xhci_free_endpoint_ring(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, unsigned int ep_index); @@ -1832,16 +1853,22 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, int type, gfp_t flags); void xhci_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); +struct xhci_container_ctx *xhci_alloc_port_bw_ctx(struct xhci_hcd *xhci, + gfp_t flags); +void xhci_free_port_bw_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx); struct xhci_interrupter * -xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg); +xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, + u32 imod_interval, unsigned int intr_num); void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir); +void xhci_skip_sec_intr_events(struct xhci_hcd *xhci, + struct xhci_ring *ring, + struct xhci_interrupter *ir); /* xHCI host controller glue */ typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us); -int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr, - u32 mask, u32 done, int usec, unsigned int exit_state); void xhci_quiesce(struct xhci_hcd *xhci); int xhci_halt(struct xhci_hcd *xhci); int xhci_start(struct xhci_hcd *xhci); @@ -1864,7 +1891,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); int xhci_ext_cap_init(struct xhci_hcd *xhci); int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup); -int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg); +int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume); irqreturn_t xhci_irq(struct usb_hcd *hcd);
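xhci_link_chain_quirk() above folds the ring type into the old XHCI_LINK_TRB_QUIRK check, since the AMD/NEC 0.96 problem only affects isochronous rings; the ring code uses the result to decide whether the Link TRB at the end of each segment gets TRB_CHAIN set unconditionally. A runnable model of the decision table (quirk bit positions as in xhci.h, though only their distinctness matters for the sketch):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define XHCI_LINK_TRB_QUIRK	(1ULL << 0)
#define XHCI_NEC_HOST		(1ULL << 2)
#define XHCI_AMD_0x96_HOST	(1ULL << 9)

enum ring_type { TYPE_CTRL, TYPE_BULK, TYPE_ISOC };

/* mirrors the body of xhci_link_chain_quirk() above */
static bool link_chain_quirk(uint64_t quirks, enum ring_type type)
{
	return (quirks & XHCI_LINK_TRB_QUIRK) ||
	       (type == TYPE_ISOC &&
		(quirks & (XHCI_AMD_0x96_HOST | XHCI_NEC_HOST)));
}

int main(void)
{
	/* 0.95 hosts want every link TRB chained, whatever the ring type */
	assert(link_chain_quirk(XHCI_LINK_TRB_QUIRK, TYPE_BULK));
	/* AMD/NEC 0.96 hosts only need chained links on isochronous rings */
	assert(link_chain_quirk(XHCI_NEC_HOST, TYPE_ISOC));
	assert(!link_chain_quirk(XHCI_NEC_HOST, TYPE_BULK));
	assert(!link_chain_quirk(0, TYPE_ISOC));
	return 0;
}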
irqreturn_t xhci_msi_irq(int irq, void *hcd); @@ -1873,12 +1900,13 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags); +int xhci_set_interrupter_moderation(struct xhci_interrupter *ir, + u32 imod_interval); +int xhci_enable_interrupter(struct xhci_interrupter *ir); +int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir); /* xHCI ring, segment, TRB, and TD functions */ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); -struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, - struct xhci_segment *start_seg, union xhci_trb *start_trb, - union xhci_trb *end_trb, dma_addr_t suspect_dma, bool debug); int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code); void xhci_ring_cmd_db(struct xhci_hcd *xhci); int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd, @@ -1900,6 +1928,11 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed); +int xhci_queue_get_port_bw(struct xhci_hcd *xhci, + struct xhci_command *cmd, dma_addr_t in_ctx_ptr, + u8 dev_speed, bool command_must_succeed); +int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, + u8 dev_speed); int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed); int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, @@ -1907,10 +1940,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, enum xhci_ep_reset_type reset_type); int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, u32 slot_id); -void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, - unsigned int ep_index, unsigned int stream_id, - struct xhci_td *td); -void xhci_stop_endpoint_command_watchdog(struct timer_list *t); void xhci_handle_command_timeout(struct work_struct *work); void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, @@ -1921,6 +1950,13 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci, void xhci_cleanup_command_queue(struct xhci_hcd *xhci); void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring); unsigned int count_trbs(u64 addr, u64 len); +int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, + int suspend, gfp_t gfp_flags); +void xhci_process_cancelled_tds(struct xhci_virt_ep *ep); +void xhci_update_erst_dequeue(struct xhci_hcd *xhci, + struct xhci_interrupter *ir, + bool clear_ehb); +void xhci_add_interrupter(struct xhci_hcd *xhci, unsigned int intr_num); /* xHCI roothub code */ void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port, @@ -1932,7 +1968,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1); struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd); - +enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci, + struct xhci_port *port); void xhci_hc_died(struct xhci_hcd *xhci); #ifdef CONFIG_PM @@ -2027,8 +2064,7 @@ static inline const char *xhci_decode_trb(char *str, size_t size, field1, field0, xhci_trb_comp_code_string(GET_COMP_CODE(field2)), EVENT_TRB_LEN(field2), 
TRB_TO_SLOT_ID(field3), - /* Macro decrements 1, maybe it shouldn't?!? */ - TRB_TO_EP_INDEX(field3) + 1, + TRB_TO_EP_ID(field3), xhci_trb_type_string(type), field3 & EVENT_DATA ? 'E' : 'e', field3 & TRB_CYCLE ? 'C' : 'c'); @@ -2078,7 +2114,6 @@ static inline const char *xhci_decode_trb(char *str, size_t size, field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_NORMAL: - case TRB_ISOC: case TRB_EVENT_DATA: case TRB_TR_NOOP: snprintf(str, size, @@ -2095,7 +2130,25 @@ static inline const char *xhci_decode_trb(char *str, size_t size, field3 & TRB_ENT ? 'E' : 'e', field3 & TRB_CYCLE ? 'C' : 'c'); break; - + case TRB_ISOC: + snprintf(str, size, + "Buffer %08x%08x length %d TD size/TBC %d intr %d type '%s' TBC %u TLBPC %u frame_id %u flags %c:%c:%c:%c:%c:%c:%c:%c:%c", + field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2), + GET_INTR_TARGET(field2), + xhci_trb_type_string(type), + GET_TBC(field3), + GET_TLBPC(field3), + GET_FRAME_ID(field3), + field3 & TRB_SIA ? 'S' : 's', + field3 & TRB_BEI ? 'B' : 'b', + field3 & TRB_IDT ? 'I' : 'i', + field3 & TRB_IOC ? 'I' : 'i', + field3 & TRB_CHAIN ? 'C' : 'c', + field3 & TRB_NO_SNOOP ? 'S' : 's', + field3 & TRB_ISP ? 'I' : 'i', + field3 & TRB_ENT ? 'E' : 'e', + field3 & TRB_CYCLE ? 'C' : 'c'); + break; case TRB_CMD_NOOP: case TRB_ENABLE_SLOT: snprintf(str, size, @@ -2143,8 +2196,7 @@ static inline const char *xhci_decode_trb(char *str, size_t size, xhci_trb_type_string(type), field1, field0, TRB_TO_SLOT_ID(field3), - /* Macro decrements 1, maybe it shouldn't?!? */ - TRB_TO_EP_INDEX(field3) + 1, + TRB_TO_EP_ID(field3), field3 & TRB_TSP ? 'T' : 't', field3 & TRB_CYCLE ? 'C' : 'c'); break; @@ -2154,8 +2206,7 @@ static inline const char *xhci_decode_trb(char *str, size_t size, xhci_trb_type_string(type), TRB_TO_SLOT_ID(field3), TRB_TO_SUSPEND_PORT(field3), - /* Macro decrements 1, maybe it shouldn't?!? */ - TRB_TO_EP_INDEX(field3) + 1, + TRB_TO_EP_ID(field3), field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_SET_DEQ: @@ -2165,8 +2216,7 @@ static inline const char *xhci_decode_trb(char *str, size_t size, field1, field0, TRB_TO_STREAM_ID(field2), TRB_TO_SLOT_ID(field3), - /* Macro decrements 1, maybe it shouldn't?!? */ - TRB_TO_EP_INDEX(field3) + 1, + TRB_TO_EP_ID(field3), field3 & TRB_CYCLE ? 'C' : 'c'); break; case TRB_RESET_DEV: @@ -2340,7 +2390,12 @@ static inline const char *xhci_decode_portsc(char *str, u32 portsc) { int ret; - ret = sprintf(str, "%s %s %s Link:%s PortSpeed:%d ", + ret = sprintf(str, "0x%08x ", portsc); + + if (portsc == ~(u32)0) + return str; + + ret += sprintf(str + ret, "%s %s %s Link:%s PortSpeed:%d ", portsc & PORT_POWER ? "Powered" : "Powered-off", portsc & PORT_CONNECT ? "Connected" : "Not-connected", portsc & PORT_PE ? "Enabled" : "Disabled",