Diffstat (limited to 'drivers/usb/host')
-rw-r--r--  drivers/usb/host/Kconfig | 2
-rw-r--r--  drivers/usb/host/bcma-hcd.c | 1
-rw-r--r--  drivers/usb/host/ehci-atmel.c | 2
-rw-r--r--  drivers/usb/host/ehci-brcm.c | 3
-rw-r--r--  drivers/usb/host/ehci-dbg.c | 10
-rw-r--r--  drivers/usb/host/ehci-exynos.c | 38
-rw-r--r--  drivers/usb/host/ehci-fsl.c | 2
-rw-r--r--  drivers/usb/host/ehci-grlib.c | 2
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 4
-rw-r--r--  drivers/usb/host/ehci-mv.c | 2
-rw-r--r--  drivers/usb/host/ehci-npcm7xx.c | 2
-rw-r--r--  drivers/usb/host/ehci-omap.c | 2
-rw-r--r--  drivers/usb/host/ehci-orion.c | 20
-rw-r--r--  drivers/usb/host/ehci-platform.c | 2
-rw-r--r--  drivers/usb/host/ehci-ppc-of.c | 2
-rw-r--r--  drivers/usb/host/ehci-q.c | 20
-rw-r--r--  drivers/usb/host/ehci-sh.c | 11
-rw-r--r--  drivers/usb/host/ehci-spear.c | 9
-rw-r--r--  drivers/usb/host/ehci-st.c | 2
-rw-r--r--  drivers/usb/host/ehci-xilinx-of.c | 2
-rw-r--r--  drivers/usb/host/ehci.h | 8
-rw-r--r--  drivers/usb/host/fhci-hcd.c | 2
-rw-r--r--  drivers/usb/host/fhci-sched.c | 4
-rw-r--r--  drivers/usb/host/fsl-mph-dr-of.c | 2
-rw-r--r--  drivers/usb/host/isp116x-hcd.c | 2
-rw-r--r--  drivers/usb/host/isp1362-hcd.c | 4
-rw-r--r--  drivers/usb/host/max3421-hcd.c | 16
-rw-r--r--  drivers/usb/host/octeon-hcd.c | 6
-rw-r--r--  drivers/usb/host/ohci-at91.c | 2
-rw-r--r--  drivers/usb/host/ohci-da8xx.c | 4
-rw-r--r--  drivers/usb/host/ohci-exynos.c | 39
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 10
-rw-r--r--  drivers/usb/host/ohci-nxp.c | 20
-rw-r--r--  drivers/usb/host/ohci-omap.c | 4
-rw-r--r--  drivers/usb/host/ohci-platform.c | 2
-rw-r--r--  drivers/usb/host/ohci-ppc-of.c | 6
-rw-r--r--  drivers/usb/host/ohci-pxa27x.c | 3
-rw-r--r--  drivers/usb/host/ohci-s3c2410.c | 2
-rw-r--r--  drivers/usb/host/ohci-sm501.c | 2
-rw-r--r--  drivers/usb/host/ohci-spear.c | 2
-rw-r--r--  drivers/usb/host/ohci-st.c | 2
-rw-r--r--  drivers/usb/host/oxu210hp-hcd.c | 38
-rw-r--r--  drivers/usb/host/pci-quirks.c | 9
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 12
-rw-r--r--  drivers/usb/host/sl811-hcd.c | 9
-rw-r--r--  drivers/usb/host/uhci-grlib.c | 2
-rw-r--r--  drivers/usb/host/uhci-platform.c | 2
-rw-r--r--  drivers/usb/host/xhci-caps.h | 91
-rw-r--r--  drivers/usb/host/xhci-dbgcap.c | 197
-rw-r--r--  drivers/usb/host/xhci-dbgcap.h | 8
-rw-r--r--  drivers/usb/host/xhci-dbgtty.c | 173
-rw-r--r--  drivers/usb/host/xhci-debugfs.c | 35
-rw-r--r--  drivers/usb/host/xhci-ext-caps.h | 5
-rw-r--r--  drivers/usb/host/xhci-histb.c | 2
-rw-r--r--  drivers/usb/host/xhci-hub.c | 121
-rw-r--r--  drivers/usb/host/xhci-mem.c | 391
-rw-r--r--  drivers/usb/host/xhci-mtk-sch.c | 14
-rw-r--r--  drivers/usb/host/xhci-mtk.c | 2
-rw-r--r--  drivers/usb/host/xhci-pci-renesas.c | 49
-rw-r--r--  drivers/usb/host/xhci-pci.c | 204
-rw-r--r--  drivers/usb/host/xhci-pci.h | 19
-rw-r--r--  drivers/usb/host/xhci-plat.c | 17
-rw-r--r--  drivers/usb/host/xhci-plat.h | 4
-rw-r--r--  drivers/usb/host/xhci-port.h | 176
-rw-r--r--  drivers/usb/host/xhci-rcar.c | 8
-rw-r--r--  drivers/usb/host/xhci-ring.c | 1100
-rw-r--r--  drivers/usb/host/xhci-rzv2m.c | 1
-rw-r--r--  drivers/usb/host/xhci-tegra.c | 11
-rw-r--r--  drivers/usb/host/xhci-trace.h | 96
-rw-r--r--  drivers/usb/host/xhci.c | 258
-rw-r--r--  drivers/usb/host/xhci.h | 419
71 files changed, 2013 insertions, 1740 deletions
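
The single most repeated change in the hunks below is the rename of the transitional platform_driver callback .remove_new back to .remove, which now carries the void-returning signature. As a minimal sketch only (the foo_* names are placeholders, not drivers from this series), a converted driver ends up shaped like this:

/*
 * Minimal sketch, not part of this patch set: the platform_driver layout
 * these conversions target. The .remove callback no longer returns an
 * error code; it takes over the role .remove_new played during the
 * transition period.
 */
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* acquire resources, register with the subsystem */
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	/* release resources; nothing can be returned on failure anymore */
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,	/* formerly .remove_new */
	.driver	= {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");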
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 4448d0ab06f0..d011d6c753ed 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -40,11 +40,11 @@ config USB_XHCI_DBGCAP
config USB_XHCI_PCI
tristate
depends on USB_PCI
- depends on USB_XHCI_PCI_RENESAS || !USB_XHCI_PCI_RENESAS
default y
config USB_XHCI_PCI_RENESAS
tristate "Support for additional Renesas xHCI controller with firmware"
+ depends on USB_XHCI_PCI
help
Say 'Y' to enable the support for the Renesas xHCI controller with
firmware. Make sure you have the firmware for the device and
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 7558cc4d90cc..519386255886 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 6a6e1c510b28..65747270fd88 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -220,7 +220,7 @@ static SIMPLE_DEV_PM_OPS(ehci_atmel_pm_ops, ehci_atmel_drv_suspend,
static struct platform_driver ehci_atmel_driver = {
.probe = ehci_atmel_drv_probe,
- .remove_new = ehci_atmel_drv_remove,
+ .remove = ehci_atmel_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "atmel-ehci",
diff --git a/drivers/usb/host/ehci-brcm.c b/drivers/usb/host/ehci-brcm.c
index 77e42c739c58..888e8f6670d2 100644
--- a/drivers/usb/host/ehci-brcm.c
+++ b/drivers/usb/host/ehci-brcm.c
@@ -246,10 +246,11 @@ static const struct of_device_id brcm_ehci_of_match[] = {
{ .compatible = "brcm,bcm7445-ehci", },
{}
};
+MODULE_DEVICE_TABLE(of, brcm_ehci_of_match);
static struct platform_driver ehci_brcm_driver = {
.probe = ehci_brcm_probe,
- .remove_new = ehci_brcm_remove,
+ .remove = ehci_brcm_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ehci-brcm",
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index c063fb042926..435001128221 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -430,13 +430,13 @@ static void qh_lines(struct ehci_hcd *ehci, struct ehci_qh *qh,
mark = '/';
}
switch ((scratch >> 8) & 0x03) {
- case 0:
+ case PID_CODE_OUT:
type = "out";
break;
- case 1:
+ case PID_CODE_IN:
type = "in";
break;
- case 2:
+ case PID_CODE_SETUP:
type = "setup";
break;
default:
@@ -602,10 +602,10 @@ static unsigned output_buf_tds_dir(char *buf, struct ehci_hcd *ehci,
list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
temp++;
switch ((hc32_to_cpu(ehci, qtd->hw_token) >> 8) & 0x03) {
- case 0:
+ case PID_CODE_OUT:
type = "out";
continue;
- case 1:
+ case PID_CODE_IN:
type = "in";
continue;
}
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index f644b131cc0b..d2a5bedf736a 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -48,7 +48,6 @@ struct exynos_ehci_hcd {
static int exynos_ehci_get_phy(struct device *dev,
struct exynos_ehci_hcd *exynos_ehci)
{
- struct device_node *child;
struct phy *phy;
int phy_number, num_phys;
int ret;
@@ -66,26 +65,22 @@ static int exynos_ehci_get_phy(struct device *dev,
return 0;
/* Get PHYs using legacy bindings */
- for_each_available_child_of_node(dev->of_node, child) {
+ for_each_available_child_of_node_scoped(dev->of_node, child) {
ret = of_property_read_u32(child, "reg", &phy_number);
if (ret) {
dev_err(dev, "Failed to parse device tree\n");
- of_node_put(child);
return ret;
}
if (phy_number >= PHY_NUMBER) {
dev_err(dev, "Invalid number of PHYs\n");
- of_node_put(child);
return -EINVAL;
}
phy = devm_of_phy_optional_get(dev, child, NULL);
exynos_ehci->phy[phy_number] = phy;
- if (IS_ERR(phy)) {
- of_node_put(child);
+ if (IS_ERR(phy))
return PTR_ERR(phy);
- }
}
exynos_ehci->legacy_phy = true;
@@ -159,20 +154,16 @@ static int exynos_ehci_probe(struct platform_device *pdev)
err = exynos_ehci_get_phy(&pdev->dev, exynos_ehci);
if (err)
- goto fail_clk;
+ goto fail_io;
- exynos_ehci->clk = devm_clk_get(&pdev->dev, "usbhost");
+ exynos_ehci->clk = devm_clk_get_enabled(&pdev->dev, "usbhost");
if (IS_ERR(exynos_ehci->clk)) {
dev_err(&pdev->dev, "Failed to get usbhost clock\n");
err = PTR_ERR(exynos_ehci->clk);
- goto fail_clk;
+ goto fail_io;
}
- err = clk_prepare_enable(exynos_ehci->clk);
- if (err)
- goto fail_clk;
-
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
@@ -223,8 +214,6 @@ fail_add_hcd:
exynos_ehci_phy_disable(&pdev->dev);
pdev->dev.of_node = exynos_ehci->of_node;
fail_io:
- clk_disable_unprepare(exynos_ehci->clk);
-fail_clk:
usb_put_hcd(hcd);
return err;
}
@@ -240,12 +229,9 @@ static void exynos_ehci_remove(struct platform_device *pdev)
exynos_ehci_phy_disable(&pdev->dev);
- clk_disable_unprepare(exynos_ehci->clk);
-
usb_put_hcd(hcd);
}
-#ifdef CONFIG_PM
static int exynos_ehci_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
@@ -288,15 +274,9 @@ static int exynos_ehci_resume(struct device *dev)
ehci_resume(hcd, false);
return 0;
}
-#else
-#define exynos_ehci_suspend NULL
-#define exynos_ehci_resume NULL
-#endif
-static const struct dev_pm_ops exynos_ehci_pm_ops = {
- .suspend = exynos_ehci_suspend,
- .resume = exynos_ehci_resume,
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(exynos_ehci_pm_ops,
+ exynos_ehci_suspend, exynos_ehci_resume);
#ifdef CONFIG_OF
static const struct of_device_id exynos_ehci_match[] = {
@@ -308,11 +288,11 @@ MODULE_DEVICE_TABLE(of, exynos_ehci_match);
static struct platform_driver exynos_ehci_driver = {
.probe = exynos_ehci_probe,
- .remove_new = exynos_ehci_remove,
+ .remove = exynos_ehci_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "exynos-ehci",
- .pm = &exynos_ehci_pm_ops,
+ .pm = pm_ptr(&exynos_ehci_pm_ops),
.of_match_table = of_match_ptr(exynos_ehci_match),
}
};
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 5b1ce394a417..26f13278d4d6 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -706,7 +706,7 @@ static void fsl_ehci_drv_remove(struct platform_device *pdev)
static struct platform_driver ehci_fsl_driver = {
.probe = fsl_ehci_drv_probe,
- .remove_new = fsl_ehci_drv_remove,
+ .remove = fsl_ehci_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index 14150e4d3382..bd9762eaa135 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -168,7 +168,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_grlib_of_match);
static struct platform_driver ehci_grlib_driver = {
.probe = ehci_hcd_grlib_probe,
- .remove_new = ehci_hcd_grlib_remove,
+ .remove = ehci_hcd_grlib_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "grlib-ehci",
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 802bfafb1012..6de79ac5e6a4 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -32,7 +32,7 @@
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#if defined(CONFIG_PPC_PS3)
#include <asm/firmware.h>
@@ -547,7 +547,7 @@ static int ehci_init(struct usb_hcd *hcd)
* make problems: throughput reduction (!), data errors...
*/
if (park) {
- park = min(park, (unsigned) 3);
+ park = min_t(unsigned int, park, 3);
temp |= CMD_PARK;
temp |= park << 8;
}
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 2f1fc7eb8b72..cbabbe107172 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -279,7 +279,7 @@ static const struct of_device_id ehci_mv_dt_ids[] = {
static struct platform_driver ehci_mv_driver = {
.probe = mv_ehci_probe,
- .remove_new = mv_ehci_remove,
+ .remove = mv_ehci_remove,
.shutdown = mv_ehci_shutdown,
.driver = {
.name = "mv-ehci",
diff --git a/drivers/usb/host/ehci-npcm7xx.c b/drivers/usb/host/ehci-npcm7xx.c
index 3d3317a1a0b3..f1c7034c1e80 100644
--- a/drivers/usb/host/ehci-npcm7xx.c
+++ b/drivers/usb/host/ehci-npcm7xx.c
@@ -122,7 +122,7 @@ MODULE_DEVICE_TABLE(of, npcm7xx_ehci_id_table);
static struct platform_driver npcm7xx_ehci_hcd_driver = {
.probe = npcm7xx_ehci_hcd_drv_probe,
- .remove_new = npcm7xx_ehci_hcd_drv_remove,
+ .remove = npcm7xx_ehci_hcd_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "npcm7xx-ehci",
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index b24f371a46f3..db4a1acb27da 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -264,7 +264,7 @@ MODULE_DEVICE_TABLE(of, omap_ehci_dt_ids);
static struct platform_driver ehci_hcd_omap_driver = {
.probe = ehci_hcd_omap_probe,
- .remove_new = ehci_hcd_omap_remove,
+ .remove = ehci_hcd_omap_remove,
.shutdown = usb_hcd_platform_shutdown,
/*.suspend = ehci_hcd_omap_suspend, */
/*.resume = ehci_hcd_omap_resume, */
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 6c47ab0a491d..34abff8669f8 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -66,6 +66,15 @@ struct orion_ehci_hcd {
static struct hc_driver __read_mostly ehci_orion_hc_driver;
/*
+ * Legacy DMA mask is 32 bit.
+ * AC5 has the DDR starting at 8GB, hence it requires
+ * a larger (34-bit) DMA mask, in order for DMA allocations
+ * to succeed:
+ */
+static const u64 dma_mask_orion = DMA_BIT_MASK(32);
+static const u64 dma_mask_ac5 = DMA_BIT_MASK(34);
+
+/*
* Implement Orion USB controller specification guidelines
*/
static void orion_usb_phy_v1_setup(struct usb_hcd *hcd)
@@ -211,6 +220,7 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
int irq, err;
enum orion_ehci_phy_ver phy_version;
struct orion_ehci_hcd *priv;
+ u64 *dma_mask_ptr;
if (usb_disabled())
return -ENODEV;
@@ -228,7 +238,8 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
* set. Since shared usb code relies on it, set it here for
* now. Once we have dma capability bindings this can go away.
*/
- err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ dma_mask_ptr = (u64 *)of_device_get_match_data(&pdev->dev);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, *dma_mask_ptr);
if (err)
goto err;
@@ -332,15 +343,16 @@ static void ehci_orion_drv_remove(struct platform_device *pdev)
}
static const struct of_device_id ehci_orion_dt_ids[] = {
- { .compatible = "marvell,orion-ehci", },
- { .compatible = "marvell,armada-3700-ehci", },
+ { .compatible = "marvell,orion-ehci", .data = &dma_mask_orion},
+ { .compatible = "marvell,armada-3700-ehci", .data = &dma_mask_orion},
+ { .compatible = "marvell,ac5-ehci", .data = &dma_mask_ac5},
{},
};
MODULE_DEVICE_TABLE(of, ehci_orion_dt_ids);
static struct platform_driver ehci_orion_driver = {
.probe = ehci_orion_drv_probe,
- .remove_new = ehci_orion_drv_remove,
+ .remove = ehci_orion_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "orion-ehci",
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 98b073185e1c..cdf41886e8ca 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -508,7 +508,7 @@ static SIMPLE_DEV_PM_OPS(ehci_platform_pm_ops, ehci_platform_suspend,
static struct platform_driver ehci_platform_driver = {
.id_table = ehci_platform_table,
.probe = ehci_platform_probe,
- .remove_new = ehci_platform_remove,
+ .remove = ehci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ehci-platform",
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 7fd83e806ae4..8063b9d3aebd 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -230,7 +230,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match);
static struct platform_driver ehci_hcd_ppc_of_driver = {
.probe = ehci_hcd_ppc_of_probe,
- .remove_new = ehci_hcd_ppc_of_remove,
+ .remove = ehci_hcd_ppc_of_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ppc-of-ehci",
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 666f5c4db25a..ba37a9fcab92 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -27,10 +27,6 @@
/*-------------------------------------------------------------------------*/
-/* PID Codes that are used here, from EHCI specification, Table 3-16. */
-#define PID_CODE_IN 1
-#define PID_CODE_SETUP 2
-
/* fill a qtd, returning how much of the buffer we were able to queue up */
static unsigned int
@@ -230,7 +226,7 @@ static int qtd_copy_status (
/* fs/ls interrupt xfer missed the complete-split */
status = -EPROTO;
} else if (token & QTD_STS_DBE) {
- status = (QTD_PID (token) == 1) /* IN ? */
+ status = (QTD_PID(token) == PID_CODE_IN) /* IN ? */
? -ENOSR /* hc couldn't read data */
: -ECOMM; /* hc couldn't write data */
} else if (token & QTD_STS_XACT) {
@@ -606,7 +602,7 @@ qh_urb_transaction (
/* SETUP pid */
qtd_fill(ehci, qtd, urb->setup_dma,
sizeof (struct usb_ctrlrequest),
- token | (2 /* "setup" */ << 8), 8);
+ token | (PID_CODE_SETUP << 8), 8);
/* ... and always at least one more pid */
token ^= QTD_TOGGLE;
@@ -620,7 +616,7 @@ qh_urb_transaction (
/* for zero length DATA stages, STATUS is always IN */
if (len == 0)
- token |= (1 /* "in" */ << 8);
+ token |= (PID_CODE_IN << 8);
}
/*
@@ -642,7 +638,7 @@ qh_urb_transaction (
}
if (is_input)
- token |= (1 /* "in" */ << 8);
+ token |= (PID_CODE_IN << 8);
/* else it's already initted to "out" pid (0 << 8) */
maxpacket = usb_endpoint_maxp(&urb->ep->desc);
@@ -709,7 +705,7 @@ qh_urb_transaction (
if (usb_pipecontrol (urb->pipe)) {
one_more = 1;
- token ^= 0x0100; /* "in" <--> "out" */
+ token ^= (PID_CODE_IN << 8); /* "in" <--> "out" */
token |= QTD_TOGGLE; /* force DATA1 */
} else if (usb_pipeout(urb->pipe)
&& (urb->transfer_flags & URB_ZERO_PACKET)
@@ -1203,7 +1199,7 @@ static int ehci_submit_single_step_set_feature(
/* SETUP pid, and interrupt after SETUP completion */
qtd_fill(ehci, qtd, urb->setup_dma,
sizeof(struct usb_ctrlrequest),
- QTD_IOC | token | (2 /* "setup" */ << 8), 8);
+ QTD_IOC | token | (PID_CODE_SETUP << 8), 8);
submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
return 0; /*Return now; we shall come back after 15 seconds*/
@@ -1216,7 +1212,7 @@ static int ehci_submit_single_step_set_feature(
token ^= QTD_TOGGLE; /*We need to start IN with DATA-1 Pid-sequence*/
buf = urb->transfer_dma;
- token |= (1 /* "in" */ << 8); /*This is IN stage*/
+ token |= (PID_CODE_IN << 8); /*This is IN stage*/
maxpacket = usb_endpoint_maxp(&urb->ep->desc);
@@ -1229,7 +1225,7 @@ static int ehci_submit_single_step_set_feature(
qtd->hw_alt_next = EHCI_LIST_END(ehci);
/* STATUS stage for GetDesc control request */
- token ^= 0x0100; /* "in" <--> "out" */
+ token ^= (PID_CODE_IN << 8); /* "in" <--> "out" */
token |= QTD_TOGGLE; /* force DATA1 */
qtd_prev = qtd;
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index d31d9506e41a..2d23690d72c5 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -119,8 +119,12 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
if (IS_ERR(priv->iclk))
priv->iclk = NULL;
- clk_enable(priv->fclk);
- clk_enable(priv->iclk);
+ ret = clk_enable(priv->fclk);
+ if (ret)
+ goto fail_request_resource;
+ ret = clk_enable(priv->iclk);
+ if (ret)
+ goto fail_iclk;
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret != 0) {
@@ -136,6 +140,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
fail_add_hcd:
clk_disable(priv->iclk);
+fail_iclk:
clk_disable(priv->fclk);
fail_request_resource:
@@ -169,7 +174,7 @@ static void ehci_hcd_sh_shutdown(struct platform_device *pdev)
static struct platform_driver ehci_hcd_sh_driver = {
.probe = ehci_hcd_sh_probe,
- .remove_new = ehci_hcd_sh_remove,
+ .remove = ehci_hcd_sh_remove,
.shutdown = ehci_hcd_sh_shutdown,
.driver = {
.name = "sh_ehci",
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index d0e94e4c9fe2..e96710192d6b 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -105,7 +105,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
/* registers start at offset 0x0 */
hcd_to_ehci(hcd)->caps = hcd->regs;
- clk_prepare_enable(sehci->clk);
+ retval = clk_prepare_enable(sehci->clk);
+ if (retval)
+ goto err_put_hcd;
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
goto err_stop_ehci;
@@ -130,8 +132,7 @@ static void spear_ehci_hcd_drv_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
- if (sehci->clk)
- clk_disable_unprepare(sehci->clk);
+ clk_disable_unprepare(sehci->clk);
usb_put_hcd(hcd);
}
@@ -143,7 +144,7 @@ MODULE_DEVICE_TABLE(of, spear_ehci_id_table);
static struct platform_driver spear_ehci_hcd_driver = {
.probe = spear_ehci_hcd_drv_probe,
- .remove_new = spear_ehci_hcd_drv_remove,
+ .remove = spear_ehci_hcd_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "spear-ehci",
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index 2dbb0d86daaa..58867d816af7 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -320,7 +320,7 @@ MODULE_DEVICE_TABLE(of, st_ehci_ids);
static struct platform_driver ehci_platform_driver = {
.probe = st_ehci_platform_probe,
- .remove_new = st_ehci_platform_remove,
+ .remove = st_ehci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "st-ehci",
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index a2112c28f631..1d16cfefabd7 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -220,7 +220,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
static struct platform_driver ehci_hcd_xilinx_of_driver = {
.probe = ehci_hcd_xilinx_of_probe,
- .remove_new = ehci_hcd_xilinx_of_remove,
+ .remove = ehci_hcd_xilinx_of_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xilinx-of-ehci",
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 1441e3400796..d7a3c8d13f6b 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -321,10 +321,16 @@ struct ehci_qtd {
size_t length; /* length of buffer */
} __aligned(32);
+/* PID Codes that are used here, from EHCI specification, Table 3-16. */
+#define PID_CODE_OUT 0
+#define PID_CODE_IN 1
+#define PID_CODE_SETUP 2
+
/* mask NakCnt+T in qh->hw_alt_next */
#define QTD_MASK(ehci) cpu_to_hc32(ehci, ~0x1f)
-#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
+#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && \
+ QTD_PID(token) == PID_CODE_IN)
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 9a1b5224f239..22a0942f0bce 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -791,7 +791,7 @@ static struct platform_driver of_fhci_driver = {
.of_match_table = of_fhci_match,
},
.probe = of_fhci_probe,
- .remove_new = of_fhci_remove,
+ .remove = of_fhci_remove,
};
module_platform_driver(of_fhci_driver);
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index a45ede80edfc..c3acd410ce94 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -158,7 +158,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
struct packet *pkt;
u8 *data = NULL;
- /* calcalate data address,len and toggle and then add the transaction */
+ /* calculate data address,len and toggle and then add the transaction */
if (td->toggle == USB_TD_TOGGLE_CARRY)
td->toggle = ed->toggle_carry;
@@ -679,7 +679,7 @@ static void process_done_list(unsigned long data)
DECLARE_TASKLET_OLD(fhci_tasklet, process_done_list);
-/* transfer complted callback */
+/* transfer completed callback */
u32 fhci_transfer_confirm_callback(struct fhci_hcd *fhci)
{
if (!fhci->process_done_task->state)
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 6cdc3d805c32..4e67b9471986 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -362,7 +362,7 @@ static struct platform_driver fsl_usb2_mph_dr_driver = {
.of_match_table = fsl_usb2_mph_dr_of_match,
},
.probe = fsl_usb2_mph_dr_of_probe,
- .remove_new = fsl_usb2_mph_dr_of_remove,
+ .remove = fsl_usb2_mph_dr_of_remove,
};
module_platform_driver(fsl_usb2_mph_dr_driver);
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index a82d8926e922..71c22c4bd163 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1684,7 +1684,7 @@ MODULE_ALIAS("platform:isp116x-hcd");
static struct platform_driver isp116x_driver = {
.probe = isp116x_probe,
- .remove_new = isp116x_remove,
+ .remove = isp116x_remove,
.suspend = isp116x_suspend,
.resume = isp116x_resume,
.driver = {
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index a52c3d858f3e..2d3a082cb52f 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -83,7 +83,7 @@
#include <asm/irq.h>
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static int dbg_level;
#ifdef ISP1362_DEBUG
@@ -2757,7 +2757,7 @@ static int isp1362_resume(struct platform_device *pdev)
static struct platform_driver isp1362_driver = {
.probe = isp1362_probe,
- .remove_new = isp1362_remove,
+ .remove = isp1362_remove,
.suspend = isp1362_suspend,
.resume = isp1362_resume,
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 9fe4f48b1898..0881fdd1823e 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -779,11 +779,17 @@ max3421_check_unlink(struct usb_hcd *hcd)
retval = 1;
dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
__func__, urb, urb->unlinked);
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&max3421_hcd->lock,
- flags);
- usb_hcd_giveback_urb(hcd, urb, 0);
- spin_lock_irqsave(&max3421_hcd->lock, flags);
+ if (urb == max3421_hcd->curr_urb) {
+ max3421_hcd->urb_done = 1;
+ max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) |
+ BIT(MAX3421_HI_RCVDAV_BIT));
+ } else {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&max3421_hcd->lock,
+ flags);
+ usb_hcd_giveback_urb(hcd, urb, 0);
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+ }
}
}
}
diff --git a/drivers/usb/host/octeon-hcd.c b/drivers/usb/host/octeon-hcd.c
index 19d5777f5db2..361d33b0c4d2 100644
--- a/drivers/usb/host/octeon-hcd.c
+++ b/drivers/usb/host/octeon-hcd.c
@@ -3346,7 +3346,7 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case USB_PORT_FEAT_INDICATOR:
dev_dbg(dev, " INDICATOR\n");
- /* Port inidicator not supported */
+ /* Port indicator not supported */
break;
case USB_PORT_FEAT_C_CONNECTION:
dev_dbg(dev, " C_CONNECTION\n");
@@ -3711,8 +3711,8 @@ static struct platform_driver octeon_usb_driver = {
.name = "octeon-hcd",
.of_match_table = octeon_usb_match,
},
- .probe = octeon_usb_probe,
- .remove_new = octeon_usb_remove,
+ .probe = octeon_usb_probe,
+ .remove = octeon_usb_remove,
};
static int __init octeon_usb_driver_init(void)
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index f691cd98a574..5df793dcb25d 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -685,7 +685,7 @@ static SIMPLE_DEV_PM_OPS(ohci_hcd_at91_pm_ops, ohci_hcd_at91_drv_suspend,
static struct platform_driver ohci_hcd_at91_driver = {
.probe = ohci_hcd_at91_drv_probe,
- .remove_new = ohci_hcd_at91_drv_remove,
+ .remove = ohci_hcd_at91_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "at91_ohci",
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index d9adae53466b..3c5ca2d7c92e 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -22,7 +22,7 @@
#include <linux/regulator/consumer.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "ohci.h"
@@ -531,7 +531,7 @@ static const struct ohci_driver_overrides da8xx_overrides __initconst = {
*/
static struct platform_driver ohci_hcd_da8xx_driver = {
.probe = ohci_da8xx_probe,
- .remove_new = ohci_da8xx_remove,
+ .remove = ohci_da8xx_remove,
.shutdown = usb_hcd_platform_shutdown,
#ifdef CONFIG_PM
.suspend = ohci_da8xx_suspend,
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 20e26a474591..cc5cb0900988 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -37,7 +37,6 @@ struct exynos_ohci_hcd {
static int exynos_ohci_get_phy(struct device *dev,
struct exynos_ohci_hcd *exynos_ohci)
{
- struct device_node *child;
struct phy *phy;
int phy_number, num_phys;
int ret;
@@ -55,26 +54,22 @@ static int exynos_ohci_get_phy(struct device *dev,
return 0;
/* Get PHYs using legacy bindings */
- for_each_available_child_of_node(dev->of_node, child) {
+ for_each_available_child_of_node_scoped(dev->of_node, child) {
ret = of_property_read_u32(child, "reg", &phy_number);
if (ret) {
dev_err(dev, "Failed to parse device tree\n");
- of_node_put(child);
return ret;
}
if (phy_number >= PHY_NUMBER) {
dev_err(dev, "Invalid number of PHYs\n");
- of_node_put(child);
return -EINVAL;
}
phy = devm_of_phy_optional_get(dev, child, NULL);
exynos_ohci->phy[phy_number] = phy;
- if (IS_ERR(phy)) {
- of_node_put(child);
+ if (IS_ERR(phy))
return PTR_ERR(phy);
- }
}
exynos_ohci->legacy_phy = true;
@@ -135,20 +130,16 @@ static int exynos_ohci_probe(struct platform_device *pdev)
err = exynos_ohci_get_phy(&pdev->dev, exynos_ohci);
if (err)
- goto fail_clk;
+ goto fail_io;
- exynos_ohci->clk = devm_clk_get(&pdev->dev, "usbhost");
+ exynos_ohci->clk = devm_clk_get_enabled(&pdev->dev, "usbhost");
if (IS_ERR(exynos_ohci->clk)) {
dev_err(&pdev->dev, "Failed to get usbhost clock\n");
err = PTR_ERR(exynos_ohci->clk);
- goto fail_clk;
+ goto fail_io;
}
- err = clk_prepare_enable(exynos_ohci->clk);
- if (err)
- goto fail_clk;
-
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
@@ -191,8 +182,6 @@ fail_add_hcd:
exynos_ohci_phy_disable(&pdev->dev);
pdev->dev.of_node = exynos_ohci->of_node;
fail_io:
- clk_disable_unprepare(exynos_ohci->clk);
-fail_clk:
usb_put_hcd(hcd);
return err;
}
@@ -208,8 +197,6 @@ static void exynos_ohci_remove(struct platform_device *pdev)
exynos_ohci_phy_disable(&pdev->dev);
- clk_disable_unprepare(exynos_ohci->clk);
-
usb_put_hcd(hcd);
}
@@ -221,7 +208,6 @@ static void exynos_ohci_shutdown(struct platform_device *pdev)
hcd->driver->shutdown(hcd);
}
-#ifdef CONFIG_PM
static int exynos_ohci_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
@@ -258,19 +244,13 @@ static int exynos_ohci_resume(struct device *dev)
return 0;
}
-#else
-#define exynos_ohci_suspend NULL
-#define exynos_ohci_resume NULL
-#endif
static const struct ohci_driver_overrides exynos_overrides __initconst = {
.extra_priv_size = sizeof(struct exynos_ohci_hcd),
};
-static const struct dev_pm_ops exynos_ohci_pm_ops = {
- .suspend = exynos_ohci_suspend,
- .resume = exynos_ohci_resume,
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(exynos_ohci_pm_ops,
+ exynos_ohci_suspend, exynos_ohci_resume);
#ifdef CONFIG_OF
static const struct of_device_id exynos_ohci_match[] = {
@@ -282,11 +262,11 @@ MODULE_DEVICE_TABLE(of, exynos_ohci_match);
static struct platform_driver exynos_ohci_driver = {
.probe = exynos_ohci_probe,
- .remove_new = exynos_ohci_remove,
+ .remove = exynos_ohci_remove,
.shutdown = exynos_ohci_shutdown,
.driver = {
.name = "exynos-ohci",
- .pm = &exynos_ohci_pm_ops,
+ .pm = pm_ptr(&exynos_ohci_pm_ops),
.of_match_table = of_match_ptr(exynos_ohci_match),
}
};
@@ -308,4 +288,5 @@ module_exit(ohci_exynos_cleanup);
MODULE_ALIAS("platform:exynos-ohci");
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("OHCI support for Samsung S5P/Exynos SoC Series");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 4f9982ecfb58..9b24181fee60 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -44,7 +44,7 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <asm/byteorder.h>
@@ -888,6 +888,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
/* Check for an all 1's result which is a typical consequence
* of dead, unclocked, or unplugged (CardBus...) devices
*/
+again:
if (ints == ~(u32)0) {
ohci->rh_state = OHCI_RH_HALTED;
ohci_dbg (ohci, "device removed!\n");
@@ -982,6 +983,13 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
}
spin_unlock(&ohci->lock);
+ /* repeat until all enabled interrupts are handled */
+ if (ohci->rh_state != OHCI_RH_HALTED) {
+ ints = ohci_readl(ohci, &regs->intrstatus);
+ if (ints && (ints & ohci_readl(ohci, &regs->intrenable)))
+ goto again;
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 8264c454f6bd..24d5a1dc5056 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -51,8 +51,6 @@ static struct hc_driver __read_mostly ohci_nxp_hc_driver;
static struct i2c_client *isp1301_i2c_client;
-static struct clk *usb_host_clk;
-
static void isp1301_configure_lpc32xx(void)
{
/* LPC32XX only supports DAT_SE0 USB mode */
@@ -155,6 +153,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
struct resource *res;
int ret = 0, irq;
struct device_node *isp1301_node;
+ struct clk *usb_host_clk;
if (pdev->dev.of_node) {
isp1301_node = of_parse_phandle(pdev->dev.of_node,
@@ -180,26 +179,20 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
}
/* Enable USB host clock */
- usb_host_clk = devm_clk_get(&pdev->dev, NULL);
+ usb_host_clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(usb_host_clk)) {
- dev_err(&pdev->dev, "failed to acquire USB OHCI clock\n");
+ dev_err(&pdev->dev, "failed to acquire and start USB OHCI clock\n");
ret = PTR_ERR(usb_host_clk);
goto fail_disable;
}
- ret = clk_prepare_enable(usb_host_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to start USB OHCI clock\n");
- goto fail_disable;
- }
-
isp1301_configure();
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
dev_err(&pdev->dev, "Failed to allocate HC buffer\n");
ret = -ENOMEM;
- goto fail_hcd;
+ goto fail_disable;
}
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
@@ -229,8 +222,6 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
ohci_nxp_stop_hc();
fail_resource:
usb_put_hcd(hcd);
-fail_hcd:
- clk_disable_unprepare(usb_host_clk);
fail_disable:
isp1301_i2c_client = NULL;
return ret;
@@ -243,7 +234,6 @@ static void ohci_hcd_nxp_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
ohci_nxp_stop_hc();
usb_put_hcd(hcd);
- clk_disable_unprepare(usb_host_clk);
isp1301_i2c_client = NULL;
}
@@ -264,7 +254,7 @@ static struct platform_driver ohci_hcd_nxp_driver = {
.of_match_table = of_match_ptr(ohci_hcd_nxp_match),
},
.probe = ohci_hcd_nxp_probe,
- .remove_new = ohci_hcd_nxp_remove,
+ .remove = ohci_hcd_nxp_remove,
};
static int __init ohci_nxp_init(void)
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 21a6f6c55e07..f6e56c4b9914 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -152,7 +152,7 @@ static int ohci_omap_reset(struct usb_hcd *hcd)
rh &= ~RH_A_NOCP;
- /* gpio9 for overcurrent detction */
+ /* gpio9 for overcurrent detection */
omap_cfg_reg(W8_1610_GPIO9);
/* for paranoia's sake: disable USB.PUEN */
@@ -390,7 +390,7 @@ static int ohci_omap_resume(struct platform_device *dev)
*/
static struct platform_driver ohci_hcd_omap_driver = {
.probe = ohci_hcd_omap_probe,
- .remove_new = ohci_hcd_omap_remove,
+ .remove = ohci_hcd_omap_remove,
.shutdown = usb_hcd_platform_shutdown,
#ifdef CONFIG_PM
.suspend = ohci_omap_suspend,
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 4a75507325dd..f47ae12cde6a 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -344,7 +344,7 @@ static const struct dev_pm_ops ohci_platform_pm_ops = {
static struct platform_driver ohci_platform_driver = {
.id_table = ohci_platform_table,
.probe = ohci_platform_probe,
- .remove_new = ohci_platform_remove,
+ .remove = ohci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ohci-platform",
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index f64bfe5f4d4d..acd0a0e398a4 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -204,10 +204,6 @@ static const struct of_device_id ohci_hcd_ppc_of_match[] = {
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_LE
{
.name = "usb",
- .compatible = "ohci-littledian",
- },
- {
- .name = "usb",
.compatible = "ohci-le",
},
#endif
@@ -223,7 +219,7 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match);
static struct platform_driver ohci_hcd_ppc_of_driver = {
.probe = ohci_hcd_ppc_of_probe,
- .remove_new = ohci_hcd_ppc_of_remove,
+ .remove = ohci_hcd_ppc_of_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ppc-of-ohci",
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 357d9aee38a3..45d026e85168 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/pxa2xx_udc.h>
#include <linux/platform_device.h>
@@ -570,7 +569,7 @@ static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
static struct platform_driver ohci_hcd_pxa27x_driver = {
.probe = ohci_hcd_pxa27x_probe,
- .remove_new = ohci_hcd_pxa27x_remove,
+ .remove = ohci_hcd_pxa27x_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "pxa27x-ohci",
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index c5c9b4cbcb9a..66d970854357 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -457,7 +457,7 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_s3c2410_dt_ids);
static struct platform_driver ohci_hcd_s3c2410_driver = {
.probe = ohci_hcd_s3c2410_probe,
- .remove_new = ohci_hcd_s3c2410_remove,
+ .remove = ohci_hcd_s3c2410_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "s3c2410-ohci",
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 4b39e9d6f33a..843a5378764e 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -252,7 +252,7 @@ static int ohci_sm501_resume(struct platform_device *pdev)
*/
static struct platform_driver ohci_hcd_sm501_driver = {
.probe = ohci_hcd_sm501_drv_probe,
- .remove_new = ohci_hcd_sm501_drv_remove,
+ .remove = ohci_hcd_sm501_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.suspend = ohci_sm501_suspend,
.resume = ohci_sm501_resume,
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 993f347c5c28..d7131e5a4477 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -157,7 +157,7 @@ MODULE_DEVICE_TABLE(of, spear_ohci_id_table);
/* Driver definition to register with the platform bus */
static struct platform_driver spear_ohci_hcd_driver = {
.probe = spear_ohci_hcd_drv_probe,
- .remove_new = spear_ohci_hcd_drv_remove,
+ .remove = spear_ohci_hcd_drv_remove,
#ifdef CONFIG_PM
.suspend = spear_ohci_hcd_drv_suspend,
.resume = spear_ohci_hcd_drv_resume,
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
index 214342013f7e..d1656fce5400 100644
--- a/drivers/usb/host/ohci-st.c
+++ b/drivers/usb/host/ohci-st.c
@@ -298,7 +298,7 @@ MODULE_DEVICE_TABLE(of, st_ohci_platform_ids);
static struct platform_driver ohci_platform_driver = {
.probe = st_ohci_platform_probe,
- .remove_new = st_ohci_platform_remove,
+ .remove = st_ohci_platform_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "st-ohci",
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index d467472f9d3c..fce800ba4c61 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -15,6 +15,7 @@
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
@@ -27,7 +28,7 @@
#include <linux/iopoll.h>
#include <asm/irq.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
@@ -196,31 +197,6 @@ struct ehci_regs {
#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
} __packed;
-/* Appendix C, Debug port ... intended for use with special "debug devices"
- * that can help if there's no serial console. (nonstandard enumeration.)
- */
-struct ehci_dbg_port {
- u32 control;
-#define DBGP_OWNER (1<<30)
-#define DBGP_ENABLED (1<<28)
-#define DBGP_DONE (1<<16)
-#define DBGP_INUSE (1<<10)
-#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
-# define DBGP_ERR_BAD 1
-# define DBGP_ERR_SIGNAL 2
-#define DBGP_ERROR (1<<6)
-#define DBGP_GO (1<<5)
-#define DBGP_OUT (1<<4)
-#define DBGP_LEN(x) (((x)>>0)&0x0f)
- u32 pids;
-#define DBGP_PID_GET(x) (((x)>>16)&0xff)
-#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
- u32 data03;
- u32 data47;
- u32 address;
-#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
-} __packed;
-
#define QTD_NEXT(dma) cpu_to_le32((u32)dma)
/*
@@ -910,7 +886,7 @@ static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
int a_blocks; /* blocks allocated */
int i, j;
- /* Don't allocte bigger than supported */
+ /* Don't allocate bigger than supported */
if (len > BUFFER_SIZE * BUFFER_NUM) {
oxu_err(oxu, "buffer too big (%d)\n", len);
return -ENOMEM;
@@ -927,7 +903,7 @@ static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
/* Find a suitable available data buffer */
for (i = 0; i < BUFFER_NUM;
- i += max(a_blocks, (int)oxu->db_used[i])) {
+ i += max_t(int, a_blocks, oxu->db_used[i])) {
/* Check all the required blocks are available */
for (j = 0; j < a_blocks; j++)
@@ -2781,7 +2757,7 @@ static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
if (!HCS_PPC(oxu->hcs_params))
return;
- oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
+ oxu_dbg(oxu, "...power%s ports...\n", str_up_down(is_on));
for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) {
if (is_on)
oxu_hub_control(oxu_to_hcd(oxu), SetPortFeature,
@@ -3065,7 +3041,7 @@ static int oxu_hcd_init(struct usb_hcd *hcd)
* make problems: throughput reduction (!), data errors...
*/
if (park) {
- park = min(park, (unsigned) 3);
+ park = min_t(unsigned int, park, 3);
temp |= CMD_PARK;
temp |= park << 8;
}
@@ -4314,7 +4290,7 @@ static int oxu_drv_resume(struct device *dev)
static struct platform_driver oxu_driver = {
.probe = oxu_drv_probe,
- .remove_new = oxu_drv_remove,
+ .remove = oxu_drv_remove,
.shutdown = oxu_drv_shutdown,
.suspend = oxu_drv_suspend,
.resume = oxu_drv_resume,
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 1f9c1b1435d8..0404489c2f6a 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -958,6 +958,15 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev)
* booting from USB disk or using a usb keyboard
*/
hcc_params = readl(base + EHCI_HCC_PARAMS);
+
+ /* LS7A EHCI controller doesn't have extended capabilities, the
+ * EECP (EHCI Extended Capabilities Pointer) field of HCCPARAMS
+ * register should be 0x0 but it reads as 0xa0. So clear it to
+ * avoid error messages on boot.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_LOONGSON && pdev->device == 0x7a14)
+ hcc_params &= ~(0xffL << 8);
+
offset = (hcc_params >> 8) & 0xff;
while (offset && --count) {
pci_read_config_dword(pdev, offset, &cap);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 9f4bf8c5f8a5..a44992e2561b 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -297,9 +297,9 @@ static void put_child_connect_map(struct r8a66597 *r8a66597, int address)
static void set_pipe_reg_addr(struct r8a66597_pipe *pipe, u8 dma_ch)
{
u16 pipenum = pipe->info.pipenum;
- const unsigned long fifoaddr[] = {D0FIFO, D1FIFO, CFIFO};
- const unsigned long fifosel[] = {D0FIFOSEL, D1FIFOSEL, CFIFOSEL};
- const unsigned long fifoctr[] = {D0FIFOCTR, D1FIFOCTR, CFIFOCTR};
+ static const unsigned long fifoaddr[] = {D0FIFO, D1FIFO, CFIFO};
+ static const unsigned long fifosel[] = {D0FIFOSEL, D1FIFOSEL, CFIFOSEL};
+ static const unsigned long fifoctr[] = {D0FIFOCTR, D1FIFOCTR, CFIFOCTR};
if (dma_ch > R8A66597_PIPE_NO_DMA) /* dma fifo not use? */
dma_ch = R8A66597_PIPE_NO_DMA;
@@ -759,7 +759,7 @@ static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597,
struct r8a66597_pipe_info *info = &pipe->info;
unsigned short mbw = mbw_value(r8a66597);
- /* pipe dma is only for external controlles */
+ /* pipe dma is only for external controllers */
if (r8a66597->pdata->on_chip)
return;
@@ -1336,7 +1336,7 @@ static void packet_read(struct r8a66597 *r8a66597, u16 pipenum)
buf = (void *)urb->transfer_buffer + urb->actual_length;
urb_len = urb->transfer_buffer_length - urb->actual_length;
}
- bufsize = min(urb_len, (int) td->maxpacket);
+ bufsize = min_t(int, urb_len, td->maxpacket);
if (rcv_len <= bufsize) {
size = rcv_len;
} else {
@@ -2510,7 +2510,7 @@ clean_up:
static struct platform_driver r8a66597_driver = {
.probe = r8a66597_probe,
- .remove_new = r8a66597_remove,
+ .remove = r8a66597_remove,
.driver = {
.name = hcd_name,
.pm = R8A66597_DEV_PM_OPS,
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 0956495bba57..fa2e4badd288 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -48,13 +48,14 @@
#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
+#include <linux/string_choices.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "sl811.h"
@@ -98,7 +99,7 @@ static void port_power(struct sl811 *sl811, int is_on)
if (sl811->board && sl811->board->port_power) {
/* switch VBUS, at 500mA unless hub power budget gets set */
dev_dbg(hcd->self.controller, "power %s\n",
- is_on ? "on" : "off");
+ str_on_off(is_on));
sl811->board->port_power(hcd->self.controller, is_on);
}
@@ -585,6 +586,7 @@ done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank)
finish_request(sl811, ep, urb, urbstat);
}
+#ifdef QUIRK2
static inline u8 checkdone(struct sl811 *sl811)
{
u8 ctl;
@@ -616,6 +618,7 @@ static inline u8 checkdone(struct sl811 *sl811)
#endif
return irqstat;
}
+#endif
static irqreturn_t sl811h_irq(struct usb_hcd *hcd)
{
@@ -1782,7 +1785,7 @@ sl811h_resume(struct platform_device *dev)
/* this driver is exported so sl811_cs can depend on it */
struct platform_driver sl811h_driver = {
.probe = sl811h_probe,
- .remove_new = sl811h_remove,
+ .remove = sl811h_remove,
.suspend = sl811h_suspend,
.resume = sl811h_resume,
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
index cfebb833668e..8a1f6d1b5b56 100644
--- a/drivers/usb/host/uhci-grlib.c
+++ b/drivers/usb/host/uhci-grlib.c
@@ -184,7 +184,7 @@ MODULE_DEVICE_TABLE(of, uhci_hcd_grlib_of_match);
static struct platform_driver uhci_grlib_driver = {
.probe = uhci_hcd_grlib_probe,
- .remove_new = uhci_hcd_grlib_remove,
+ .remove = uhci_hcd_grlib_remove,
.shutdown = uhci_hcd_grlib_shutdown,
.driver = {
.name = "grlib-uhci",
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 3dec5dd3a0d5..a7c934404ebc 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -184,7 +184,7 @@ MODULE_DEVICE_TABLE(of, platform_uhci_ids);
static struct platform_driver uhci_platform_driver = {
.probe = uhci_hcd_platform_probe,
- .remove_new = uhci_hcd_platform_remove,
+ .remove = uhci_hcd_platform_remove,
.shutdown = uhci_hcd_platform_shutdown,
.driver = {
.name = "platform-uhci",
diff --git a/drivers/usb/host/xhci-caps.h b/drivers/usb/host/xhci-caps.h
new file mode 100644
index 000000000000..f6b9a00a0ab9
--- /dev/null
+++ b/drivers/usb/host/xhci-caps.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* hc_capbase bitmasks */
+/* bits 7:0 - how long is the Capabilities register */
+#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
+/* bits 31:16 */
+#define HC_VERSION(p) (((p) >> 16) & 0xffff)
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Slots */
+#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
+#define HCS_SLOTS_MASK 0xff
+/* bits 8:18, Max Interrupters */
+#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
+/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
+#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
+
+/* HCSPARAMS2 - hcs_params2 - bitmasks */
+/* bits 0:3, frames or uframes that SW needs to queue transactions
+ * ahead of the HW to meet periodic deadlines */
+#define HCS_IST(p) (((p) >> 0) & 0xf)
+/* bits 4:7, max number of Event Ring segments */
+#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
+/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
+/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
+#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
+
+/* HCSPARAMS3 - hcs_params3 - bitmasks */
+/* bits 0:7, Max U1 to U0 latency for the roothub ports */
+#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
+/* bits 16:31, Max U2 to U0 latency for the roothub ports */
+#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* true: HC can use 64-bit address pointers */
+#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
+/* true: HC can do bandwidth negotiation */
+#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
+/* true: HC uses 64-byte Device Context structures
+ * FIXME 64-byte context structures aren't supported yet.
+ */
+#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
+/* true: HC has port power switches */
+#define HCC_PPC(p) ((p) & (1 << 3))
+/* true: HC has port indicators */
+#define HCS_INDICATOR(p) ((p) & (1 << 4))
+/* true: HC has Light HC Reset Capability */
+#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
+/* true: HC supports latency tolerance messaging */
+#define HCC_LTC(p) ((p) & (1 << 6))
+/* true: no secondary Stream ID Support */
+#define HCC_NSS(p) ((p) & (1 << 7))
+/* true: HC supports Stopped - Short Packet */
+#define HCC_SPC(p) ((p) & (1 << 9))
+/* true: HC has Contiguous Frame ID Capability */
+#define HCC_CFC(p) ((p) & (1 << 11))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
+#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
+/* Extended Capabilities pointer from PCI base - section 5.3.6 */
+#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
+
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+/* db_off bitmask - bits 0:1 reserved */
+#define DBOFF_MASK (~0x3)
+
+/* run_regs_off bitmask - bits 0:4 reserved */
+#define RTSOFF_MASK (~0x1f)
+
+/* HCCPARAMS2 - hcc_params2 - bitmasks */
+/* true: HC supports U3 entry Capability */
+#define HCC2_U3C(p) ((p) & (1 << 0))
+/* true: HC supports Configure endpoint command Max exit latency too large */
+#define HCC2_CMC(p) ((p) & (1 << 1))
+/* true: HC supports Force Save context Capability */
+#define HCC2_FSC(p) ((p) & (1 << 2))
+/* true: HC supports Compliance Transition Capability */
+#define HCC2_CTC(p) ((p) & (1 << 3))
+/* true: HC support Large ESIT payload Capability > 48k */
+#define HCC2_LEC(p) ((p) & (1 << 4))
+/* true: HC support Configuration Information Capability */
+#define HCC2_CIC(p) ((p) & (1 << 5))
+/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */
+#define HCC2_ETC(p) ((p) & (1 << 6))
+/* true: HC support Extended TBC TRB Status Capability */
+#define HCC2_ETC_TSC(p) ((p) & (1 << 7))
+/* true: HC support Get/Set Extended Property Capability */
+#define HCC2_GSC(p) ((p) & (1 << 8))
+/* true: HC support Virtualization Based Trusted I/O Capability */
+#define HCC2_VTC(p) ((p) & (1 << 9))
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index d82935d31126..fd7895b24367 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -173,16 +173,18 @@ static void xhci_dbc_giveback(struct dbc_request *req, int status)
spin_lock(&dbc->lock);
}
-static void xhci_dbc_flush_single_request(struct dbc_request *req)
+static void trb_to_noop(union xhci_trb *trb)
{
- union xhci_trb *trb = req->trb;
-
trb->generic.field[0] = 0;
trb->generic.field[1] = 0;
trb->generic.field[2] = 0;
trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
+}
+static void xhci_dbc_flush_single_request(struct dbc_request *req)
+{
+ trb_to_noop(req->trb);
xhci_dbc_giveback(req, -ESHUTDOWN);
}
@@ -246,8 +248,9 @@ xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
trb->generic.field[2] = cpu_to_le32(field3);
trb->generic.field[3] = cpu_to_le32(field4);
- trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);
-
+ trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic,
+ xhci_trb_virt_to_dma(ring->enq_seg,
+ ring->enqueue));
ring->num_trbs_free--;
next = ++(ring->enqueue);
if (TRB_TYPE_LINK_LE32(next->link.control)) {
@@ -469,7 +472,7 @@ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
}
INIT_LIST_HEAD(&ring->td_list);
- xhci_initialize_ring_info(ring, 1);
+ xhci_initialize_ring_info(ring);
return ring;
dma_fail:
kfree(seg);
@@ -516,7 +519,7 @@ static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
goto string_fail;
/* Setup ERST register: */
- writel(dbc->erst.erst_size, &dbc->regs->ersts);
+ writel(dbc->erst.num_entries, &dbc->regs->ersts);
lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
@@ -634,7 +637,8 @@ static int xhci_dbc_start(struct xhci_dbc *dbc)
return ret;
}
- return mod_delayed_work(system_wq, &dbc->event_work, 1);
+ return mod_delayed_work(system_wq, &dbc->event_work,
+ msecs_to_jiffies(dbc->poll_interval));
}
static void xhci_dbc_stop(struct xhci_dbc *dbc)
@@ -648,7 +652,6 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc)
case DS_DISABLED:
return;
case DS_CONFIGURED:
- case DS_STALLED:
if (dbc->driver->disconnect)
dbc->driver->disconnect(dbc);
break;
@@ -669,6 +672,23 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc)
}
static void
+handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted)
+{
+ if (halted) {
+ dev_info(dbc->dev, "DbC Endpoint halted\n");
+ dep->halted = 1;
+
+ } else if (dep->halted) {
+ dev_info(dbc->dev, "DbC Endpoint halt cleared\n");
+ dep->halted = 0;
+
+ if (!list_empty(&dep->list_pending))
+ writel(DBC_DOOR_BELL_TARGET(dep->direction),
+ &dbc->regs->doorbell);
+ }
+}
+
+static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
u32 portsc;
@@ -696,6 +716,7 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
struct xhci_ring *ring;
int ep_id;
int status;
+ struct xhci_ep_ctx *ep_ctx;
u32 comp_code;
size_t remain_length;
struct dbc_request *req = NULL, *r;
@@ -705,8 +726,30 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
dep = (ep_id == EPID_OUT) ?
get_out_ep(dbc) : get_in_ep(dbc);
+ ep_ctx = (ep_id == EPID_OUT) ?
+ dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc);
ring = dep->ring;
+ /* Match the pending request: */
+ list_for_each_entry(r, &dep->list_pending, list_pending) {
+ if (r->trb_dma == event->trans_event.buffer) {
+ req = r;
+ break;
+ }
+ if (r->status == -COMP_STALL_ERROR) {
+ dev_warn(dbc->dev, "Give back stale stalled req\n");
+ ring->num_trbs_free++;
+ xhci_dbc_giveback(r, 0);
+ }
+ }
+
+ if (!req) {
+ dev_warn(dbc->dev, "no matched request\n");
+ return;
+ }
+
+ trace_xhci_dbc_handle_transfer(ring, &req->trb->generic, req->trb_dma);
+
switch (comp_code) {
case COMP_SUCCESS:
remain_length = 0;
@@ -717,31 +760,49 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
case COMP_TRB_ERROR:
case COMP_BABBLE_DETECTED_ERROR:
case COMP_USB_TRANSACTION_ERROR:
- case COMP_STALL_ERROR:
dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
status = -comp_code;
break;
+ case COMP_STALL_ERROR:
+ dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
+ event->trans_event.buffer, remain_length, ep_ctx->deq);
+ status = 0;
+ dep->halted = 1;
+
+ /*
+ * xHC DbC may trigger a STALL bulk xfer event when host sends a
+ * ClearFeature(ENDPOINT_HALT) request even if there wasn't an
+ * active bulk transfer.
+ *
+ * Don't give back this transfer request as hardware will later
+ * start processing TRBs starting from this 'STALLED' TRB,
+ * causing TRBs and requests to be out of sync.
+ *
+ * If STALL event shows some bytes were transferred then assume
+ * it's an actual transfer issue and give back the request.
+ * In this case mark the TRB as No-Op to avoid hw from using the
+ * TRB again.
+ */
+
+ if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
+ dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
+ if (remain_length == req->length) {
+ dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
+ req->status = -COMP_STALL_ERROR;
+ req->actual = 0;
+ return;
+ }
+ dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
+ trb_to_noop(req->trb);
+ }
+ break;
+
default:
dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
status = -comp_code;
break;
}
- /* Match the pending request: */
- list_for_each_entry(r, &dep->list_pending, list_pending) {
- if (r->trb_dma == event->trans_event.buffer) {
- req = r;
- break;
- }
- }
-
- if (!req) {
- dev_warn(dbc->dev, "no matched request\n");
- return;
- }
-
- trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
-
ring->num_trbs_free++;
req->actual = req->length - remain_length;
xhci_dbc_giveback(req, status);
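For clarity, a minimal userspace sketch of the stall-classification decision made in the COMP_STALL_ERROR branch above (addresses and lengths are hypothetical; the real code works on the endpoint context and dbc_request):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define TRB_CYCLE 0x1ULL	/* cycle/DCS bit in the low bit of the dequeue pointer */

enum stall_action { KEEP_REQUEST, NOOP_TRB_AND_GIVEBACK, GIVEBACK };

/*
 * Mirrors the COMP_STALL_ERROR branch: a stall event pointing at the
 * endpoint's current dequeue TRB with no bytes transferred is treated as
 * spurious (ClearFeature(ENDPOINT_HALT) with no active transfer), so the
 * request is kept; otherwise the request is given back, turning the TRB
 * into a no-op if hardware stopped on it.
 */
static enum stall_action classify_stall(uint64_t ep_deq, uint64_t event_trb_dma,
					size_t remain, size_t req_len)
{
	if ((ep_deq & ~TRB_CYCLE) != event_trb_dma)
		return GIVEBACK;
	if (remain == req_len)
		return KEEP_REQUEST;
	return NOOP_TRB_AND_GIVEBACK;
}

int main(void)
{
	printf("%d\n", classify_stall(0x1000 | 1, 0x1000, 512, 512)); /* 0: keep request */
	printf("%d\n", classify_stall(0x1000 | 1, 0x1000, 100, 512)); /* 1: no-op TRB, give back */
	printf("%d\n", classify_stall(0x2000, 0x1000, 0, 512));       /* 2: plain give back */
	return 0;
}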
@@ -761,7 +822,6 @@ static void inc_evt_deq(struct xhci_ring *ring)
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
dma_addr_t deq;
- struct dbc_ep *dep;
union xhci_trb *evt;
u32 ctrl, portsc;
bool update_erdp = false;
@@ -813,43 +873,17 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
return EVT_DISC;
}
- /* Handle endpoint stall event: */
+ /* Check and handle changes in endpoint halt status */
ctrl = readl(&dbc->regs->control);
- if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
- (ctrl & DBC_CTRL_HALT_OUT_TR)) {
- dev_info(dbc->dev, "DbC Endpoint stall\n");
- dbc->state = DS_STALLED;
-
- if (ctrl & DBC_CTRL_HALT_IN_TR) {
- dep = get_in_ep(dbc);
- xhci_dbc_flush_endpoint_requests(dep);
- }
-
- if (ctrl & DBC_CTRL_HALT_OUT_TR) {
- dep = get_out_ep(dbc);
- xhci_dbc_flush_endpoint_requests(dep);
- }
-
- return EVT_DONE;
- }
+ handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR);
+ handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR);
/* Clear DbC run change bit: */
if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
writel(ctrl, &dbc->regs->control);
ctrl = readl(&dbc->regs->control);
}
-
break;
- case DS_STALLED:
- ctrl = readl(&dbc->regs->control);
- if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
- !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
- (ctrl & DBC_CTRL_DBC_RUN)) {
- dbc->state = DS_CONFIGURED;
- break;
- }
-
- return EVT_DONE;
default:
dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
break;
@@ -865,7 +899,9 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
*/
rmb();
- trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);
+ trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic,
+ xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
+ dbc->ring_evt->dequeue));
switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
case TRB_TYPE(TRB_PORT_STATUS):
@@ -899,8 +935,10 @@ static void xhci_dbc_handle_events(struct work_struct *work)
enum evtreturn evtr;
struct xhci_dbc *dbc;
unsigned long flags;
+ unsigned int poll_interval;
dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
+ poll_interval = dbc->poll_interval;
spin_lock_irqsave(&dbc->lock, flags);
evtr = xhci_dbc_do_handle_events(dbc);
@@ -916,13 +954,18 @@ static void xhci_dbc_handle_events(struct work_struct *work)
dbc->driver->disconnect(dbc);
break;
case EVT_DONE:
+ /* set fast poll rate if there are pending data transfers */
+ if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
+ !list_empty(&dbc->eps[BULK_IN].list_pending))
+ poll_interval = 0;
break;
default:
dev_info(dbc->dev, "stop handling dbc events\n");
return;
}
- mod_delayed_work(system_wq, &dbc->event_work, 1);
+ mod_delayed_work(system_wq, &dbc->event_work,
+ msecs_to_jiffies(poll_interval));
}
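A tiny sketch of the adaptive polling rule used above: the delayed work is rescheduled at the configured interval, dropping to an immediate poll while either bulk endpoint still has pending requests (the header change further down sets the default interval to 64 ms):

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative only: the next poll delay is the configured interval in
 * milliseconds, dropped to 0 (poll again at once) while either bulk
 * endpoint still has pending transfer requests.
 */
static unsigned int next_poll_delay_ms(unsigned int poll_interval_ms,
				       bool out_pending, bool in_pending)
{
	return (out_pending || in_pending) ? 0 : poll_interval_ms;
}

int main(void)
{
	printf("%u\n", next_poll_delay_ms(64, false, false)); /* 64: idle, slow poll */
	printf("%u\n", next_poll_delay_ms(64, true, false));  /* 0: data in flight */
	return 0;
}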
static const char * const dbc_state_strings[DS_MAX] = {
@@ -931,7 +974,6 @@ static const char * const dbc_state_strings[DS_MAX] = {
[DS_ENABLED] = "enabled",
[DS_CONNECTED] = "connected",
[DS_CONFIGURED] = "configured",
- [DS_STALLED] = "stalled",
};
static ssize_t dbc_show(struct device *dev,
@@ -1142,11 +1184,48 @@ static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
return size;
}
+static ssize_t dbc_poll_interval_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct xhci_dbc *dbc;
+ struct xhci_hcd *xhci;
+
+ xhci = hcd_to_xhci(dev_get_drvdata(dev));
+ dbc = xhci->dbc;
+
+ return sysfs_emit(buf, "%u\n", dbc->poll_interval);
+}
+
+static ssize_t dbc_poll_interval_ms_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct xhci_dbc *dbc;
+ struct xhci_hcd *xhci;
+ u32 value;
+ int ret;
+
+ ret = kstrtou32(buf, 0, &value);
+ if (ret || value > DBC_POLL_INTERVAL_MAX)
+ return -EINVAL;
+
+ xhci = hcd_to_xhci(dev_get_drvdata(dev));
+ dbc = xhci->dbc;
+
+ dbc->poll_interval = value;
+
+ mod_delayed_work(system_wq, &dbc->event_work, 0);
+
+ return size;
+}
+
static DEVICE_ATTR_RW(dbc);
static DEVICE_ATTR_RW(dbc_idVendor);
static DEVICE_ATTR_RW(dbc_idProduct);
static DEVICE_ATTR_RW(dbc_bcdDevice);
static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);
+static DEVICE_ATTR_RW(dbc_poll_interval_ms);
static struct attribute *dbc_dev_attrs[] = {
&dev_attr_dbc.attr,
@@ -1154,6 +1233,7 @@ static struct attribute *dbc_dev_attrs[] = {
&dev_attr_dbc_idProduct.attr,
&dev_attr_dbc_bcdDevice.attr,
&dev_attr_dbc_bInterfaceProtocol.attr,
+ &dev_attr_dbc_poll_interval_ms.attr,
NULL
};
ATTRIBUTE_GROUPS(dbc_dev);
@@ -1175,6 +1255,7 @@ xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *
dbc->idVendor = DBC_VENDOR_ID;
dbc->bcdDevice = DBC_DEVICE_REV;
dbc->bInterfaceProtocol = DBC_PROTOCOL;
+ dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;
if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
goto err;
diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
index e39e3ae1677a..9dc8f4d8077c 100644
--- a/drivers/usb/host/xhci-dbgcap.h
+++ b/drivers/usb/host/xhci-dbgcap.h
@@ -81,7 +81,6 @@ enum dbc_state {
DS_ENABLED,
DS_CONNECTED,
DS_CONFIGURED,
- DS_STALLED,
DS_MAX
};
@@ -90,11 +89,13 @@ struct dbc_ep {
struct list_head list_pending;
struct xhci_ring *ring;
unsigned int direction:1;
+ unsigned int halted:1;
};
#define DBC_QUEUE_SIZE 16
#define DBC_WRITE_BUF_SIZE 8192
-
+#define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */
+#define DBC_POLL_INTERVAL_MAX 5000 /* milliseconds */
/*
* Private structure for DbC hardware state:
*/
@@ -109,7 +110,7 @@ struct dbc_port {
struct tasklet_struct push;
struct list_head write_pool;
- struct kfifo write_fifo;
+ unsigned int tx_boundary;
bool registered;
};
@@ -140,6 +141,7 @@ struct xhci_dbc {
enum dbc_state state;
struct delayed_work event_work;
+ unsigned int poll_interval; /* ms */
unsigned resume_required:1;
struct dbc_ep eps[2];
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index b74e98e94393..60ed753c85bb 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -25,16 +25,26 @@ static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
}
static unsigned int
-dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
+dbc_kfifo_to_req(struct dbc_port *port, char *packet)
{
- unsigned int len;
-
- len = kfifo_len(&port->write_fifo);
- if (len < size)
- size = len;
- if (size != 0)
- size = kfifo_out(&port->write_fifo, packet, size);
- return size;
+ unsigned int len;
+
+ len = kfifo_len(&port->port.xmit_fifo);
+
+ if (len == 0)
+ return 0;
+
+ len = min(len, DBC_MAX_PACKET);
+
+ if (port->tx_boundary)
+ len = min(port->tx_boundary, len);
+
+ len = kfifo_out(&port->port.xmit_fifo, packet, len);
+
+ if (port->tx_boundary)
+ port->tx_boundary -= len;
+
+ return len;
}
static int dbc_start_tx(struct dbc_port *port)
@@ -49,7 +59,7 @@ static int dbc_start_tx(struct dbc_port *port)
while (!list_empty(pool)) {
req = list_entry(pool->next, struct dbc_request, list_pool);
- len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
+ len = dbc_kfifo_to_req(port, req->buf);
if (len == 0)
break;
do_tty_wake = true;
@@ -100,15 +110,74 @@ static void dbc_start_rx(struct dbc_port *port)
}
}
+/*
+ * Queue received data to tty buffer and push it.
+ *
+ * Returns the number of remaining bytes that didn't fit the tty buffer,
+ * i.e. 0 if all bytes were successfully moved. On error returns a negative errno.
+ * Call with lock held
+ */
+static int dbc_rx_push_buffer(struct dbc_port *port, struct dbc_request *req)
+{
+ char *packet = req->buf;
+ unsigned int n, size = req->actual;
+ int count;
+
+ if (!req->actual)
+ return 0;
+
+ /* if n_read is set then request was partially moved to tty buffer */
+ n = port->n_read;
+ if (n) {
+ packet += n;
+ size -= n;
+ }
+
+ count = tty_insert_flip_string(&port->port, packet, size);
+ if (count)
+ tty_flip_buffer_push(&port->port);
+ if (count != size) {
+ port->n_read += count;
+ return size - count;
+ }
+
+ port->n_read = 0;
+ return 0;
+}
+
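The partial-copy bookkeeping above can be illustrated with a small self-contained sketch, where fake_tty_insert() stands in for tty_insert_flip_string(), which may accept fewer bytes than offered:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for tty_insert_flip_string(): copies at most 'room' bytes. */
static size_t fake_tty_insert(char *dst, size_t room, const char *src, size_t len)
{
	size_t n = len < room ? len : room;

	memcpy(dst, src, n);
	return n;
}

/*
 * Mirrors dbc_rx_push_buffer(): resume at n_read, copy what fits, remember
 * the new offset if the tty buffer was too small, return remaining bytes.
 */
static size_t push_buffer(const char *req_buf, size_t actual, size_t *n_read,
			  char *dst, size_t room)
{
	const char *packet = req_buf + *n_read;
	size_t size = actual - *n_read;
	size_t count = fake_tty_insert(dst, room, packet, size);

	if (count != size) {
		*n_read += count;
		return size - count;
	}
	*n_read = 0;
	return 0;
}

int main(void)
{
	char dst[4];
	size_t n_read = 0;
	/* 6-byte request, tty buffer only takes 4 bytes at a time. */
	size_t left = push_buffer("abcdef", 6, &n_read, dst, sizeof(dst));

	printf("left=%zu n_read=%zu\n", left, n_read);	/* left=2 n_read=4 */
	left = push_buffer("abcdef", 6, &n_read, dst, sizeof(dst));
	printf("left=%zu n_read=%zu\n", left, n_read);	/* left=0 n_read=0 */
	return 0;
}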
static void
dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
unsigned long flags;
struct dbc_port *port = dbc_to_port(dbc);
+ struct tty_struct *tty;
+ int untransferred;
+
+ tty = port->port.tty;
spin_lock_irqsave(&port->port_lock, flags);
+
+ /*
+ * Only defer copying data to the tty buffer in case:
+ * - !list_empty(&port->read_queue), there are older pending data
+ * - tty is throttled
+ * - failed to copy all data to buffer, defer remaining part
+ */
+
+ if (list_empty(&port->read_queue) && tty && !tty_throttled(tty)) {
+ untransferred = dbc_rx_push_buffer(port, req);
+ if (untransferred == 0) {
+ list_add_tail(&req->list_pool, &port->read_pool);
+ if (req->status != -ESHUTDOWN)
+ dbc_start_rx(port);
+ goto out;
+ }
+ }
+
+ /* defer moving data from req to tty buffer to a tasklet */
list_add_tail(&req->list_pool, &port->read_queue);
tasklet_schedule(&port->push);
+out:
spin_unlock_irqrestore(&port->port_lock, flags);
}
@@ -213,14 +282,32 @@ static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
+ unsigned int written = 0;
spin_lock_irqsave(&port->port_lock, flags);
- if (count)
- count = kfifo_in(&port->write_fifo, buf, count);
- dbc_start_tx(port);
+
+ /*
+ * Treat each tty write as one USB transfer. Make sure the writes are
+ * turned into TRB requests with the same size boundaries as the tty
+ * writes. Don't add data to the kfifo before the previous write has
+ * been turned into TRBs.
+ */
+ if (port->tx_boundary) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return 0;
+ }
+
+ if (count) {
+ written = kfifo_in(&port->port.xmit_fifo, buf, count);
+
+ if (written == count)
+ port->tx_boundary = kfifo_len(&port->port.xmit_fifo);
+
+ dbc_start_tx(port);
+ }
+
spin_unlock_irqrestore(&port->port_lock, flags);
- return count;
+ return written;
}
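A toy model of the tx_boundary bookkeeping introduced above, showing why a second tty write is refused until the first has been fully turned into TRBs (names and sizes are made up for illustration):

#include <stdio.h>

/*
 * Toy model: a tty write is accepted only when the previous write has been
 * fully consumed into TRBs (boundary == 0); draining for TRBs never crosses
 * the recorded boundary, so one tty write maps to one USB transfer.
 */
struct toy_port {
	unsigned int fifo_len;		/* bytes queued in the xmit fifo */
	unsigned int tx_boundary;	/* bytes left of the current tty write */
};

static unsigned int toy_write(struct toy_port *p, unsigned int count)
{
	if (p->tx_boundary)
		return 0;			/* previous write not consumed yet */
	p->fifo_len += count;
	p->tx_boundary = p->fifo_len;
	return count;
}

static unsigned int toy_fill_trb(struct toy_port *p, unsigned int max_packet)
{
	unsigned int len = p->fifo_len < max_packet ? p->fifo_len : max_packet;

	if (len > p->tx_boundary)
		len = p->tx_boundary;
	p->fifo_len -= len;
	p->tx_boundary -= len;
	return len;
}

int main(void)
{
	struct toy_port p = { 0, 0 };

	printf("written %u\n", toy_write(&p, 2000));	/* 2000 */
	printf("written %u\n", toy_write(&p, 100));	/* 0: boundary pending */
	while (p.tx_boundary)
		printf("trb %u\n", toy_fill_trb(&p, 1024)); /* 1024, then 976 */
	printf("written %u\n", toy_write(&p, 100));	/* 100 */
	return 0;
}

While a boundary is outstanding, dbc_tty_write_room() above reports 0 for the same reason toy_write() returns 0 here.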
static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
@@ -230,7 +317,7 @@ static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
int status;
spin_lock_irqsave(&port->port_lock, flags);
- status = kfifo_put(&port->write_fifo, ch);
+ status = kfifo_put(&port->port.xmit_fifo, ch);
spin_unlock_irqrestore(&port->port_lock, flags);
return status;
@@ -253,7 +340,11 @@ static unsigned int dbc_tty_write_room(struct tty_struct *tty)
unsigned int room;
spin_lock_irqsave(&port->port_lock, flags);
- room = kfifo_avail(&port->write_fifo);
+ room = kfifo_avail(&port->port.xmit_fifo);
+
+ if (port->tx_boundary)
+ room = 0;
+
spin_unlock_irqrestore(&port->port_lock, flags);
return room;
@@ -266,7 +357,7 @@ static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
unsigned int chars;
spin_lock_irqsave(&port->port_lock, flags);
- chars = kfifo_len(&port->write_fifo);
+ chars = kfifo_len(&port->port.xmit_fifo);
spin_unlock_irqrestore(&port->port_lock, flags);
return chars;
@@ -299,10 +390,10 @@ static void dbc_rx_push(struct tasklet_struct *t)
struct dbc_request *req;
struct tty_struct *tty;
unsigned long flags;
- bool do_push = false;
bool disconnect = false;
struct dbc_port *port = from_tasklet(port, t, push);
struct list_head *queue = &port->read_queue;
+ int untransferred;
spin_lock_irqsave(&port->port_lock, flags);
tty = port->port.tty;
@@ -324,42 +415,15 @@ static void dbc_rx_push(struct tasklet_struct *t)
break;
}
- if (req->actual) {
- char *packet = req->buf;
- unsigned int n, size = req->actual;
- int count;
-
- n = port->n_read;
- if (n) {
- packet += n;
- size -= n;
- }
-
- count = tty_insert_flip_string(&port->port, packet,
- size);
- if (count)
- do_push = true;
- if (count != size) {
- port->n_read += count;
- break;
- }
- port->n_read = 0;
- }
+ untransferred = dbc_rx_push_buffer(port, req);
+ if (untransferred > 0)
+ break;
- list_move(&req->list_pool, &port->read_pool);
+ list_move_tail(&req->list_pool, &port->read_pool);
}
- if (do_push)
- tty_flip_buffer_push(&port->port);
-
- if (!list_empty(queue) && tty) {
- if (!tty_throttled(tty)) {
- if (do_push)
- tasklet_schedule(&port->push);
- else
- pr_warn("ttyDBC0: RX not scheduled?\n");
- }
- }
+ if (!list_empty(queue))
+ tasklet_schedule(&port->push);
if (!disconnect)
dbc_start_rx(port);
@@ -424,7 +488,8 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
goto err_idr;
}
- ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
+ ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE,
+ GFP_KERNEL);
if (ret)
goto err_exit_port;
@@ -453,7 +518,7 @@ err_free_requests:
xhci_dbc_free_requests(&port->read_pool);
xhci_dbc_free_requests(&port->write_pool);
err_free_fifo:
- kfifo_free(&port->write_fifo);
+ kfifo_free(&port->port.xmit_fifo);
err_exit_port:
idr_remove(&dbc_tty_minors, port->minor);
err_idr:
@@ -478,7 +543,7 @@ static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
idr_remove(&dbc_tty_minors, port->minor);
mutex_unlock(&dbc_tty_minors_lock);
- kfifo_free(&port->write_fifo);
+ kfifo_free(&port->port.xmit_fifo);
xhci_dbc_free_requests(&port->read_pool);
xhci_dbc_free_requests(&port->read_queue);
xhci_dbc_free_requests(&port->write_pool);
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index f8ba15e7c225..1f5ef174abea 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -214,14 +214,11 @@ static void xhci_ring_dump_segment(struct seq_file *s,
static int xhci_ring_trb_show(struct seq_file *s, void *unused)
{
- int i;
struct xhci_ring *ring = *(struct xhci_ring **)s->private;
struct xhci_segment *seg = ring->first_seg;
- for (i = 0; i < ring->num_segs; i++) {
+ xhci_for_each_ring_seg(ring->first_seg, seg)
xhci_ring_dump_segment(s, seg);
- seg = seg->next;
- }
return 0;
}
@@ -235,16 +232,7 @@ static struct xhci_file_map ring_files[] = {
static int xhci_ring_open(struct inode *inode, struct file *file)
{
- int i;
- struct xhci_file_map *f_map;
- const char *file_name = file_dentry(file)->d_iname;
-
- for (i = 0; i < ARRAY_SIZE(ring_files); i++) {
- f_map = &ring_files[i];
-
- if (strcmp(f_map->name, file_name) == 0)
- break;
- }
+ const struct xhci_file_map *f_map = debugfs_get_aux(file);
return single_open(file, f_map->show, inode->i_private);
}
@@ -291,12 +279,13 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
for (ep_index = 0; ep_index < 31; ep_index++) {
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params);
- seq_printf(s, "%pad: %s\n", &dma,
+ seq_printf(s, "%pad: %s, virt_state:%#x\n", &dma,
xhci_decode_ep_context(str,
le32_to_cpu(ep_ctx->ep_info),
le32_to_cpu(ep_ctx->ep_info2),
le64_to_cpu(ep_ctx->deq),
- le32_to_cpu(ep_ctx->tx_info)));
+ le32_to_cpu(ep_ctx->tx_info)),
+ dev->eps[ep_index].ep_state);
}
return 0;
@@ -320,16 +309,7 @@ static struct xhci_file_map context_files[] = {
static int xhci_context_open(struct inode *inode, struct file *file)
{
- int i;
- struct xhci_file_map *f_map;
- const char *file_name = file_dentry(file)->d_iname;
-
- for (i = 0; i < ARRAY_SIZE(context_files); i++) {
- f_map = &context_files[i];
-
- if (strcmp(f_map->name, file_name) == 0)
- break;
- }
+ const struct xhci_file_map *f_map = debugfs_get_aux(file);
return single_open(file, f_map->show, inode->i_private);
}
@@ -412,7 +392,8 @@ static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
int i;
for (i = 0; i < nentries; i++)
- debugfs_create_file(files[i].name, 0444, parent, data, fops);
+ debugfs_create_file_aux(files[i].name, 0444, parent,
+ data, &files[i], fops);
}
static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci,
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 96eb36a58738..67ecf7320c62 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -42,6 +42,7 @@
#define XHCI_EXT_CAPS_DEBUG 10
/* Vendor caps */
#define XHCI_EXT_CAPS_VENDOR_INTEL 192
+#define XHCI_EXT_CAPS_INTEL_SPR_SHADOW 206
/* USB Legacy Support Capability - section 7.1.1 */
#define XHCI_HC_BIOS_OWNED (1 << 16)
#define XHCI_HC_OS_OWNED (1 << 24)
@@ -64,6 +65,10 @@
#define XHCI_HLC (1 << 19)
#define XHCI_BLC (1 << 20)
+/* Intel SPR shadow capability */
+#define XHCI_INTEL_SPR_ESS_PORT_OFFSET 0x8ac4 /* SuperSpeed port control */
+#define XHCI_INTEL_SPR_TUNEN BIT(4) /* Tunnel mode enabled */
+
/* command register values to disable interrupts and halt the HC */
/* start/stop HC execution - do not write unless HC is halted*/
#define XHCI_CMD_RUN (1 << 0)
diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
index f9a4a4b0eb57..8a7d46dae62c 100644
--- a/drivers/usb/host/xhci-histb.c
+++ b/drivers/usb/host/xhci-histb.c
@@ -373,7 +373,7 @@ MODULE_DEVICE_TABLE(of, histb_xhci_of_match);
static struct platform_driver histb_xhci_driver = {
.probe = xhci_histb_probe,
- .remove_new = xhci_histb_remove,
+ .remove = xhci_histb_remove,
.driver = {
.name = "xhci-histb",
.pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0980ade2a234..69c278b64084 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -10,8 +10,9 @@
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/bitfield.h>
+#include <linux/pci.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -448,38 +449,6 @@ u32 xhci_port_state_to_neutral(u32 state)
}
EXPORT_SYMBOL_GPL(xhci_port_state_to_neutral);
-/**
- * xhci_find_slot_id_by_port() - Find slot id of a usb device on a roothub port
- * @hcd: pointer to hcd of the roothub
- * @xhci: pointer to xhci structure
- * @port: one-based port number of the port in this roothub.
- *
- * Return: Slot id of the usb device connected to the root port, 0 if not found
- */
-
-int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
- u16 port)
-{
- int slot_id;
- int i;
- enum usb_device_speed speed;
-
- slot_id = 0;
- for (i = 0; i < MAX_HC_SLOTS; i++) {
- if (!xhci->devs[i] || !xhci->devs[i]->udev)
- continue;
- speed = xhci->devs[i]->udev->speed;
- if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
- && xhci->devs[i]->fake_port == port) {
- slot_id = i;
- break;
- }
- }
-
- return slot_id;
-}
-EXPORT_SYMBOL_GPL(xhci_find_slot_id_by_port);
-
/*
* Stop device
* It issues stop endpoint command for EP 0 to 30. And wait the last command
@@ -784,6 +753,49 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
return xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
}
+/**
+ * xhci_port_is_tunneled() - Check if USB3 connection is tunneled over USB4
+ * @xhci: xhci host controller
+ * @port: USB3 port to be checked.
+ *
+ * Some hosts can detect if a USB3 connection is native USB3 or tunneled over
+ * USB4. Intel hosts expose this via the vendor-specific extended capability
+ * 206 eSS PORT registers' TUNEN (tunnel enabled) bit.
+ *
+ * A USB3 device must be connected to the port to detect the tunnel.
+ *
+ * Return: link tunnel mode enum, USB_LINK_UNKNOWN if host is incapable of
+ * detecting USB3 over USB4 tunnels. USB_LINK_NATIVE or USB_LINK_TUNNELED
+ * otherwise.
+ */
+enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci,
+ struct xhci_port *port)
+{
+ struct usb_hcd *hcd;
+ void __iomem *base;
+ u32 offset;
+
+ /* Don't try and probe this capability for non-Intel hosts */
+ hcd = xhci_to_hcd(xhci);
+ if (!dev_is_pci(hcd->self.controller) ||
+ to_pci_dev(hcd->self.controller)->vendor != PCI_VENDOR_ID_INTEL)
+ return USB_LINK_UNKNOWN;
+
+ base = &xhci->cap_regs->hc_capbase;
+ offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_INTEL_SPR_SHADOW);
+
+ if (offset && offset <= XHCI_INTEL_SPR_ESS_PORT_OFFSET) {
+ offset = XHCI_INTEL_SPR_ESS_PORT_OFFSET + port->hcd_portnum * 0x20;
+
+ if (readl(base + offset) & XHCI_INTEL_SPR_TUNEN)
+ return USB_LINK_TUNNELED;
+ else
+ return USB_LINK_NATIVE;
+ }
+
+ return USB_LINK_UNKNOWN;
+}
+
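A minimal sketch of the register arithmetic used above, assuming the 0x20 per-port stride from the code and a made-up register value:

#include <stdio.h>

#define SPR_ESS_PORT_OFFSET	0x8ac4u	/* eSS PORT register block, as above */
#define SPR_ESS_PORT_STRIDE	0x20u	/* per-port stride used above */
#define SPR_TUNEN		(1u << 4)	/* tunnel enabled */

/* MMIO offset of the eSS PORT register for a zero-based hcd port number. */
static unsigned int ess_port_reg_offset(unsigned int hcd_portnum)
{
	return SPR_ESS_PORT_OFFSET + hcd_portnum * SPR_ESS_PORT_STRIDE;
}

int main(void)
{
	unsigned int regval = SPR_TUNEN;	/* pretend the host reports a tunnel */

	printf("port 2 register at offset 0x%x\n", ess_port_reg_offset(2)); /* 0x8b04 */
	printf("tunneled: %s\n", (regval & SPR_TUNEN) ? "yes" : "no");
	return 0;
}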
void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
u32 link_state)
{
@@ -930,7 +942,6 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
struct xhci_bus_state *bus_state;
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
- int slot_id;
u32 wIndex;
hcd = port->rhub->hcd;
@@ -943,9 +954,9 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
}
/* did port event handler already start resume timing? */
if (!port->resume_timestamp) {
- /* If not, maybe we are in a host initated resume? */
+ /* If not, maybe we are in a host initiated resume? */
if (test_bit(wIndex, &bus_state->resuming_ports)) {
- /* Host initated resume doesn't time the resume
+ /* Host initiated resume doesn't time the resume
* signalling using resume_done[].
* It manually sets RESUME state, sleeps 20ms
* and sets U0 state. This should probably be
@@ -986,13 +997,11 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
spin_lock_irqsave(&xhci->lock, *flags);
if (time_left) {
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- wIndex + 1);
- if (!slot_id) {
+ if (!port->slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
return -ENODEV;
}
- xhci_ring_device(xhci, slot_id);
+ xhci_ring_device(xhci, port->slot_id);
} else {
int port_status = readl(port->addr);
@@ -1202,7 +1211,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
unsigned long flags;
u32 temp, status;
int retval = 0;
- int slot_id;
struct xhci_bus_state *bus_state;
u16 link_state = 0;
u16 wake_mask = 0;
@@ -1332,15 +1340,13 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- portnum1);
- if (!slot_id) {
+ if (!port->slot_id) {
xhci_warn(xhci, "slot_id is zero\n");
goto error;
}
/* unlock to execute stop endpoint commands */
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_stop_device(xhci, slot_id, 1);
+ xhci_stop_device(xhci, port->slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port, XDEV_U3);
@@ -1463,14 +1469,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (link_state == USB_SS_PORT_LS_U3) {
int retries = 16;
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- portnum1);
- if (slot_id) {
+ if (port->slot_id) {
/* unlock to execute stop endpoint
* commands */
spin_unlock_irqrestore(&xhci->lock,
flags);
- xhci_stop_device(xhci, slot_id, 1);
+ xhci_stop_device(xhci, port->slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
}
xhci_set_link_state(xhci, port, USB_SS_PORT_LS_U3);
@@ -1584,13 +1588,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
bus_state->port_c_suspend |= 1 << wIndex;
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- portnum1);
- if (!slot_id) {
+ if (!port->slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
goto error;
}
- xhci_ring_device(xhci, slot_id);
+ xhci_ring_device(xhci, port->slot_id);
break;
case USB_PORT_FEAT_C_SUSPEND:
bus_state->port_c_suspend &= ~(1 << wIndex);
@@ -1821,10 +1823,7 @@ retry:
if (!portsc_buf[port_index])
continue;
if (test_bit(port_index, &bus_state->bus_suspended)) {
- int slot_id;
-
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- port_index + 1);
+ int slot_id = ports[port_index]->slot_id;
if (slot_id) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_stop_device(xhci, slot_id, 1);
@@ -1877,7 +1876,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
struct xhci_bus_state *bus_state;
unsigned long flags;
int max_ports, port_index;
- int slot_id;
int sret;
u32 next_state;
u32 temp, portsc;
@@ -1934,7 +1932,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
/* resume already initiated */
break;
default:
- /* not in a resumeable state, ignore it */
+ /* not in a resumable state, ignore it */
clear_bit(port_index,
&bus_state->bus_suspended);
break;
@@ -1970,9 +1968,8 @@ int xhci_bus_resume(struct usb_hcd *hcd)
continue;
}
xhci_test_and_clear_bit(xhci, ports[port_index], PORT_PLC);
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, port_index + 1);
- if (slot_id)
- xhci_ring_device(xhci, slot_id);
+ if (ports[port_index]->slot_id)
+ xhci_ring_device(xhci, ports[port_index]->slot_id);
}
(void) readl(&xhci->op_regs->command);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index a7716202a8dd..fdf0c1008225 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -27,14 +27,12 @@
* "All components of all Command and Transfer TRBs shall be initialized to '0'"
*/
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
- unsigned int cycle_state,
unsigned int max_packet,
unsigned int num,
gfp_t flags)
{
struct xhci_segment *seg;
dma_addr_t dma;
- int i;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
@@ -56,11 +54,6 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
return NULL;
}
}
- /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
- if (cycle_state == 0) {
- for (i = 0; i < TRBS_PER_SEGMENT; i++)
- seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
- }
seg->num = num;
seg->dma = dma;
seg->next = NULL;
@@ -78,85 +71,104 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
kfree(seg);
}
-static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
- struct xhci_segment *first)
+static void xhci_ring_segments_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
- struct xhci_segment *seg;
+ struct xhci_segment *seg, *next;
- seg = first->next;
- while (seg != first) {
- struct xhci_segment *next = seg->next;
+ ring->last_seg->next = NULL;
+ seg = ring->first_seg;
+
+ while (seg) {
+ next = seg->next;
xhci_segment_free(xhci, seg);
seg = next;
}
- xhci_segment_free(xhci, first);
}
/*
- * Make the prev segment point to the next segment.
+ * Only for transfer and command rings where driver is the producer, not for
+ * event rings.
*
- * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * Change the last TRB in the segment to be a Link TRB which points to the
* DMA address of the next segment. The caller needs to set any Link TRB
* related flags, such as End TRB, Toggle Cycle, and no snoop.
*/
-static void xhci_link_segments(struct xhci_segment *prev,
- struct xhci_segment *next,
- enum xhci_ring_type type, bool chain_links)
+static void xhci_set_link_trb(struct xhci_segment *seg, bool chain_links)
{
+ union xhci_trb *trb;
u32 val;
- if (!prev || !next)
+ if (!seg || !seg->next)
return;
- prev->next = next;
- if (type != TYPE_EVENT) {
- prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
- cpu_to_le64(next->dma);
- /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
- val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
- val &= ~TRB_TYPE_BITMASK;
- val |= TRB_TYPE(TRB_LINK);
- if (chain_links)
- val |= TRB_CHAIN;
- prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
- }
+ trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
+
+ /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
+ val = le32_to_cpu(trb->link.control);
+ val &= ~TRB_TYPE_BITMASK;
+ val |= TRB_TYPE(TRB_LINK);
+ if (chain_links)
+ val |= TRB_CHAIN;
+ trb->link.control = cpu_to_le32(val);
+ trb->link.segment_ptr = cpu_to_le64(seg->next->dma);
+}
+
+static void xhci_initialize_ring_segments(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ struct xhci_segment *seg;
+ bool chain_links;
+
+ if (ring->type == TYPE_EVENT)
+ return;
+
+ chain_links = xhci_link_chain_quirk(xhci, ring->type);
+ xhci_for_each_ring_seg(ring->first_seg, seg)
+ xhci_set_link_trb(seg, chain_links);
+
+ /* See section 4.9.2.1 and 6.4.4.1 */
+ ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= cpu_to_le32(LINK_TOGGLE);
}
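For reference, a small standalone sketch of the control-word manipulation performed by xhci_set_link_trb() and xhci_initialize_ring_segments(), using the bit definitions from xhci.h:

#include <stdio.h>

/* Bit layout as defined in xhci.h */
#define TRB_TYPE_BITMASK	0xfc00u
#define TRB_TYPE(p)		((unsigned int)(p) << 10)
#define TRB_LINK		6
#define TRB_CHAIN		(1u << 4)
#define LINK_TOGGLE		(1u << 1)

/*
 * Mirrors xhci_set_link_trb(): keep the other control bits, replace the TRB
 * type with Link, and optionally set the chain bit for quirky hosts.
 */
static unsigned int make_link_control(unsigned int control, int chain_links)
{
	control &= ~TRB_TYPE_BITMASK;
	control |= TRB_TYPE(TRB_LINK);
	if (chain_links)
		control |= TRB_CHAIN;
	return control;
}

int main(void)
{
	unsigned int ctrl = make_link_control(0, 1);

	/* The last segment's link TRB additionally gets LINK_TOGGLE, as above. */
	ctrl |= LINK_TOGGLE;
	printf("link TRB control word: 0x%08x\n", ctrl);	/* 0x00001812 */
	return 0;
}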
/*
- * Link the ring to the new segments.
+ * Link the src ring segments to the dst ring.
* Set Toggle Cycle for the new ring if needed.
*/
-static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
- struct xhci_segment *first, struct xhci_segment *last,
- unsigned int num_segs)
+static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *src, struct xhci_ring *dst)
{
- struct xhci_segment *next, *seg;
+ struct xhci_segment *seg;
bool chain_links;
- if (!ring || !first || !last)
+ if (!src || !dst)
return;
- /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
- chain_links = !!(xhci_link_trb_quirk(xhci) ||
- (ring->type == TYPE_ISOC &&
- (xhci->quirks & XHCI_AMD_0x96_HOST)));
+ /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
+ if (dst->cycle_state == 0) {
+ xhci_for_each_ring_seg(src->first_seg, seg) {
+ for (int i = 0; i < TRBS_PER_SEGMENT; i++)
+ seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+ }
+ }
- next = ring->enq_seg->next;
- xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
- xhci_link_segments(last, next, ring->type, chain_links);
- ring->num_segs += num_segs;
+ src->last_seg->next = dst->enq_seg->next;
+ dst->enq_seg->next = src->first_seg;
+ if (dst->type != TYPE_EVENT) {
+ chain_links = xhci_link_chain_quirk(xhci, dst->type);
+ xhci_set_link_trb(dst->enq_seg, chain_links);
+ xhci_set_link_trb(src->last_seg, chain_links);
+ }
+ dst->num_segs += src->num_segs;
- if (ring->enq_seg == ring->last_seg) {
- if (ring->type != TYPE_EVENT) {
- ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
+ if (dst->enq_seg == dst->last_seg) {
+ if (dst->type != TYPE_EVENT)
+ dst->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
&= ~cpu_to_le32(LINK_TOGGLE);
- last->trbs[TRBS_PER_SEGMENT-1].link.control
- |= cpu_to_le32(LINK_TOGGLE);
- }
- ring->last_seg = last;
+
+ dst->last_seg = src->last_seg;
+ } else if (dst->type != TYPE_EVENT) {
+ src->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control &= ~cpu_to_le32(LINK_TOGGLE);
}
- for (seg = last; seg != ring->last_seg; seg = seg->next)
+ for (seg = dst->enq_seg; seg != dst->last_seg; seg = seg->next)
seg->next->num = seg->num + 1;
}
@@ -227,7 +239,6 @@ static int xhci_update_stream_segment_mapping(
struct radix_tree_root *trb_address_map,
struct xhci_ring *ring,
struct xhci_segment *first_seg,
- struct xhci_segment *last_seg,
gfp_t mem_flags)
{
struct xhci_segment *seg;
@@ -237,28 +248,22 @@ static int xhci_update_stream_segment_mapping(
if (WARN_ON_ONCE(trb_address_map == NULL))
return 0;
- seg = first_seg;
- do {
+ xhci_for_each_ring_seg(first_seg, seg) {
ret = xhci_insert_segment_mapping(trb_address_map,
ring, seg, mem_flags);
if (ret)
goto remove_streams;
- if (seg == last_seg)
- return 0;
- seg = seg->next;
- } while (seg != first_seg);
+ }
return 0;
remove_streams:
failed_seg = seg;
- seg = first_seg;
- do {
+ xhci_for_each_ring_seg(first_seg, seg) {
xhci_remove_segment_mapping(trb_address_map, seg);
if (seg == failed_seg)
return ret;
- seg = seg->next;
- } while (seg != first_seg);
+ }
return ret;
}
@@ -270,17 +275,14 @@ static void xhci_remove_stream_mapping(struct xhci_ring *ring)
if (WARN_ON_ONCE(ring->trb_address_map == NULL))
return;
- seg = ring->first_seg;
- do {
+ xhci_for_each_ring_seg(ring->first_seg, seg)
xhci_remove_segment_mapping(ring->trb_address_map, seg);
- seg = seg->next;
- } while (seg != ring->first_seg);
}
static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
- ring->first_seg, ring->last_seg, mem_flags);
+ ring->first_seg, mem_flags);
}
/* XXX: Do we need the hcd structure in all these functions? */
@@ -294,14 +296,13 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (ring->first_seg) {
if (ring->type == TYPE_STREAM)
xhci_remove_stream_mapping(ring);
- xhci_free_segments_for_ring(xhci, ring->first_seg);
+ xhci_ring_segments_free(xhci, ring);
}
kfree(ring);
}
-void xhci_initialize_ring_info(struct xhci_ring *ring,
- unsigned int cycle_state)
+void xhci_initialize_ring_info(struct xhci_ring *ring)
{
/* The ring is empty, so the enqueue pointer == dequeue pointer */
ring->enqueue = ring->first_seg->trbs;
@@ -315,7 +316,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
* New rings are initialized with cycle state equal to 1; if we are
* handling ring expansion, set the cycle state equal to the old ring.
*/
- ring->cycle_state = cycle_state;
+ ring->cycle_state = 1;
/*
* Each segment has a link TRB, and leave an extra TRB for SW
@@ -326,49 +327,37 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);
/* Allocate segments and link them for a ring */
-static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
- struct xhci_segment **first, struct xhci_segment **last,
- unsigned int num_segs, unsigned int num,
- unsigned int cycle_state, enum xhci_ring_type type,
- unsigned int max_packet, gfp_t flags)
+static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, gfp_t flags)
{
struct xhci_segment *prev;
- bool chain_links;
+ unsigned int num = 0;
- /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
- chain_links = !!(xhci_link_trb_quirk(xhci) ||
- (type == TYPE_ISOC &&
- (xhci->quirks & XHCI_AMD_0x96_HOST)));
-
- prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags);
+ prev = xhci_segment_alloc(xhci, ring->bounce_buf_len, num, flags);
if (!prev)
return -ENOMEM;
num++;
- *first = prev;
- while (num < num_segs) {
+ ring->first_seg = prev;
+ while (num < ring->num_segs) {
struct xhci_segment *next;
- next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
- flags);
- if (!next) {
- prev = *first;
- while (prev) {
- next = prev->next;
- xhci_segment_free(xhci, prev);
- prev = next;
- }
- return -ENOMEM;
- }
- xhci_link_segments(prev, next, type, chain_links);
+ next = xhci_segment_alloc(xhci, ring->bounce_buf_len, num, flags);
+ if (!next)
+ goto free_segments;
+ prev->next = next;
prev = next;
num++;
}
- xhci_link_segments(prev, *first, type, chain_links);
- *last = prev;
+ ring->last_seg = prev;
+ ring->last_seg->next = ring->first_seg;
return 0;
+
+free_segments:
+ ring->last_seg = prev;
+ xhci_ring_segments_free(xhci, ring);
+ return -ENOMEM;
}
/*
@@ -378,9 +367,8 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
* Set the end flag and the cycle toggle bit on the last segment.
* See section 4.9.1 and figures 15 and 16.
*/
-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
- unsigned int num_segs, unsigned int cycle_state,
- enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
+ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
struct xhci_ring *ring;
int ret;
@@ -397,19 +385,12 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
if (num_segs == 0)
return ring;
- ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
- &ring->last_seg, num_segs, 0, cycle_state, type,
- max_packet, flags);
+ ret = xhci_alloc_segments_for_ring(xhci, ring, flags);
if (ret)
goto fail;
- /* Only event ring does not use link TRB */
- if (type != TYPE_EVENT) {
- /* See section 4.9.2.1 and 6.4.4.1 */
- ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
- cpu_to_le32(LINK_TOGGLE);
- }
- xhci_initialize_ring_info(ring, cycle_state);
+ xhci_initialize_ring_segments(xhci, ring);
+ xhci_initialize_ring_info(ring);
trace_xhci_ring_alloc(ring);
return ring;
@@ -433,39 +414,39 @@ void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_new_segs, gfp_t flags)
{
- struct xhci_segment *first;
- struct xhci_segment *last;
- int ret;
+ struct xhci_ring new_ring;
+ int ret;
+
+ if (num_new_segs == 0)
+ return 0;
- ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
- num_new_segs, ring->enq_seg->num + 1,
- ring->cycle_state, ring->type,
- ring->bounce_buf_len, flags);
+ new_ring.num_segs = num_new_segs;
+ new_ring.bounce_buf_len = ring->bounce_buf_len;
+ new_ring.type = ring->type;
+ ret = xhci_alloc_segments_for_ring(xhci, &new_ring, flags);
if (ret)
return -ENOMEM;
- if (ring->type == TYPE_STREAM)
- ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
- ring, first, last, flags);
- if (ret) {
- struct xhci_segment *next;
- do {
- next = first->next;
- xhci_segment_free(xhci, first);
- if (first == last)
- break;
- first = next;
- } while (true);
- return ret;
+ xhci_initialize_ring_segments(xhci, &new_ring);
+
+ if (ring->type == TYPE_STREAM) {
+ ret = xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
+ new_ring.first_seg, flags);
+ if (ret)
+ goto free_segments;
}
- xhci_link_rings(xhci, ring, first, last, num_new_segs);
+ xhci_link_rings(xhci, &new_ring, ring);
trace_xhci_ring_expansion(ring);
xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
"ring expansion succeed, now has %d segments",
ring->num_segs);
return 0;
+
+free_segments:
+ xhci_ring_segments_free(xhci, &new_ring);
+ return ret;
}
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -543,7 +524,7 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
- size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
+ size_t size = array_size(sizeof(struct xhci_stream_ctx), num_stream_ctxs);
if (size > MEDIUM_STREAM_ARRAY_SIZE)
dma_free_coherent(dev, size, stream_ctx, dma);
@@ -568,7 +549,7 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
gfp_t mem_flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
- size_t size = size_mul(sizeof(struct xhci_stream_ctx), num_stream_ctxs);
+ size_t size = array_size(sizeof(struct xhci_stream_ctx), num_stream_ctxs);
if (size > MEDIUM_STREAM_ARRAY_SIZE)
return dma_alloc_coherent(dev, size, dma, mem_flags);
@@ -654,8 +635,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
stream_info->stream_rings[cur_stream] =
- xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
- mem_flags);
+ xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags);
cur_ring = stream_info->stream_rings[cur_stream];
if (!cur_ring)
goto cleanup_rings;
@@ -670,6 +650,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", cur_stream, addr);
ret = xhci_update_stream_mapping(cur_ring, mem_flags);
+
+ trace_xhci_alloc_stream_info_ctx(stream_info, cur_stream);
if (ret) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
@@ -789,15 +771,14 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
bool slot_found = false;
/* If the device never made it past the Set Address stage,
- * it may not have the real_port set correctly.
+ * it may not have the root hub port pointer set correctly.
*/
- if (virt_dev->real_port == 0 ||
- virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
- xhci_dbg(xhci, "Bad real port.\n");
+ if (!virt_dev->rhub_port) {
+ xhci_dbg(xhci, "Bad rhub port.\n");
return;
}
- tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
+ tt_list_head = &(xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* Multi-TT hubs will have more than one entry */
if (tt_info->slot_id == slot_id) {
@@ -834,7 +815,7 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
goto free_tts;
INIT_LIST_HEAD(&tt_info->tt_list);
list_add(&tt_info->tt_list,
- &xhci->rh_bw[virt_dev->real_port - 1].tts);
+ &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts);
tt_info->slot_id = virt_dev->udev->slot_id;
if (tt->multi)
tt_info->ttport = i+1;
@@ -908,6 +889,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (dev->udev && dev->udev->slot_id)
dev->udev->slot_id = 0;
+ if (dev->rhub_port && dev->rhub_port->slot_id == slot_id)
+ dev->rhub_port->slot_id = 0;
kfree(xhci->devs[slot_id]);
xhci->devs[slot_id] = NULL;
}
@@ -929,13 +912,12 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i
if (!vdev)
return;
- if (vdev->real_port == 0 ||
- vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
- xhci_dbg(xhci, "Bad vdev->real_port.\n");
+ if (!vdev->rhub_port) {
+ xhci_dbg(xhci, "Bad rhub port.\n");
goto out;
}
- tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+ tt_list_head = &(xhci->rh_bw[vdev->rhub_port->hw_portnum].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* is this a hub device that added a tt_info to the tts list */
if (tt_info->slot_id == slot_id) {
@@ -996,7 +978,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
}
/* Allocate endpoint 0 ring */
- dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
+ dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags);
if (!dev->eps[0].ring)
goto fail;
@@ -1051,16 +1033,16 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
* The xHCI roothub may have ports of differing speeds in any order in the port
* status registers.
*
- * The xHCI hardware wants to know the roothub port number that the USB device
+ * The xHCI hardware wants to know the roothub port that the USB device
* is attached to (or the roothub port its ancestor hub is attached to). All we
* know is the index of that port under either the USB 2.0 or the USB 3.0
* roothub, but that doesn't give us the real index into the HW port status
- * registers. Call xhci_find_raw_port_number() to get real index.
+ * registers.
*/
-static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
- struct usb_device *udev)
+static struct xhci_port *xhci_find_rhub_port(struct xhci_hcd *xhci, struct usb_device *udev)
{
struct usb_device *top_dev;
+ struct xhci_hub *rhub;
struct usb_hcd *hcd;
if (udev->speed >= USB_SPEED_SUPER)
@@ -1072,7 +1054,8 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
top_dev = top_dev->parent)
/* Found device below root hub */;
- return xhci_find_raw_port_number(hcd, top_dev->portnum);
+ rhub = xhci_get_rhub(hcd);
+ return rhub->ports[top_dev->portnum - 1];
}
/* Setup an xHCI virtual device for a Set Address command */
@@ -1081,9 +1064,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep0_ctx;
struct xhci_slot_ctx *slot_ctx;
- u32 port_num;
u32 max_packets;
- struct usb_device *top_dev;
dev = xhci->devs[udev->slot_id];
/* Slot ID 0 is reserved */
@@ -1124,18 +1105,15 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
return -EINVAL;
}
/* Find the root hub port this device is under */
- port_num = xhci_find_real_port_number(xhci, udev);
- if (!port_num)
+ dev->rhub_port = xhci_find_rhub_port(xhci, udev);
+ if (!dev->rhub_port)
return -EINVAL;
- slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
- /* Set the port number in the virtual_device to the faked port number */
- for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
- top_dev = top_dev->parent)
- /* Found device below root hub */;
- dev->fake_port = top_dev->portnum;
- dev->real_port = port_num;
- xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
- xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
+ /* Slot ID is set to the device directly below the root hub */
+ if (!udev->parent->parent)
+ dev->rhub_port->slot_id = udev->slot_id;
+ slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(dev->rhub_port->hw_portnum + 1));
+ xhci_dbg(xhci, "Slot ID %d: HW portnum %d, hcd portnum %d\n",
+ udev->slot_id, dev->rhub_port->hw_portnum, dev->rhub_port->hcd_portnum);
/* Find the right bandwidth table that this device will be a part of.
* If this is a full speed device attached directly to a root port (or a
@@ -1144,12 +1122,12 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
* will never be created for the HS root hub.
*/
if (!udev->tt || !udev->tt->hub->parent) {
- dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
+ dev->bw_table = &xhci->rh_bw[dev->rhub_port->hw_portnum].bw_table;
} else {
struct xhci_root_port_bw_info *rh_bw;
struct xhci_tt_bw_info *tt_bw;
- rh_bw = &xhci->rh_bw[port_num - 1];
+ rh_bw = &xhci->rh_bw[dev->rhub_port->hw_portnum];
/* Find the right TT. */
list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
if (tt_bw->slot_id != udev->tt->hub->slot_id)
@@ -1477,7 +1455,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
/* Set up the endpoint ring */
virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+ xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags);
if (!virt_dev->eps[ep_index].new_ring)
return -ENOMEM;
@@ -1649,7 +1627,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
goto fail_sp;
xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
- size_mul(sizeof(u64), num_sp),
+ array_size(sizeof(u64), num_sp),
&xhci->scratchpad->sp_dma, flags);
if (!xhci->scratchpad->sp_array)
goto fail_sp2;
@@ -1682,7 +1660,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
kfree(xhci->scratchpad->sp_buffers);
fail_sp3:
- dma_free_coherent(dev, num_sp * sizeof(u64),
+ dma_free_coherent(dev, array_size(sizeof(u64), num_sp),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
@@ -1711,7 +1689,7 @@ static void scratchpad_free(struct xhci_hcd *xhci)
xhci->scratchpad->sp_array[i]);
}
kfree(xhci->scratchpad->sp_buffers);
- dma_free_coherent(dev, num_sp * sizeof(u64),
+ dma_free_coherent(dev, array_size(sizeof(u64), num_sp),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
kfree(xhci->scratchpad);
@@ -1789,7 +1767,7 @@ static int xhci_alloc_erst(struct xhci_hcd *xhci,
struct xhci_segment *seg;
struct xhci_erst_entry *entry;
- size = size_mul(sizeof(struct xhci_erst_entry), evt_ring->num_segs);
+ size = array_size(sizeof(struct xhci_erst_entry), evt_ring->num_segs);
erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
size, &erst->erst_dma_addr, flags);
if (!erst->entries)
@@ -1840,7 +1818,7 @@ xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
if (!ir)
return;
- erst_size = sizeof(struct xhci_erst_entry) * ir->erst.num_entries;
+ erst_size = array_size(sizeof(struct xhci_erst_entry), ir->erst.num_entries);
if (ir->erst.entries)
dma_free_coherent(dev, erst_size,
ir->erst.entries,
@@ -1888,7 +1866,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
cancel_delayed_work_sync(&xhci->cmd_timer);
- for (i = 0; i < xhci->max_interrupters; i++) {
+ for (i = 0; xhci->interrupters && i < xhci->max_interrupters; i++) {
if (xhci->interrupters[i]) {
xhci_remove_interrupter(xhci, xhci->interrupters[i]);
xhci_free_interrupter(xhci, xhci->interrupters[i]);
@@ -1961,7 +1939,6 @@ no_bw:
kfree(xhci->usb3_rhub.ports);
kfree(xhci->hw_ports);
kfree(xhci->rh_bw);
- kfree(xhci->ext_caps);
for (i = 0; i < xhci->num_port_caps; i++)
kfree(xhci->port_caps[i].psi);
kfree(xhci->port_caps);
@@ -1972,7 +1949,6 @@ no_bw:
xhci->usb3_rhub.ports = NULL;
xhci->hw_ports = NULL;
xhci->rh_bw = NULL;
- xhci->ext_caps = NULL;
xhci->port_caps = NULL;
xhci->interrupters = NULL;
@@ -2100,10 +2076,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
port_cap->maj_rev = major_revision;
port_cap->min_rev = minor_revision;
-
- /* cache usb2 port capabilities */
- if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
- xhci->ext_caps[xhci->num_ext_caps++] = temp;
+ port_cap->protocol_caps = temp;
if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
(temp & XHCI_HLC)) {
@@ -2223,11 +2196,6 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
XHCI_EXT_CAPS_PROTOCOL);
}
- xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
- flags, dev_to_node(dev));
- if (!xhci->ext_caps)
- return -ENOMEM;
-
xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
flags, dev_to_node(dev));
if (!xhci->port_caps)
@@ -2280,24 +2248,24 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
}
static struct xhci_interrupter *
-xhci_alloc_interrupter(struct xhci_hcd *xhci, int segs, gfp_t flags)
+xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_interrupter *ir;
- unsigned int num_segs = segs;
+ unsigned int max_segs;
int ret;
+ if (!segs)
+ segs = ERST_DEFAULT_SEGS;
+
+ max_segs = BIT(HCS_ERST_MAX(xhci->hcs_params2));
+ segs = min(segs, max_segs);
+
ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
if (!ir)
return NULL;
- /* number of ring segments should be greater than 0 */
- if (segs <= 0)
- num_segs = min_t(unsigned int, 1 << HCS_ERST_MAX(xhci->hcs_params2),
- ERST_MAX_SEGS);
-
- ir->event_ring = xhci_ring_alloc(xhci, num_segs, 1, TYPE_EVENT, 0,
- flags);
+ ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags);
if (!ir->event_ring) {
xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
kfree(ir);
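The new clamping of the requested ERST segment count can be shown with a short sketch (assuming ERST_DEFAULT_SEGS is 2, as in current xhci.h):

#include <stdio.h>

#define ERST_DEFAULT_SEGS 2	/* assumed default, see xhci.h */

/* HCS_ERST_MAX: bits 7:4 of HCSPARAMS2; the HC supports 2^ERST_MAX segments. */
static unsigned int hcs_erst_max(unsigned int hcs_params2)
{
	return (hcs_params2 >> 4) & 0xf;
}

static unsigned int clamp_erst_segs(unsigned int requested, unsigned int hcs_params2)
{
	unsigned int max_segs = 1u << hcs_erst_max(hcs_params2);

	if (!requested)
		requested = ERST_DEFAULT_SEGS;
	return requested < max_segs ? requested : max_segs;
}

int main(void)
{
	/* HCSPARAMS2 advertising ERST Max = 3 -> at most 8 event ring segments */
	printf("%u\n", clamp_erst_segs(0, 0x30));	/* 2: default */
	printf("%u\n", clamp_erst_segs(42, 0x30));	/* 8: clamped */
	return 0;
}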
@@ -2346,7 +2314,10 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
erst_base &= ERST_BASE_RSVDP;
erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP;
- xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
+ if (xhci->quirks & XHCI_WRITE_64_HI_LO)
+ hi_lo_writeq(erst_base, &ir->ir_set->erst_base);
+ else
+ xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
/* Set the event ring dequeue address of this interrupter */
xhci_set_hc_event_deq(xhci, ir);
@@ -2355,7 +2326,8 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
}
struct xhci_interrupter *
-xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg)
+xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
+ u32 imod_interval)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_interrupter *ir;
@@ -2365,7 +2337,7 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg)
if (!xhci->interrupters || xhci->max_interrupters <= 1)
return NULL;
- ir = xhci_alloc_interrupter(xhci, num_seg, GFP_KERNEL);
+ ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL);
if (!ir)
return NULL;
@@ -2388,6 +2360,11 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg)
return NULL;
}
+ err = xhci_set_interrupter_moderation(ir, imod_interval);
+ if (err)
+ xhci_warn(xhci, "Failed to set interrupter %d moderation to %uns\n",
+ i, imod_interval);
+
xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n",
i, xhci->max_interrupters);
@@ -2460,7 +2437,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* and our use of dma addresses in the trb_address_map radix tree needs
* TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
- if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
+ if (xhci->quirks & XHCI_TRB_OVERFETCH)
+ /* Buggy HC prefetches beyond segment bounds - allocate dummy space at the end */
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
else
@@ -2490,7 +2468,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
goto fail;
/* Set up the command ring to have one segments for now. */
- xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags);
if (!xhci->cmd_ring)
goto fail;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2533,13 +2511,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
if (xhci_add_interrupter(xhci, ir, 0))
goto fail;
- xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
+ ir->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
- /*
- * XXX: Might need to set the Interrupter Moderation Register to
- * something other than the default (~1ms minimum between interrupts).
- * See section 5.5.1.2.
- */
for (i = 0; i < MAX_HC_SLOTS; i++)
xhci->devs[i] = NULL;
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 61f3f8bbdcea..27eb384a3963 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -122,10 +122,6 @@ static u32 get_bw_boundary(enum usb_device_speed speed)
* each HS root port is treated as a single bandwidth domain,
* but each SS root port is treated as two bandwidth domains, one for IN eps,
* one for OUT eps.
-* @real_port value is defined as follow according to xHCI spec:
-* 1 for SSport0, ..., N+1 for SSportN, N+2 for HSport0, N+3 for HSport1, etc
-* so the bandwidth domain array is organized as follow for simplification:
-* SSport0-OUT, SSport0-IN, ..., SSportX-OUT, SSportX-IN, HSport0, ..., HSportY
*/
static struct mu3h_sch_bw_info *
get_bw_info(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
@@ -136,19 +132,19 @@ get_bw_info(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
int bw_index;
virt_dev = xhci->devs[udev->slot_id];
- if (!virt_dev->real_port) {
- WARN_ONCE(1, "%s invalid real_port\n", dev_name(&udev->dev));
+ if (!virt_dev->rhub_port) {
+ WARN_ONCE(1, "%s invalid rhub port\n", dev_name(&udev->dev));
return NULL;
}
if (udev->speed >= USB_SPEED_SUPER) {
if (usb_endpoint_dir_out(&ep->desc))
- bw_index = (virt_dev->real_port - 1) * 2;
+ bw_index = (virt_dev->rhub_port->hw_portnum) * 2;
else
- bw_index = (virt_dev->real_port - 1) * 2 + 1;
+ bw_index = (virt_dev->rhub_port->hw_portnum) * 2 + 1;
} else {
/* add one more for each SS port */
- bw_index = virt_dev->real_port + xhci->usb3_rhub.num_ports - 1;
+ bw_index = virt_dev->rhub_port->hw_portnum + xhci->usb3_rhub.num_ports;
}
return &mtk->sch_array[bw_index];
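A compact sketch of the bandwidth-domain index computed above, assuming (as on MediaTek hosts) that the SuperSpeed ports occupy the lowest hardware port numbers:

#include <stdbool.h>
#include <stdio.h>

/*
 * Bandwidth-domain index as computed above: each SuperSpeed root port owns
 * two domains (OUT, then IN), followed by one domain per high-speed port.
 * hw_portnum is zero-based, with SS ports assumed to come first, so HS
 * port 0 has hw_portnum == num_ss_ports.
 */
static int bw_index(bool superspeed, bool ep_out, int hw_portnum, int num_ss_ports)
{
	if (superspeed)
		return hw_portnum * 2 + (ep_out ? 0 : 1);
	return hw_portnum + num_ss_ports;
}

int main(void)
{
	/* 2 SS ports: SS0-OUT=0, SS0-IN=1, SS1-OUT=2, SS1-IN=3, HS0=4, ... */
	printf("%d\n", bw_index(true, false, 1, 2));	/* 3: SS1, IN direction */
	printf("%d\n", bw_index(false, true, 2, 2));	/* 4: HS0 */
	return 0;
}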
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 3252e3d2d79c..904831344440 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -853,7 +853,7 @@ MODULE_DEVICE_TABLE(of, mtk_xhci_of_match);
static struct platform_driver mtk_xhci_driver = {
.probe = xhci_mtk_probe,
- .remove_new = xhci_mtk_remove,
+ .remove = xhci_mtk_remove,
.driver = {
.name = "xhci-mtk",
.pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
index 93f8b355bc70..620f8f0febb8 100644
--- a/drivers/usb/host/xhci-pci-renesas.c
+++ b/drivers/usb/host/xhci-pci-renesas.c
@@ -6,7 +6,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -50,6 +50,8 @@
#define RENESAS_RETRY 10000
#define RENESAS_DELAY 10
+#define RENESAS_FW_NAME "renesas_usb_fw.mem"
+
static int renesas_fw_download_image(struct pci_dev *dev,
const u32 *fw, size_t step, bool rom)
{
@@ -573,12 +575,10 @@ exit:
return err;
}
-int renesas_xhci_check_request_fw(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int renesas_xhci_check_request_fw(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
- struct xhci_driver_data *driver_data =
- (struct xhci_driver_data *)id->driver_data;
- const char *fw_name = driver_data->firmware;
+ const char fw_name[] = RENESAS_FW_NAME;
const struct firmware *fw;
bool has_rom;
int err;
@@ -625,6 +625,41 @@ exit:
release_firmware(fw);
return err;
}
-EXPORT_SYMBOL_GPL(renesas_xhci_check_request_fw);
+static int
+xhci_pci_renesas_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int retval;
+
+ retval = renesas_xhci_check_request_fw(dev, id);
+ if (retval)
+ return retval;
+
+ return xhci_pci_common_probe(dev, id);
+}
+
+static const struct pci_device_id pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver xhci_renesas_pci_driver = {
+ .name = "xhci-pci-renesas",
+ .id_table = pci_ids,
+
+ .probe = xhci_pci_renesas_probe,
+ .remove = xhci_pci_remove,
+
+ .shutdown = usb_hcd_pci_shutdown,
+ .driver = {
+ .pm = pm_ptr(&usb_hcd_pci_pm_ops),
+ },
+};
+module_pci_driver(xhci_renesas_pci_driver);
+
+MODULE_DESCRIPTION("Renesas xHCI PCI Host Controller Driver");
+MODULE_FIRMWARE(RENESAS_FW_NAME);
+MODULE_IMPORT_NS("xhci");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b534ca9752be..54460d11f7ee 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -28,25 +28,40 @@
#define SPARSE_CNTL_ENABLE 0xC12C
/* Device for a quirk */
-#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
-#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
+#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
+#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009
#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 0x1100
#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
-#define PCI_VENDOR_ID_ETRON 0x1b6f
-#define PCI_DEVICE_ID_EJ168 0x7023
+#define PCI_VENDOR_ID_ETRON 0x1b6f
+#define PCI_DEVICE_ID_ETRON_EJ168 0x7023
+#define PCI_DEVICE_ID_ETRON_EJ188 0x7052
-#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
-#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+#define PCI_DEVICE_ID_VIA_VL805 0x3483
+
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
-#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
-#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
+#define PCI_DEVICE_ID_INTEL_APOLLO_LAKE_XHCI 0x5aa8
+#define PCI_DEVICE_ID_INTEL_DENVERTON_XHCI 0x19d0
+#define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
+#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
+#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_PCH_XHCI 0xa0ed
+#define PCI_DEVICE_ID_INTEL_COMET_LAKE_XHCI 0xa3af
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed
+
+#define PCI_VENDOR_ID_PHYTIUM 0x1db7
+#define PCI_DEVICE_ID_PHYTIUM_XHCI 0xdc27
+
+/* Thunderbolt */
+#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI 0x15b5
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI 0x15b6
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI 0x15c1
@@ -55,12 +70,6 @@
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
-#define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
-#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
-#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
-#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
-#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
-#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed
#define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
@@ -72,6 +81,7 @@
#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
#define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242
#define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142
+#define PCI_DEVICE_ID_ASMEDIA_3042_XHCI 0x3042
#define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242
static const char hcd_name[] = "xhci_hcd";
@@ -89,6 +99,10 @@ static const struct xhci_driver_overrides xhci_pci_overrides __initconst = {
.update_hub_device = xhci_pci_update_hub_device,
};
+/*
+ * Primary Legacy and MSI IRQ are synced in suspend_common().
+ * All MSI-X IRQs and secondary MSI IRQs should be synced here.
+ */
static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
struct usb_hcd *hcd = xhci_to_hcd(xhci);
@@ -101,13 +115,12 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
}
}
-/* Free any IRQs and disable MSI-X */
+/* Legacy IRQ is freed by usb_remove_hcd() or usb_hcd_pci_shutdown() */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- /* return if using legacy interrupt */
if (hcd->irq > 0)
return;
@@ -136,14 +149,11 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
hcd->irq = 0;
/*
- * calculate number of MSI-X vectors supported.
- * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
- * with max number of interrupters based on the xhci HCSPARAMS1.
- * - num_online_cpus: maximum MSI-X vectors per CPUs core.
- * Add additional 1 vector to ensure always available interrupt.
+ * Calculate number of MSI/MSI-X vectors supported.
+ * - max_interrupters: the max number of interrupts requested, capped to xhci HCSPARAMS1.
+	 * - num_online_cpus: one vector per CPU core, with at least one overall.
*/
- xhci->nvecs = min(num_online_cpus() + 1,
- HCS_MAX_INTRS(xhci->hcs_params1));
+ xhci->nvecs = min(num_online_cpus() + 1, xhci->max_interrupters);
/* TODO: Check with MSI Soc for sysdev */
xhci->nvecs = pci_alloc_irq_vectors(pdev, 1, xhci->nvecs,
@@ -231,15 +241,6 @@ static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct xhci_driver_data *driver_data;
- const struct pci_device_id *id;
-
- id = pci_match_id(to_pci_driver(pdev->dev.driver)->id_table, pdev);
-
- if (id && id->driver_data) {
- driver_data = (struct xhci_driver_data *)id->driver_data;
- xhci->quirks |= driver_data->quirks;
- }
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
@@ -270,17 +271,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
"QUIRK: Fresco Logic revision %u "
"has broken MSI implementation",
pdev->revision);
- xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
xhci->quirks |= XHCI_BROKEN_STREAMS;
- if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
- pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100)
- xhci->quirks |= XHCI_TRUST_TX_LENGTH;
-
if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST;
@@ -307,8 +303,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_RESET_ON_RESUME;
}
- if (pdev->vendor == PCI_VENDOR_ID_AMD)
- xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43f7)
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
@@ -353,9 +349,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_APOLLO_LAKE_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_DENVERTON_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_COMET_LAKE_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -364,18 +360,19 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_APOLLO_LAKE_XHCI))
xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_APOLLO_LAKE_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_DENVERTON_XHCI))
xhci->quirks |= XHCI_MISSING_CAS;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- (pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_PCH_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI))
xhci->quirks |= XHCI_RESET_TO_DEFAULT;
@@ -394,14 +391,16 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
- pdev->device == PCI_DEVICE_ID_EJ168) {
+ (pdev->device == PCI_DEVICE_ID_ETRON_EJ168 ||
+ pdev->device == PCI_DEVICE_ID_ETRON_EJ188)) {
+ xhci->quirks |= XHCI_ETRON_HOST;
xhci->quirks |= XHCI_RESET_ON_RESUME;
- xhci->quirks |= XHCI_TRUST_TX_LENGTH;
xhci->quirks |= XHCI_BROKEN_STREAMS;
+ xhci->quirks |= XHCI_NO_SOFT_RETRY;
}
+
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0014) {
- xhci->quirks |= XHCI_TRUST_TX_LENGTH;
xhci->quirks |= XHCI_ZERO_64B_REGS;
}
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
@@ -412,13 +411,19 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
+ if (pdev->vendor == PCI_VENDOR_ID_PHYTIUM &&
+ pdev->device == PCI_DEVICE_ID_PHYTIUM_XHCI)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+
/* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
if (pdev->vendor == PCI_VENDOR_ID_VIA &&
pdev->device == 0x3432)
xhci->quirks |= XHCI_BROKEN_STREAMS;
- if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
+ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == PCI_DEVICE_ID_VIA_VL805) {
xhci->quirks |= XHCI_LPM_SUPPORT;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
+ }
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
@@ -431,7 +436,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
}
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) {
- xhci->quirks |= XHCI_TRUST_TX_LENGTH;
xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
}
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
@@ -444,6 +448,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL;
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ pdev->device == PCI_DEVICE_ID_ASMEDIA_3042_XHCI)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+
if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
@@ -463,13 +471,17 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->device == 0x9202) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
- xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
}
if (pdev->device == 0x9203)
- xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
}
+ if (pdev->vendor == PCI_VENDOR_ID_CDNS &&
+ pdev->device == PCI_DEVICE_ID_CDNS_USBSSP)
+ xhci->quirks |= XHCI_CDNS_SCTX_QUIRK;
+
/* xHC spec requires PCI devices to support D3hot and D3cold */
if (xhci->hci_version >= 0x120)
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
@@ -531,10 +543,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
struct xhci_hcd *xhci;
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval;
+ u8 sbrn;
xhci = hcd_to_xhci(hcd);
- if (!xhci->sbrn)
- pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
/* imod_interval is the interrupt moderation value in nanoseconds. */
xhci->imod_interval = 40000;
@@ -549,7 +560,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_acpi_rtd3_enable(pdev);
- xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
+ pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &sbrn);
+ xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int)sbrn);
/* Find any debug ports */
return xhci_pci_reinit(xhci, pdev);
@@ -569,21 +581,13 @@ static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hd
* We need to register our own PCI probe function (instead of the USB core's
* function) in order to create a second roothub under xHCI.
*/
-static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int retval;
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
- struct xhci_driver_data *driver_data;
struct reset_control *reset;
- driver_data = (struct xhci_driver_data *)id->driver_data;
- if (driver_data && driver_data->quirks & XHCI_RENESAS_FW_QUIRK) {
- retval = renesas_xhci_check_request_fw(dev, id);
- if (retval)
- return retval;
- }
-
reset = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
if (IS_ERR(reset))
return PTR_ERR(reset);
@@ -632,12 +636,15 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
pm_runtime_put_noidle(&dev->dev);
if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0)
- pm_runtime_forbid(&dev->dev);
+ pm_runtime_get(&dev->dev);
else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
pm_runtime_allow(&dev->dev);
dma_set_max_seg_size(&dev->dev, UINT_MAX);
+ if (device_property_read_bool(&dev->dev, "ti,pwron-active-high"))
+ pci_clear_and_set_config_dword(dev, 0xE0, 0, 1 << 22);
+
return 0;
put_usb3_hcd:
@@ -648,16 +655,37 @@ put_runtime_pm:
pm_runtime_put_noidle(&dev->dev);
return retval;
}
+EXPORT_SYMBOL_NS_GPL(xhci_pci_common_probe, "xhci");
+
+/* handled by xhci-pci-renesas if enabled */
+static const struct pci_device_id pci_ids_renesas[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) },
+ { /* end: all zeroes */ }
+};
-static void xhci_pci_remove(struct pci_dev *dev)
+static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ if (IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS) &&
+ pci_match_id(pci_ids_renesas, dev))
+ return -ENODEV;
+
+ return xhci_pci_common_probe(dev, id);
+}
+
+void xhci_pci_remove(struct pci_dev *dev)
{
struct xhci_hcd *xhci;
+ bool set_power_d3;
xhci = hcd_to_xhci(pci_get_drvdata(dev));
+ set_power_d3 = xhci->quirks & XHCI_SPURIOUS_WAKEUP;
xhci->xhc_state |= XHCI_STATE_REMOVING;
- if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ if (pci_choose_state(dev, PMSG_SUSPEND) == PCI_D0)
+ pm_runtime_put(&dev->dev);
+ else if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
pm_runtime_forbid(&dev->dev);
if (xhci->shared_hcd) {
@@ -666,12 +694,13 @@ static void xhci_pci_remove(struct pci_dev *dev)
xhci->shared_hcd = NULL;
}
+ usb_hcd_pci_remove(dev);
+
/* Workaround for spurious wakeups at shutdown with HSW */
- if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+ if (set_power_d3)
pci_set_power_state(dev, PCI_D3hot);
-
- usb_hcd_pci_remove(dev);
}
+EXPORT_SYMBOL_NS_GPL(xhci_pci_remove, "xhci");
/*
* In some Intel xHCI controllers, in order to get D3 working,
@@ -780,7 +809,6 @@ static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- int retval = 0;
reset_control_reset(xhci->reset);
@@ -811,8 +839,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_quirk(hcd);
- retval = xhci_resume(xhci, msg);
- return retval;
+ return xhci_resume(xhci, msg);
}
static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
@@ -820,7 +847,6 @@ static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_port *port;
struct usb_device *udev;
- unsigned int slot_id;
u32 portsc;
int i;
@@ -843,15 +869,14 @@ static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
if ((portsc & PORT_PLS_MASK) != XDEV_U3)
continue;
- slot_id = xhci_find_slot_id_by_port(port->rhub->hcd, xhci,
- port->hcd_portnum + 1);
- if (!slot_id || !xhci->devs[slot_id]) {
+ if (!port->slot_id || !xhci->devs[port->slot_id]) {
xhci_err(xhci, "No dev for slot_id %d for port %d-%d in U3\n",
- slot_id, port->rhub->hcd->self.busnum, port->hcd_portnum + 1);
+ port->slot_id, port->rhub->hcd->self.busnum,
+ port->hcd_portnum + 1);
continue;
}
- udev = xhci->devs[slot_id]->udev;
+ udev = xhci->devs[port->slot_id]->udev;
/* if wakeup is enabled then don't disable the port */
if (udev->do_remote_wakeup && do_wakeup)
@@ -881,19 +906,8 @@ static void xhci_pci_shutdown(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-static const struct xhci_driver_data reneses_data = {
- .quirks = XHCI_RENESAS_FW_QUIRK,
- .firmware = "renesas_usb_fw.mem",
-};
-
/* PCI driver selection metadata; PCI hotplugging uses this */
static const struct pci_device_id pci_ids[] = {
- { PCI_DEVICE(0x1912, 0x0014),
- .driver_data = (unsigned long)&reneses_data,
- },
- { PCI_DEVICE(0x1912, 0x0015),
- .driver_data = (unsigned long)&reneses_data,
- },
/* handle any USB 3.0 xHCI controller */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
},
@@ -901,14 +915,6 @@ static const struct pci_device_id pci_ids[] = {
};
MODULE_DEVICE_TABLE(pci, pci_ids);
-/*
- * Without CONFIG_USB_XHCI_PCI_RENESAS renesas_xhci_check_request_fw() won't
- * load firmware, so don't encumber the xhci-pci driver with it.
- */
-#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS)
-MODULE_FIRMWARE("renesas_usb_fw.mem");
-#endif
-
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver xhci_pci_driver = {
.name = hcd_name,
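The xhci_try_enable_msi() change above drops the HCS_MAX_INTRS() cap in favour of xhci->max_interrupters, which the core has already limited to what HCSPARAMS1 advertises. A rough sketch of the resulting vector sizing, assuming the field names used in this patch (the PCI_IRQ_* flag combination is not visible in the hunk and is an assumption):

#include <linux/cpumask.h>	/* num_online_cpus() */
#include <linux/pci.h>
#include "xhci.h"

static int example_alloc_xhci_vectors(struct pci_dev *pdev, struct xhci_hcd *xhci)
{
	int nvecs;

	/* one vector per online CPU plus one spare, capped to the number of
	 * interrupters the driver requested (already bounded by HCSPARAMS1) */
	nvecs = min_t(int, num_online_cpus() + 1, xhci->max_interrupters);

	nvecs = pci_alloc_irq_vectors(pdev, 1, nvecs, PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 0)
		return nvecs;	/* caller then falls back to a legacy INTx IRQ */

	xhci->nvecs = nvecs;
	return 0;
}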
diff --git a/drivers/usb/host/xhci-pci.h b/drivers/usb/host/xhci-pci.h
index cb9a8f331a44..e87c7d9d76b8 100644
--- a/drivers/usb/host/xhci-pci.h
+++ b/drivers/usb/host/xhci-pci.h
@@ -4,22 +4,7 @@
#ifndef XHCI_PCI_H
#define XHCI_PCI_H
-#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS)
-int renesas_xhci_check_request_fw(struct pci_dev *dev,
- const struct pci_device_id *id);
-
-#else
-static int renesas_xhci_check_request_fw(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return 0;
-}
-
-#endif
-
-struct xhci_driver_data {
- u64 quirks;
- const char *firmware;
-};
+int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id);
+void xhci_pci_remove(struct pci_dev *dev);
#endif
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 3d071b875308..d85ffa9ffaa7 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -256,6 +256,15 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
if (device_property_read_bool(tmpdev, "xhci-sg-trb-cache-size-quirk"))
xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK;
+ if (device_property_read_bool(tmpdev, "write-64-hi-lo-quirk"))
+ xhci->quirks |= XHCI_WRITE_64_HI_LO;
+
+ if (device_property_read_bool(tmpdev, "xhci-missing-cas-quirk"))
+ xhci->quirks |= XHCI_MISSING_CAS;
+
+ if (device_property_read_bool(tmpdev, "xhci-skip-phy-init-quirk"))
+ xhci->quirks |= XHCI_SKIP_PHY_INIT;
+
device_property_read_u32(tmpdev, "imod-interval-ns",
&xhci->imod_interval);
}
@@ -281,7 +290,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
- if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
+ if ((priv && (priv->quirks & XHCI_SKIP_PHY_INIT)) ||
+ (xhci->quirks & XHCI_SKIP_PHY_INIT))
hcd->skip_phy_initialization = 1;
if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK))
@@ -320,6 +330,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
usb3_hcd->can_do_streams = 1;
if (xhci->shared_hcd) {
+ xhci->shared_hcd->rsrc_start = hcd->rsrc_start;
+ xhci->shared_hcd->rsrc_len = hcd->rsrc_len;
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;
@@ -557,6 +569,7 @@ EXPORT_SYMBOL_GPL(xhci_plat_pm_ops);
static const struct acpi_device_id usb_xhci_acpi_match[] = {
/* XHCI-compliant USB Controller */
{ "PNP0D10", },
+ { "PNP0D15", },
{ }
};
MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
@@ -564,7 +577,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
static struct platform_driver usb_generic_xhci_driver = {
.probe = xhci_generic_plat_probe,
- .remove_new = xhci_plat_remove,
+ .remove = xhci_plat_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
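Besides the existing "xhci-sg-trb-cache-size-quirk" handling, xhci_plat_probe() now also honours "write-64-hi-lo-quirk", "xhci-missing-cas-quirk" and "xhci-skip-phy-init-quirk" device properties. A hedged sketch of how a board or parent driver could attach one of these properties through a software node so that the device_property_read_bool() calls above pick it up (the helpers are standard device-property API, not part of this patch; the function name is hypothetical):

#include <linux/property.h>

static const struct property_entry example_xhci_props[] = {
	PROPERTY_ENTRY_BOOL("xhci-skip-phy-init-quirk"),
	PROPERTY_ENTRY_U32("imod-interval-ns", 5000),
	{ }
};

/* attach the properties to the xHCI platform device before it probes */
static int example_attach_xhci_quirks(struct device *xhci_dev)
{
	return device_create_managed_software_node(xhci_dev, example_xhci_props, NULL);
}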
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 2d15386f2c50..6475130eac4b 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -8,7 +8,9 @@
#ifndef _XHCI_PLAT_H
#define _XHCI_PLAT_H
-#include "xhci.h" /* for hcd_to_xhci() */
+struct device;
+struct platform_device;
+struct usb_hcd;
struct xhci_plat_priv {
const char *firmware_name;
diff --git a/drivers/usb/host/xhci-port.h b/drivers/usb/host/xhci-port.h
new file mode 100644
index 000000000000..f19efb966d18
--- /dev/null
+++ b/drivers/usb/host/xhci-port.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
+/* true: device connected */
+#define PORT_CONNECT (1 << 0)
+/* true: port enabled */
+#define PORT_PE (1 << 1)
+/* bit 2 reserved and zeroed */
+/* true: port has an over-current condition */
+#define PORT_OC (1 << 3)
+/* true: port reset signaling asserted */
+#define PORT_RESET (1 << 4)
+/* Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe set sets the link state.
+ */
+#define PORT_PLS_MASK (0xf << 5)
+#define XDEV_U0 (0x0 << 5)
+#define XDEV_U1 (0x1 << 5)
+#define XDEV_U2 (0x2 << 5)
+#define XDEV_U3 (0x3 << 5)
+#define XDEV_DISABLED (0x4 << 5)
+#define XDEV_RXDETECT (0x5 << 5)
+#define XDEV_INACTIVE (0x6 << 5)
+#define XDEV_POLLING (0x7 << 5)
+#define XDEV_RECOVERY (0x8 << 5)
+#define XDEV_HOT_RESET (0x9 << 5)
+#define XDEV_COMP_MODE (0xa << 5)
+#define XDEV_TEST_MODE (0xb << 5)
+#define XDEV_RESUME (0xf << 5)
+
+/* true: port has power (see HCC_PPC) */
+#define PORT_POWER (1 << 9)
+/* bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't been initialized by a reset yet
+ * 1 - full speed
+ * 2 - low speed
+ * 3 - high speed
+ * 4 - super speed
+ * 5-15 reserved
+ */
+#define DEV_SPEED_MASK (0xf << 10)
+#define XDEV_FS (0x1 << 10)
+#define XDEV_LS (0x2 << 10)
+#define XDEV_HS (0x3 << 10)
+#define XDEV_SS (0x4 << 10)
+#define XDEV_SSP (0x5 << 10)
+#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
+#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
+#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
+#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
+#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
+#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
+
+/* Bits 20:23 in the Slot Context are the speed for the device */
+#define SLOT_SPEED_FS (XDEV_FS << 10)
+#define SLOT_SPEED_LS (XDEV_LS << 10)
+#define SLOT_SPEED_HS (XDEV_HS << 10)
+#define SLOT_SPEED_SS (XDEV_SS << 10)
+#define SLOT_SPEED_SSP (XDEV_SSP << 10)
+/* Port Indicator Control */
+#define PORT_LED_OFF (0 << 14)
+#define PORT_LED_AMBER (1 << 14)
+#define PORT_LED_GREEN (2 << 14)
+#define PORT_LED_MASK (3 << 14)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE (1 << 16)
+/* true: connect status change */
+#define PORT_CSC (1 << 17)
+/* true: port enable change */
+#define PORT_PEC (1 << 18)
+/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
+ * into an enabled state, and the device into the default state. A "warm" reset
+ * also resets the link, forcing the device through the link training sequence.
+ * SW can also look at the Port Reset register to see when warm reset is done.
+ */
+#define PORT_WRC (1 << 19)
+/* true: over-current change */
+#define PORT_OCC (1 << 20)
+/* true: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC (1 << 21)
+/* port link status change - set on some port link state transitions:
+ * Transition Reason
+ * ------------------------------------------------------------------------------
+ * - U3 to Resume Wakeup signaling from a device
+ * - Resume to Recovery to U0 USB 3.0 device resume
+ * - Resume to U0 USB 2.0 device resume
+ * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
+ * - U3 to U0 Software resume of USB 2.0 device complete
+ * - U2 to U0 L1 resume of USB 2.1 device complete
+ * - U0 to U0 (???) L1 entry rejection by USB 2.1 device
+ * - U0 to disabled L1 entry error with USB 2.1 device
+ * - Any state to inactive Error on USB 3.0 port
+ */
+#define PORT_PLC (1 << 22)
+/* port configure error change - port failed to configure its link partner */
+#define PORT_CEC (1 << 23)
+#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+ PORT_RC | PORT_PLC | PORT_CEC)
+
+
+/* Cold Attach Status - xHC can set this bit to report device attached during
+ * Sx state. Warm port reset should be performed to clear this bit and move port
+ * to connected state.
+ */
+#define PORT_CAS (1 << 24)
+/* wake on connect (enable) */
+#define PORT_WKCONN_E (1 << 25)
+/* wake on disconnect (enable) */
+#define PORT_WKDISC_E (1 << 26)
+/* wake on over-current (enable) */
+#define PORT_WKOC_E (1 << 27)
+/* bits 28:29 reserved */
+/* true: device is non-removable - for USB 3.0 roothub emulation */
+#define PORT_DEV_REMOVE (1 << 30)
+/* Initiate a warm port reset - complete when PORT_WRC is '1' */
+#define PORT_WR (1 << 31)
+
+/* We mark duplicate entries with -1 */
+#define DUPLICATE_ENTRY ((u8)(-1))
+
+/* Port Power Management Status and Control - port_power_base bitmasks */
+/* Inactivity timer value for transitions into U1, in microseconds.
+ * Timeout can be up to 127us. 0xFF means an infinite timeout.
+ */
+#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
+#define PORT_U1_TIMEOUT_MASK 0xff
+/* Inactivity timer value for transitions into U2 */
+#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
+#define PORT_U2_TIMEOUT_MASK (0xff << 8)
+/* Bits 24:31 for port testing */
+
+/* USB2 Protocol PORTSPMSC */
+#define PORT_L1S_MASK 7
+#define PORT_L1S_SUCCESS 1
+#define PORT_RWE (1 << 3)
+#define PORT_HIRD(p) (((p) & 0xf) << 4)
+#define PORT_HIRD_MASK (0xf << 4)
+#define PORT_L1DS_MASK (0xff << 8)
+#define PORT_L1DS(p) (((p) & 0xff) << 8)
+#define PORT_HLE (1 << 16)
+#define PORT_TEST_MODE_SHIFT 28
+
+/* USB3 Protocol PORTLI Port Link Information */
+#define PORT_RX_LANES(p) (((p) >> 16) & 0xf)
+#define PORT_TX_LANES(p) (((p) >> 20) & 0xf)
+
+/* USB2 Protocol PORTHLPMC */
+#define PORT_HIRDM(p)		((p) & 3)
+#define PORT_L1_TIMEOUT(p)	(((p) & 0xff) << 2)
+#define PORT_BESLD(p)		(((p) & 0xf) << 10)
+
+/* use 512 microseconds as USB2 LPM L1 default timeout. */
+#define XHCI_L1_TIMEOUT 512
+
+/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
+ * Safe to use with mixed HIRD and BESL systems (host and device) and is used
+ * by other operating systems.
+ *
+ * XHCI 1.0 errata 8/14/12 Table 13 notes:
+ * "Software should choose xHC BESL/BESLD field values that do not violate a
+ * device's resume latency requirements,
+ * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
+ * or not program values < '4' if BLC = '0' and a BESL device is attached."
+ */
+#define XHCI_DEFAULT_BESL 4
+
+/*
+ * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports
+ * to complete link training. Usually link training completes much faster,
+ * so check status 10 times with 36ms sleep in places we need to wait for
+ * polling to complete.
+ */
+#define XHCI_PORT_POLLING_LFPS_TIME 36
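The new xhci-port.h gathers the PORTSC/PORTPMSC bit definitions that previously lived in xhci.h. As a quick illustration of how the masks above are typically combined when decoding a raw PORTSC value (a sketch, not code from this patch; it assumes the usual xhci.h context for struct xhci_hcd and xhci_dbg()):

static void example_decode_portsc(struct xhci_hcd *xhci, u32 portsc)
{
	/* basic status bits and the 4-bit device speed field */
	xhci_dbg(xhci, "connect=%d enabled=%d link=%#x speed=%u\n",
		 !!(portsc & PORT_CONNECT), !!(portsc & PORT_PE),
		 portsc & PORT_PLS_MASK, DEV_PORT_SPEED(portsc));

	/* the link state field compares directly against the XDEV_* values */
	if ((portsc & PORT_PLS_MASK) == XDEV_U3)
		xhci_dbg(xhci, "port is suspended (U3)\n");

	/* any of the *C change bits set means the hub code has work to do */
	if (portsc & PORT_CHANGE_MASK)
		xhci_dbg(xhci, "unhandled port change bits: %#x\n",
			 portsc & PORT_CHANGE_MASK);
}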
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index ab9c5969e462..1cc082a3b793 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -214,8 +214,7 @@ static int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
*/
#define SET_XHCI_PLAT_PRIV_FOR_RCAR(firmware) \
.firmware_name = firmware, \
- .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH | \
- XHCI_SLOW_SUSPEND, \
+ .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND, \
.init_quirk = xhci_rcar_init_quirk, \
.plat_start = xhci_rcar_start, \
.resume_quirk = xhci_rcar_resume_quirk,
@@ -229,8 +228,7 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
};
static const struct xhci_plat_priv xhci_plat_renesas_rzv2m = {
- .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH |
- XHCI_SLOW_SUSPEND,
+ .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND,
.init_quirk = xhci_rzv2m_init_quirk,
.plat_start = xhci_rzv2m_start,
};
@@ -276,7 +274,7 @@ static int xhci_renesas_probe(struct platform_device *pdev)
static struct platform_driver usb_xhci_renesas_driver = {
.probe = xhci_renesas_probe,
- .remove_new = xhci_plat_remove,
+ .remove = xhci_plat_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-renesas-hcd",
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 4f64b814d4aa..965bffce301e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -52,8 +52,10 @@
* endpoint rings; it generates events on the event ring for these.
*/
+#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -113,6 +115,12 @@ static bool last_td_in_urb(struct xhci_td *td)
return urb_priv->num_tds_done == urb_priv->num_tds;
}
+static bool unhandled_event_trb(struct xhci_ring *ring)
+{
+ return ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
+ ring->cycle_state);
+}
+
static void inc_td_cnt(struct urb *urb)
{
struct urb_priv *urb_priv = urb->hcpriv;
@@ -139,10 +147,8 @@ static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* effect the ring dequeue or enqueue pointers.
*/
-static void next_trb(struct xhci_hcd *xhci,
- struct xhci_ring *ring,
- struct xhci_segment **seg,
- union xhci_trb **trb)
+static void next_trb(struct xhci_segment **seg,
+ union xhci_trb **trb)
{
if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) {
*seg = (*seg)->next;
@@ -163,13 +169,16 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (ring->type == TYPE_EVENT) {
if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
ring->dequeue++;
- goto out;
+ return;
}
if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
ring->cycle_state ^= 1;
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
- goto out;
+
+ trace_xhci_inc_deq(ring);
+
+ return;
}
/* All other rings have link trbs */
@@ -184,14 +193,13 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
+ trace_xhci_inc_deq(ring);
+
if (link_trb_count++ > ring->num_segs) {
xhci_warn(xhci, "Ring is an endless link TRB loop\n");
break;
}
}
-out:
- trace_xhci_inc_deq(ring);
-
return;
}
@@ -244,9 +252,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
* AMD 0.96 host, carry over the chain bit of the previous TRB
* (which may mean the chain bit is cleared).
*/
- if (!(ring->type == TYPE_ISOC &&
- (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
- !xhci_link_trb_quirk(xhci)) {
+ if (!xhci_link_chain_quirk(xhci, ring->type)) {
next->link.control &= cpu_to_le32(~TRB_CHAIN);
next->link.control |= cpu_to_le32(chain);
}
@@ -262,13 +268,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
+ trace_xhci_inc_enq(ring);
+
if (link_trb_count++ > ring->num_segs) {
xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
break;
}
}
-
- trace_xhci_inc_enq(ring);
}
/*
@@ -277,7 +283,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
* Only for transfer and command rings where driver is the producer, not for
* event rings.
*/
-static unsigned int xhci_num_trbs_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+static unsigned int xhci_num_trbs_free(struct xhci_ring *ring)
{
struct xhci_segment *enq_seg = ring->enq_seg;
union xhci_trb *enq = ring->enqueue;
@@ -302,7 +308,7 @@ static unsigned int xhci_num_trbs_free(struct xhci_hcd *xhci, struct xhci_ring *
free += last_on_seg - enq;
enq_seg = enq_seg->next;
enq = enq_seg->trbs;
- } while (i++ <= ring->num_segs);
+ } while (i++ < ring->num_segs);
return free;
}
@@ -345,10 +351,8 @@ static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhc
while (new_segs > 0) {
seg = seg->next;
if (seg == ring->deq_seg) {
- xhci_dbg(xhci, "Ring expansion by %d segments needed\n",
- new_segs);
- xhci_dbg(xhci, "Adding %d trbs moves enq %d trbs into deq seg\n",
- num_trbs, trbs_past_seg % TRBS_PER_SEGMENT);
+ xhci_dbg(xhci, "Adding %d trbs requires expanding ring by %d segments\n",
+ num_trbs, new_segs);
return new_segs;
}
new_segs--;
@@ -419,12 +423,13 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
!(xhci->xhc_state & XHCI_STATE_DYING)) {
xhci->current_cmd = cur_cmd;
- xhci_mod_cmd_timer(xhci);
+ if (cur_cmd)
+ xhci_mod_cmd_timer(xhci);
xhci_ring_cmd_db(xhci);
}
}
-/* Must be called with xhci->lock held, releases and aquires lock back */
+/* Must be called with xhci->lock held, releases and acquires lock back */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
@@ -444,9 +449,9 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
* avoiding corrupting the command ring pointer in case the command ring
* is stopped by the time the upper dword is written.
*/
- next_trb(xhci, NULL, &new_seg, &new_deq);
+ next_trb(&new_seg, &new_deq);
if (trb_is_link(new_deq))
- next_trb(xhci, NULL, &new_seg, &new_deq);
+ next_trb(&new_seg, &new_deq);
crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
@@ -650,25 +655,6 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
stream_id);
return -ENODEV;
}
- /*
- * A cancelled TD can complete with a stall if HW cached the trb.
- * In this case driver can't find td, but if the ring is empty we
- * can move the dequeue pointer to the current enqueue position.
- * We shouldn't hit this anymore as cached cancelled TRBs are given back
- * after clearing the cache, but be on the safe side and keep it anyway
- */
- if (!td) {
- if (list_empty(&ep_ring->td_list)) {
- new_seg = ep_ring->enq_seg;
- new_deq = ep_ring->enqueue;
- new_cycle = ep_ring->cycle_state;
- xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
- goto deq_found;
- } else {
- xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
- return -EINVAL;
- }
- }
hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
new_seg = ep_ring->deq_seg;
@@ -677,8 +663,8 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
/*
* We want to find the pointer, segment and cycle state of the new trb
- * (the one after current TD's last_trb). We know the cycle state at
- * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
+ * (the one after current TD's end_trb). We know the cycle state at
+ * hw_dequeue, so walk the ring until both hw_dequeue and end_trb are
* found.
*/
do {
@@ -688,14 +674,14 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
if (td_last_trb_found)
break;
}
- if (new_deq == td->last_trb)
+ if (new_deq == td->end_trb)
td_last_trb_found = true;
if (cycle_found && trb_is_link(new_deq) &&
link_trb_toggles_cycle(new_deq))
new_cycle ^= 0x1;
- next_trb(xhci, ep_ring, &new_seg, &new_deq);
+ next_trb(&new_seg, &new_deq);
/* Search wrapped around, bail out */
if (new_deq == ep->ring->dequeue) {
@@ -705,8 +691,6 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
} while (!cycle_found || !td_last_trb_found);
-deq_found:
-
/* Don't update the ring cycle state for the producer (us). */
addr = xhci_trb_virt_to_dma(new_seg, new_deq);
if (addr == 0) {
@@ -734,7 +718,7 @@ deq_found:
lower_32_bits(addr) | trb_sct | new_cycle,
upper_32_bits(addr),
STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
- EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
+ EP_INDEX_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
if (ret < 0) {
xhci_free_command(xhci, cmd);
return ret;
@@ -759,30 +743,25 @@ deq_found:
* (The last TRB actually points to the ring enqueue pointer, which is not part
* of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
*/
-static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
- struct xhci_td *td, bool flip_cycle)
+static void td_to_noop(struct xhci_td *td, bool flip_cycle)
{
struct xhci_segment *seg = td->start_seg;
- union xhci_trb *trb = td->first_trb;
+ union xhci_trb *trb = td->start_trb;
while (1) {
trb_to_noop(trb, TRB_TR_NOOP);
/* flip cycle if asked to */
- if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
+ if (flip_cycle && trb != td->start_trb && trb != td->end_trb)
trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
- if (trb == td->last_trb)
+ if (trb == td->end_trb)
break;
- next_trb(xhci, ep_ring, &seg, &trb);
+ next_trb(&seg, &trb);
}
}
-/*
- * Must be called with xhci->lock held in interrupt context,
- * releases and re-acquires xhci->lock
- */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
struct xhci_td *cur_td, int status)
{
@@ -822,7 +801,7 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
DMA_FROM_DEVICE);
- /* for in tranfers we need to copy the data from bounce to sg */
+ /* for in transfers we need to copy the data from bounce to sg */
if (urb->num_sgs) {
len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
seg->bounce_len, seg->bounce_offs);
@@ -837,8 +816,8 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
seg->bounce_offs = 0;
}
-static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
- struct xhci_ring *ep_ring, int status)
+static void xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
+ struct xhci_ring *ep_ring, int status)
{
struct urb *urb = NULL;
@@ -881,10 +860,18 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
status = 0;
xhci_giveback_urb_in_irq(xhci, td, status);
}
-
- return 0;
}
+/* Give back previous TD and move on to the next TD. */
+static void xhci_dequeue_td(struct xhci_hcd *xhci, struct xhci_td *td, struct xhci_ring *ring,
+ u32 status)
+{
+ ring->dequeue = td->end_trb;
+ ring->deq_seg = td->end_seg;
+ inc_deq(xhci, ring);
+
+ xhci_td_cleanup(xhci, td, ring, status);
+}
/* Complete the cancelled URBs we unlinked from td_list. */
static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
@@ -995,13 +982,20 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
unsigned int slot_id = ep->vdev->slot_id;
int err;
+ /*
+ * This is not going to work if the hardware is changing its dequeue
+ * pointers as we look at them. Completion handler will call us later.
+ */
+ if (ep->ep_state & SET_DEQ_PENDING)
+ return 0;
+
xhci = ep->xhci;
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
(unsigned long long)xhci_trb_virt_to_dma(
- td->start_seg, td->first_trb),
+ td->start_seg, td->start_trb),
td->urb->stream_id, td->urb);
list_del_init(&td->td_list);
ring = xhci_urb_to_transfer_ring(xhci, td->urb);
@@ -1020,26 +1014,39 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
td->urb->stream_id);
hw_deq &= ~0xf;
- if (td->cancel_status == TD_HALTED ||
- trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
+ if (td->cancel_status == TD_HALTED || trb_in_td(xhci, td, hw_deq, false)) {
switch (td->cancel_status) {
case TD_CLEARED: /* TD is already no-op */
case TD_CLEARING_CACHE: /* set TR deq command already queued */
break;
case TD_DIRTY: /* TD is cached, clear it */
case TD_HALTED:
+ case TD_CLEARING_CACHE_DEFERRED:
+ if (cached_td) {
+ if (cached_td->urb->stream_id != td->urb->stream_id) {
+ /* Multiple streams case, defer move dq */
+ xhci_dbg(xhci,
+ "Move dq deferred: stream %u URB %p\n",
+ td->urb->stream_id, td->urb);
+ td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
+ break;
+ }
+
+ /* Should never happen, but clear the TD if it does */
+ xhci_warn(xhci,
+ "Found multiple active URBs %p and %p in stream %u?\n",
+ td->urb, cached_td->urb,
+ td->urb->stream_id);
+ td_to_noop(cached_td, false);
+ cached_td->cancel_status = TD_CLEARED;
+ }
+ td_to_noop(td, false);
td->cancel_status = TD_CLEARING_CACHE;
- if (cached_td)
- /* FIXME stream case, several stopped rings */
- xhci_dbg(xhci,
- "Move dq past stream %u URB %p instead of stream %u URB %p\n",
- td->urb->stream_id, td->urb,
- cached_td->urb->stream_id, cached_td->urb);
cached_td = td;
break;
}
} else {
- td_to_noop(xhci, ring, td, false);
+ td_to_noop(td, false);
td->cancel_status = TD_CLEARED;
}
}
@@ -1054,11 +1061,17 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
if (err) {
/* Failed to move past cached td, just set cached TDs to no-op */
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
- if (td->cancel_status != TD_CLEARING_CACHE)
+ /*
+ * Deferred TDs need to have the deq pointer set after the above command
+ * completes, so if that failed we just give up on all of them (and
+ * complain loudly since this could cause issues due to caching).
+ */
+ if (td->cancel_status != TD_CLEARING_CACHE &&
+ td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
continue;
- xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
- td->urb);
- td_to_noop(xhci, ring, td, false);
+ xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
+ td->urb);
+ td_to_noop(td, false);
td->cancel_status = TD_CLEARED;
}
}
@@ -1066,6 +1079,19 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
}
/*
+ * Erase queued TDs from transfer ring(s) and give back those the xHC didn't
+ * stop on. If necessary, queue commands to move the xHC off cancelled TDs it
+ * stopped on. Those will be given back later when the commands complete.
+ *
+ * Call under xhci->lock on a stopped endpoint.
+ */
+void xhci_process_cancelled_tds(struct xhci_virt_ep *ep)
+{
+ xhci_invalidate_cancelled_tds(ep);
+ xhci_giveback_invalidated_tds(ep);
+}
+
+/*
* Returns the TD the endpoint ring halted on.
* Only call for non-running rings without streams.
*/
@@ -1078,8 +1104,7 @@ static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
hw_deq &= ~0xf;
td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
- if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
- td->last_trb, hw_deq, false))
+ if (trb_in_td(ep->xhci, td, hw_deq, false))
return td;
}
return NULL;
@@ -1154,9 +1179,35 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
break;
ep->ep_state &= ~EP_STOP_CMD_PENDING;
return;
+ case EP_STATE_STOPPED:
+ /*
+ * Per xHCI 4.6.9, Stop Endpoint command on a Stopped
+ * EP is a Context State Error, and EP stays Stopped.
+ *
+ * But maybe it failed on Halted, and somebody ran Reset
+ * Endpoint later. EP state is now Stopped and EP_HALTED
+ * still set because Reset EP handler will run after us.
+ */
+ if (ep->ep_state & EP_HALTED)
+ break;
+ /*
+ * On some HCs EP state remains Stopped for some tens of
+ * us to a few ms or more after a doorbell ring, and any
+ * new Stop Endpoint fails without aborting the restart.
+ * This handler may run quickly enough to still see this
+ * Stopped state, but it will soon change to Running.
+ *
+ * Assume this bug on unexpected Stop Endpoint failures.
+ * Keep retrying until the EP starts and stops again, on
+ * chips where this is known to help. Wait for 100ms.
+ */
+ if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
+ break;
+ fallthrough;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
- xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
+ xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
+ GET_EP_CTX_STATE(ep_ctx));
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
@@ -1334,6 +1385,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
+ struct xhci_stream_ctx *stream_ctx;
struct xhci_td *td, *tmp_td;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
@@ -1355,6 +1407,11 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
trace_xhci_handle_cmd_set_deq(slot_ctx);
trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ stream_ctx = &ep->stream_info->stream_ctx_array[stream_id];
+ trace_xhci_handle_cmd_set_deq_stream(ep->stream_info, stream_id);
+ }
+
if (cmd_comp_code != COMP_SUCCESS) {
unsigned int ep_state;
unsigned int slot_state;
@@ -1391,9 +1448,21 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
u64 deq;
/* 4.6.10 deq ptr is written to the stream ctx for streams */
if (ep->ep_state & EP_HAS_STREAMS) {
- struct xhci_stream_ctx *ctx =
- &ep->stream_info->stream_ctx_array[stream_id];
- deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
+ deq = le64_to_cpu(stream_ctx->stream_ring) & SCTX_DEQ_MASK;
+
+ /*
+ * Cadence xHCI controllers store some endpoint state
+ * information within Rsvd0 fields of Stream Endpoint
+ * context. This field is not cleared during Set TR
+ * Dequeue Pointer command which causes XDMA to skip
+ * over transfer ring and leads to data loss on stream
+ * pipe.
+ * To fix this issue driver must clear Rsvd0 field.
+ */
+ if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) {
+ stream_ctx->reserved[0] = 0;
+ stream_ctx->reserved[1] = 0;
+ }
} else {
deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
}
@@ -1430,8 +1499,21 @@ cleanup:
ep->ep_state &= ~SET_DEQ_PENDING;
ep->queued_deq_seg = NULL;
ep->queued_deq_ptr = NULL;
- /* Restart any rings with pending URBs */
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+
+ /* Check for deferred or newly cancelled TDs */
+ if (!list_empty(&ep->cancelled_td_list)) {
+ xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
+ __func__);
+ xhci_invalidate_cancelled_tds(ep);
+ /* Try to restart the endpoint if all is done */
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ /* Start giving back any TDs invalidated above */
+ xhci_giveback_invalidated_tds(ep);
+ } else {
+ /* Restart any rings with pending URBs */
+ xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ }
}
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
@@ -1468,8 +1550,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
-static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
- struct xhci_command *command, u32 cmd_comp_code)
+static void xhci_handle_cmd_enable_slot(int slot_id, struct xhci_command *command,
+ u32 cmd_comp_code)
{
if (cmd_comp_code == COMP_SUCCESS)
command->slot_id = slot_id;
@@ -1494,8 +1576,7 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
xhci_free_device_endpoint_resources(xhci, virt_dev, true);
}
-static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
- u32 cmd_comp_code)
+static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *virt_dev;
struct xhci_input_control_ctx *ctrl_ctx;
@@ -1570,12 +1651,13 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
NEC_FW_MINOR(le32_to_cpu(event->status)));
}
-static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
+static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 comp_code, u32 comp_param)
{
list_del(&cmd->cmd_list);
if (cmd->completion) {
- cmd->status = status;
+ cmd->status = comp_code;
+ cmd->comp_param = comp_param;
complete(cmd->completion);
} else {
kfree(cmd);
@@ -1587,7 +1669,7 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
struct xhci_command *cur_cmd, *tmp_cmd;
xhci->current_cmd = NULL;
list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
- xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
+ xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED, 0);
}
void xhci_handle_command_timeout(struct work_struct *work)
@@ -1672,6 +1754,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ u32 status = le32_to_cpu(event->status);
u64 cmd_dma;
dma_addr_t cmd_dequeue_dma;
u32 cmd_comp_code;
@@ -1687,7 +1770,15 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd_dma = le64_to_cpu(event->cmd_trb);
cmd_trb = xhci->cmd_ring->dequeue;
- trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);
+ trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic, cmd_dma);
+
+ cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
+
+ /* If CMD ring stopped we own the trbs between enqueue and dequeue */
+ if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
+ complete_all(&xhci->cmd_ring_stop_completion);
+ return;
+ }
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
cmd_trb);
@@ -1705,14 +1796,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cancel_delayed_work(&xhci->cmd_timer);
- cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
-
- /* If CMD ring stopped we own the trbs between enqueue and dequeue */
- if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
- complete_all(&xhci->cmd_ring_stop_completion);
- return;
- }
-
if (cmd->command_trb != xhci->cmd_ring->dequeue) {
xhci_err(xhci,
"Command completion event does not match command\n");
@@ -1737,14 +1820,14 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
switch (cmd_type) {
case TRB_ENABLE_SLOT:
- xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
+ xhci_handle_cmd_enable_slot(slot_id, cmd, cmd_comp_code);
break;
case TRB_DISABLE_SLOT:
xhci_handle_cmd_disable_slot(xhci, slot_id);
break;
case TRB_CONFIG_EP:
if (!cmd->completion)
- xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
+ xhci_handle_cmd_config_ep(xhci, slot_id);
break;
case TRB_EVAL_CONTEXT:
break;
@@ -1800,7 +1883,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
}
event_handled:
- xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
+ xhci_complete_del_and_free_cmd(cmd, cmd_comp_code, COMP_PARAM(status));
inc_deq(xhci, xhci->cmd_ring);
}
@@ -1862,15 +1945,12 @@ static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
} while (!(pll_lock_check & 0x1) && --retry_count);
}
-static void handle_port_status(struct xhci_hcd *xhci,
- struct xhci_interrupter *ir,
- union xhci_trb *event)
+static void handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
{
struct usb_hcd *hcd;
u32 port_id;
u32 portsc, cmd_reg;
int max_ports;
- int slot_id;
unsigned int hcd_portnum;
struct xhci_bus_state *bus_state;
bool bogus_port_status = false;
@@ -1922,9 +2002,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
if (hcd->speed >= HCD_USB3 &&
(portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
- if (slot_id && xhci->devs[slot_id])
- xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
+ if (port->slot_id && xhci->devs[port->slot_id])
+ xhci->devs[port->slot_id]->flags |= VDEV_PORT_ERROR;
}
if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
@@ -1982,9 +2061,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
* so the roothub behavior is consistent with external
* USB 3.0 hub behavior.
*/
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
- if (slot_id && xhci->devs[slot_id])
- xhci_ring_device(xhci, slot_id);
+ if (port->slot_id && xhci->devs[port->slot_id])
+ xhci_ring_device(xhci, port->slot_id);
if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
usb_wakeup_notification(hcd->self.root_hub,
@@ -2039,25 +2117,19 @@ cleanup:
}
/*
- * This TD is defined by the TRBs starting at start_trb in start_seg and ending
- * at end_trb, which may be in another segment. If the suspect DMA address is a
- * TRB in this TD, this function returns that TRB's segment. Otherwise it
- * returns 0.
+ * If the suspect DMA address is a TRB in this TD, this function returns that
+ * TRB's segment. Otherwise it returns 0.
*/
-struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
- struct xhci_segment *start_seg,
- union xhci_trb *start_trb,
- union xhci_trb *end_trb,
- dma_addr_t suspect_dma,
- bool debug)
+struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td, dma_addr_t suspect_dma,
+ bool debug)
{
dma_addr_t start_dma;
dma_addr_t end_seg_dma;
dma_addr_t end_trb_dma;
struct xhci_segment *cur_seg;
- start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
- cur_seg = start_seg;
+ start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb);
+ cur_seg = td->start_seg;
do {
if (start_dma == 0)
@@ -2066,7 +2138,7 @@ struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
/* If the end TRB isn't in this segment, this is set to 0 */
- end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
+ end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb);
if (debug)
xhci_warn(xhci,
@@ -2100,7 +2172,7 @@ struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
}
cur_seg = cur_seg->next;
start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
- } while (cur_seg != start_seg);
+ } while (cur_seg != td->start_seg);
return NULL;
}
@@ -2122,30 +2194,34 @@ static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
}
}
-/* Check if an error has halted the endpoint ring. The class driver will
- * cleanup the halt for a non-default control endpoint if we indicate a stall.
- * However, a babble and other errors also halt the endpoint ring, and the class
- * driver won't clear the halt in that case, so we need to issue a Set Transfer
- * Ring Dequeue Pointer command manually.
+/*
+ * Check if xhci internal endpoint state has gone to a "halt" state due to an
+ * error or stall, including default control pipe protocol stall.
+ * The internal halt needs to be cleared with a reset endpoint command.
+ *
+ * External device side is also halted in functional stall cases. Class driver
+ * will clear the device halt with a CLEAR_FEATURE(ENDPOINT_HALT) request later.
*/
-static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
- struct xhci_ep_ctx *ep_ctx,
- unsigned int trb_comp_code)
+static bool xhci_halted_host_endpoint(struct xhci_ep_ctx *ep_ctx, unsigned int comp_code)
{
- /* TRB completion codes that may require a manual halt cleanup */
- if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
- trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
- trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
- /* The 0.95 spec says a babbling control endpoint
- * is not halted. The 0.96 spec says it is. Some HW
- * claims to be 0.95 compliant, but it halts the control
- * endpoint anyway. Check if a babble halted the
- * endpoint.
+ /* Stall halts both internal and device side endpoint */
+ if (comp_code == COMP_STALL_ERROR)
+ return true;
+
+ /* TRB completion codes that may require internal halt cleanup */
+ if (comp_code == COMP_USB_TRANSACTION_ERROR ||
+ comp_code == COMP_BABBLE_DETECTED_ERROR ||
+ comp_code == COMP_SPLIT_TRANSACTION_ERROR)
+ /*
+ * The 0.95 spec says a babbling control endpoint is not halted.
+ * The 0.96 spec says it is. Some HW claims to be 0.95
+ * compliant, but it halts the control endpoint anyway.
+ * Check endpoint context if endpoint is halted.
*/
if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
- return 1;
+ return true;
- return 0;
+ return false;
}
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
@@ -2162,9 +2238,9 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
return 0;
}
-static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
- struct xhci_ring *ep_ring, struct xhci_td *td,
- u32 trb_comp_code)
+static void finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ struct xhci_ring *ep_ring, struct xhci_td *td,
+ u32 trb_comp_code)
{
struct xhci_ep_ctx *ep_ctx;
@@ -2179,7 +2255,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
* stopped TDs. A stopped TD may be restarted, so don't update
* the ring dequeue pointer or take this TD off any lists yet.
*/
- return 0;
+ return;
case COMP_USB_TRANSACTION_ERROR:
case COMP_BABBLE_DETECTED_ERROR:
case COMP_SPLIT_TRANSACTION_ERROR:
@@ -2204,8 +2280,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
!list_empty(&td->cancelled_td_list)) {
xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
(unsigned long long)xhci_trb_virt_to_dma(
- td->start_seg, td->first_trb));
- return 0;
+ td->start_seg, td->start_trb));
+ return;
}
/* endpoint not halted, don't reset it */
break;
@@ -2213,7 +2289,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
/* Almost same procedure as for STALL_ERROR below */
xhci_clear_hub_tt_buffer(xhci, td, ep);
xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
- return 0;
+ return;
case COMP_STALL_ERROR:
/*
* xhci internal endpoint state will go to a "halt" state for
@@ -2230,28 +2306,22 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
- return 0; /* xhci_handle_halted_endpoint marked td cancelled */
+ return; /* xhci_handle_halted_endpoint marked td cancelled */
default:
break;
}
- /* Update ring dequeue pointer */
- ep_ring->dequeue = td->last_trb;
- ep_ring->deq_seg = td->last_trb_seg;
- inc_deq(xhci, ep_ring);
-
- return xhci_td_cleanup(xhci, td, ep_ring, td->status);
+ xhci_dequeue_td(xhci, td, ep_ring, td->status);
}
-/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
-static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
- union xhci_trb *stop_trb)
+/* sum trb lengths from the first trb up to stop_trb, _excluding_ stop_trb */
+static u32 sum_trb_lengths(struct xhci_td *td, union xhci_trb *stop_trb)
{
u32 sum;
- union xhci_trb *trb = ring->dequeue;
- struct xhci_segment *seg = ring->deq_seg;
+ union xhci_trb *trb = td->start_trb;
+ struct xhci_segment *seg = td->start_seg;
- for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
+ for (sum = 0; trb != stop_trb; next_trb(&seg, &trb)) {
if (!trb_is_noop(trb) && !trb_is_link(trb))
sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
}
@@ -2261,9 +2331,9 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
/*
* Process control tds, update urb status and actual_length.
*/
-static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
- struct xhci_ring *ep_ring, struct xhci_td *td,
- union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+static void process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ struct xhci_ring *ep_ring, struct xhci_td *td,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
@@ -2315,8 +2385,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
case COMP_STOPPED_LENGTH_INVALID:
goto finish_td;
default:
- if (!xhci_requires_manual_halt_cleanup(xhci,
- ep_ctx, trb_comp_code))
+ if (!xhci_halted_host_endpoint(ep_ctx, trb_comp_code))
break;
xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
trb_comp_code, ep->ep_index);
@@ -2343,7 +2412,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->urb_length_set = true;
td->urb->actual_length = requested - remaining;
xhci_dbg(xhci, "Waiting for status stage event\n");
- return 0;
+ return;
}
/* at status stage */
@@ -2351,15 +2420,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->urb->actual_length = requested;
finish_td:
- return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
/*
* Process isochronous tds, update urb packet status and actual_length.
*/
-static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
- struct xhci_ring *ep_ring, struct xhci_td *td,
- union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+static void process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ struct xhci_ring *ep_ring, struct xhci_td *td,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
struct urb_priv *urb_priv;
int idx;
@@ -2387,8 +2456,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
break;
if (remaining) {
frame->status = short_framestatus;
- if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
- sum_trbs_for_length = true;
+ sum_trbs_for_length = true;
break;
}
frame->status = 0;
@@ -2405,7 +2473,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
fallthrough;
case COMP_ISOCH_BUFFER_OVERRUN:
frame->status = -EOVERFLOW;
- if (ep_trb != td->last_trb)
+ if (ep_trb != td->end_trb)
td->error_mid_td = true;
break;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
@@ -2415,19 +2483,21 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
case COMP_USB_TRANSACTION_ERROR:
frame->status = -EPROTO;
sum_trbs_for_length = true;
- if (ep_trb != td->last_trb)
+ if (ep_trb != td->end_trb)
td->error_mid_td = true;
break;
case COMP_STOPPED:
sum_trbs_for_length = true;
break;
case COMP_STOPPED_SHORT_PACKET:
- /* field normally containing residue now contains tranferred */
+ /* field normally containing residue now contains transferred */
frame->status = short_framestatus;
requested = remaining;
break;
case COMP_STOPPED_LENGTH_INVALID:
- requested = 0;
+ /* exclude stopped trb with invalid length from length sum */
+ sum_trbs_for_length = true;
+ ep_trb_len = 0;
remaining = 0;
break;
default:
@@ -2440,7 +2510,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
goto finish_td;
if (sum_trbs_for_length)
- frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
+ frame->actual_length = sum_trb_lengths(td, ep_trb) +
ep_trb_len - remaining;
else
frame->actual_length = requested;
@@ -2449,17 +2519,16 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
finish_td:
/* Don't give back TD yet if we encountered an error mid TD */
- if (td->error_mid_td && ep_trb != td->last_trb) {
+ if (td->error_mid_td && ep_trb != td->end_trb) {
xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
td->urb_length_set = true;
- return 0;
+ return;
}
-
- return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
-static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
- struct xhci_virt_ep *ep, int status)
+static void skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ struct xhci_virt_ep *ep, int status)
{
struct urb_priv *urb_priv;
struct usb_iso_packet_descriptor *frame;
@@ -2475,20 +2544,15 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* calc actual length */
frame->actual_length = 0;
- /* Update ring dequeue pointer */
- ep->ring->dequeue = td->last_trb;
- ep->ring->deq_seg = td->last_trb_seg;
- inc_deq(xhci, ep->ring);
-
- return xhci_td_cleanup(xhci, td, ep->ring, status);
+ xhci_dequeue_td(xhci, td, ep->ring, status);
}
/*
* Process bulk and interrupt tds, update urb status and actual_length.
*/
-static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
- struct xhci_ring *ep_ring, struct xhci_td *td,
- union xhci_trb *ep_trb, struct xhci_transfer_event *event)
+static void process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ struct xhci_ring *ep_ring, struct xhci_td *td,
+ union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
struct xhci_slot_ctx *slot_ctx;
u32 trb_comp_code;
@@ -2504,7 +2568,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
case COMP_SUCCESS:
ep->err_count = 0;
/* handle success with untransferred data as short packet */
- if (ep_trb != td->last_trb || remaining) {
+ if (ep_trb != td->end_trb || remaining) {
xhci_warn(xhci, "WARN Successful completion on short TX\n");
xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
td->urb->ep->desc.bEndpointAddress,
@@ -2513,9 +2577,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->status = 0;
break;
case COMP_SHORT_PACKET:
- xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
- td->urb->ep->desc.bEndpointAddress,
- requested, remaining);
td->status = 0;
break;
case COMP_STOPPED_SHORT_PACKET:
@@ -2523,9 +2584,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
goto finish_td;
case COMP_STOPPED_LENGTH_INVALID:
/* stopped on ep trb with invalid length, exclude it */
- ep_trb_len = 0;
- remaining = 0;
- break;
+ td->urb->actual_length = sum_trb_lengths(td, ep_trb);
+ goto finish_td;
case COMP_USB_TRANSACTION_ERROR:
if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
(ep->err_count++ > MAX_SOFT_RETRY) ||
@@ -2535,17 +2595,17 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->status = 0;
xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
- return 0;
+ return;
default:
/* do nothing */
break;
}
- if (ep_trb == td->last_trb)
+ if (ep_trb == td->end_trb)
td->urb->actual_length = requested - remaining;
else
td->urb->actual_length =
- sum_trb_lengths(xhci, ep_ring, ep_trb) +
+ sum_trb_lengths(td, ep_trb) +
ep_trb_len - remaining;
finish_td:
if (remaining > requested) {
@@ -2554,7 +2614,34 @@ finish_td:
td->urb->actual_length = 0;
}
- return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+ finish_td(xhci, ep, ep_ring, td, trb_comp_code);
+}
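For a bulk/interrupt TD that completes on a TRB other than its last one, the actual length computed above is the sum of the fully completed TRBs plus the partially completed event TRB minus its residue. A small standalone illustration of that arithmetic (a sketch with made-up lengths, not part of this patch):

#include <stdio.h>

int main(void)
{
	/* hypothetical 3-TRB TD, 1024 bytes per TRB; the event hits the 2nd TRB */
	unsigned int trb_len[] = { 1024, 1024, 1024 };
	unsigned int ep_trb = 1;	/* index of the TRB the event points at */
	unsigned int remaining = 200;	/* residue reported in the transfer event */
	unsigned int sum = 0;

	for (unsigned int i = 0; i < ep_trb; i++)
		sum += trb_len[i];	/* what sum_trb_lengths() adds up */

	/* actual_length = sum_trb_lengths() + ep_trb_len - remaining */
	printf("%u\n", sum + trb_len[ep_trb] - remaining);	/* 1024 + 1024 - 200 = 1848 */
	return 0;
}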
+
+/* Transfer events which don't point to a transfer TRB, see xhci 4.17.4 */
+static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ u32 trb_comp_code)
+{
+ switch (trb_comp_code) {
+ case COMP_STALL_ERROR:
+ case COMP_USB_TRANSACTION_ERROR:
+ case COMP_INVALID_STREAM_TYPE_ERROR:
+ case COMP_INVALID_STREAM_ID_ERROR:
+ xhci_dbg(xhci, "Stream transaction error ep %u no id\n", ep->ep_index);
+ if (ep->err_count++ > MAX_SOFT_RETRY)
+ xhci_handle_halted_endpoint(xhci, ep, NULL, EP_HARD_RESET);
+ else
+ xhci_handle_halted_endpoint(xhci, ep, NULL, EP_SOFT_RESET);
+ break;
+ case COMP_RING_UNDERRUN:
+ case COMP_RING_OVERRUN:
+ case COMP_STOPPED_LENGTH_INVALID:
+ break;
+ default:
+ xhci_err(xhci, "Transfer event %u for unknown stream ring slot %u ep %u\n",
+ trb_comp_code, ep->vdev->slot_id, ep->ep_index);
+ return -ENODEV;
+ }
+ return 0;
}
/*
@@ -2577,8 +2664,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
- int td_num = 0;
- bool handling_skipped_tds = false;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
@@ -2601,36 +2686,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
goto err_out;
}
- /* Some transfer events don't always point to a trb, see xhci 4.17.4 */
- if (!ep_ring) {
- switch (trb_comp_code) {
- case COMP_STALL_ERROR:
- case COMP_USB_TRANSACTION_ERROR:
- case COMP_INVALID_STREAM_TYPE_ERROR:
- case COMP_INVALID_STREAM_ID_ERROR:
- xhci_dbg(xhci, "Stream transaction error ep %u no id\n",
- ep_index);
- if (ep->err_count++ > MAX_SOFT_RETRY)
- xhci_handle_halted_endpoint(xhci, ep, NULL,
- EP_HARD_RESET);
- else
- xhci_handle_halted_endpoint(xhci, ep, NULL,
- EP_SOFT_RESET);
- goto cleanup;
- case COMP_RING_UNDERRUN:
- case COMP_RING_OVERRUN:
- case COMP_STOPPED_LENGTH_INVALID:
- goto cleanup;
- default:
- xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
- slot_id, ep_index);
- goto err_out;
- }
- }
-
- /* Count current td numbers if ep->skip is set */
- if (ep->skip)
- td_num += list_count_nodes(&ep_ring->td_list);
+ if (!ep_ring)
+ return handle_transferless_tx_event(xhci, ep, trb_comp_code);
/* Look for common error cases */
switch (trb_comp_code) {
@@ -2638,15 +2695,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* transfer type
*/
case COMP_SUCCESS:
- if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
- break;
- if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
- ep_ring->last_td_was_short)
+ if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
trb_comp_code = COMP_SHORT_PACKET;
- else
- xhci_warn_ratelimited(xhci,
- "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
- slot_id, ep_index);
+ xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n",
+ slot_id, ep_index, ep_ring->last_td_was_short);
+ }
break;
case COMP_SHORT_PACKET:
break;
@@ -2716,21 +2769,15 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* a Ring Overrun Event for IN Isoch endpoint or Ring
* Underrun Event for OUT Isoch endpoint.
*/
- xhci_dbg(xhci, "underrun event on endpoint\n");
- if (!list_empty(&ep_ring->td_list))
- xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
- "still with TDs queued?\n",
- TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
- ep_index);
- goto cleanup;
+ xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index);
+ if (ep->skip)
+ break;
+ return 0;
case COMP_RING_OVERRUN:
- xhci_dbg(xhci, "overrun event on endpoint\n");
- if (!list_empty(&ep_ring->td_list))
- xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
- "still with TDs queued?\n",
- TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
- ep_index);
- goto cleanup;
+ xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index);
+ if (ep->skip)
+ break;
+ return 0;
case COMP_MISSED_SERVICE_ERROR:
/*
* When encounter missed service error, one or more isoc tds
@@ -2742,13 +2789,13 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_dbg(xhci,
"Miss service interval error for slot %u ep %u, set skip flag\n",
slot_id, ep_index);
- goto cleanup;
+ return 0;
case COMP_NO_PING_RESPONSE_ERROR:
ep->skip = true;
xhci_dbg(xhci,
"No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
slot_id, ep_index);
- goto cleanup;
+ return 0;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
/* needs disable slot command to recover */
@@ -2765,78 +2812,80 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_warn(xhci,
"ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n",
trb_comp_code, slot_id, ep_index);
- goto cleanup;
+ if (ep->skip)
+ break;
+ return 0;
}
- do {
- /* This TRB should be in the TD at the head of this ring's
- * TD list.
- */
- if (list_empty(&ep_ring->td_list)) {
- /*
- * Don't print wanings if it's due to a stopped endpoint
- * generating an extra completion event if the device
- * was suspended. Or, a event for the last TRB of a
- * short TD we already got a short event for.
- * The short TD is already removed from the TD list.
- */
+ /*
+ * xhci 4.10.2 states isoc endpoints should continue
+ * processing the next TD if there was an error mid TD.
+ * So hosts like NEC don't generate an event for the last
+ * isoc TRB even if the IOC flag is set.
+ * xhci 4.9.1 states that if there are errors in multi-TRB
+ * TDs the xHC should generate an error for that TRB, and if it
+ * proceeds to the next TD it should generate an event for
+ * any TRB with the IOC flag on the way. Other hosts follow this.
+ *
+ * We wait for the final IOC event, but if we get an event
+ * anywhere outside this TD, just give it back already.
+ */
+ td = list_first_entry_or_null(&ep_ring->td_list, struct xhci_td, td_list);
- if (!(trb_comp_code == COMP_STOPPED ||
- trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
- ep_ring->last_td_was_short)) {
- xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
- TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
- ep_index);
- }
- if (ep->skip) {
- ep->skip = false;
- xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
- slot_id, ep_index);
- }
- if (trb_comp_code == COMP_STALL_ERROR ||
- xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
- trb_comp_code)) {
- xhci_handle_halted_endpoint(xhci, ep, NULL,
- EP_HARD_RESET);
- }
- goto cleanup;
- }
+ if (td && td->error_mid_td && !trb_in_td(xhci, td, ep_trb_dma, false)) {
+ xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
+ xhci_dequeue_td(xhci, td, ep_ring, td->status);
+ }
- /* We've skipped all the TDs on the ep ring when ep->skip set */
- if (ep->skip && td_num == 0) {
- ep->skip = false;
- xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
- slot_id, ep_index);
- goto cleanup;
+ if (list_empty(&ep_ring->td_list)) {
+ /*
+ * Don't print warnings if the ring is empty due to a stopped endpoint generating an
+ * extra completion event if the device was suspended. Or, an event for the last TRB
+ * of a short TD we already got a short event for. The short TD is already removed
+ * from the TD list.
+ */
+ if (trb_comp_code != COMP_STOPPED &&
+ trb_comp_code != COMP_STOPPED_LENGTH_INVALID &&
+ !ep_ring->last_td_was_short) {
+ xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n",
+ slot_id, ep_index);
}
+ ep->skip = false;
+ goto check_endpoint_halted;
+ }
+
+ do {
td = list_first_entry(&ep_ring->td_list, struct xhci_td,
td_list);
- if (ep->skip)
- td_num--;
/* Is this a TRB in the currently executing TD? */
- ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
- td->last_trb, ep_trb_dma, false);
-
- /*
- * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
- * is not in the current TD pointed by ep_ring->dequeue because
- * that the hardware dequeue pointer still at the previous TRB
- * of the current TD. The previous TRB maybe a Link TD or the
- * last TRB of the previous TD. The command completion handle
- * will take care the rest.
- */
- if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
- trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
- goto cleanup;
- }
+ ep_seg = trb_in_td(xhci, td, ep_trb_dma, false);
if (!ep_seg) {
if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
skip_isoc_td(xhci, td, ep, status);
- goto cleanup;
+ if (!list_empty(&ep_ring->td_list))
+ continue;
+
+ xhci_dbg(xhci, "All TDs skipped for slot %u ep %u. Clear skip flag.\n",
+ slot_id, ep_index);
+ ep->skip = false;
+ td = NULL;
+ goto check_endpoint_halted;
+ }
+
+ /*
+ * Skip the Force Stopped Event. The 'ep_trb' of FSE is not in the
+ * current TD pointed to by 'ep_ring->dequeue' because the hardware
+ * dequeue pointer is still at the previous TRB of the current TD. The
+ * previous TRB may be a Link TRB or the last TRB of the previous TD.
+ * The command completion handler will take care of the rest.
+ */
+ if (trb_comp_code == COMP_STOPPED ||
+ trb_comp_code == COMP_STOPPED_LENGTH_INVALID) {
+ return 0;
}
/*
@@ -2846,54 +2895,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
ep_ring->last_td_was_short) {
ep_ring->last_td_was_short = false;
- goto cleanup;
+ return 0;
}
- /*
- * xhci 4.10.2 states isoc endpoints should continue
- * processing the next TD if there was an error mid TD.
- * So host like NEC don't generate an event for the last
- * isoc TRB even if the IOC flag is set.
- * xhci 4.9.1 states that if there are errors in mult-TRB
- * TDs xHC should generate an error for that TRB, and if xHC
- * proceeds to the next TD it should genete an event for
- * any TRB with IOC flag on the way. Other host follow this.
- * So this event might be for the next TD.
- */
- if (td->error_mid_td &&
- !list_is_last(&td->td_list, &ep_ring->td_list)) {
- struct xhci_td *td_next = list_next_entry(td, td_list);
-
- ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb,
- td_next->last_trb, ep_trb_dma, false);
- if (ep_seg) {
- /* give back previous TD, start handling new */
- xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
- ep_ring->dequeue = td->last_trb;
- ep_ring->deq_seg = td->last_trb_seg;
- inc_deq(xhci, ep_ring);
- xhci_td_cleanup(xhci, td, ep_ring, td->status);
- td = td_next;
- }
- }
+ /* HC is busted, give up! */
+ xhci_err(xhci,
+ "ERROR Transfer event TRB DMA ptr not part of current TD ep_index %d comp_code %u\n",
+ ep_index, trb_comp_code);
+ trb_in_td(xhci, td, ep_trb_dma, true);
- if (!ep_seg) {
- /* HC is busted, give up! */
- xhci_err(xhci,
- "ERROR Transfer event TRB DMA ptr not "
- "part of current TD ep_index %d "
- "comp_code %u\n", ep_index,
- trb_comp_code);
- trb_in_td(xhci, ep_ring->deq_seg,
- ep_ring->dequeue, td->last_trb,
- ep_trb_dma, true);
- return -ESHUTDOWN;
- }
+ return -ESHUTDOWN;
}
- if (trb_comp_code == COMP_SHORT_PACKET)
- ep_ring->last_td_was_short = true;
- else
- ep_ring->last_td_was_short = false;
if (ep->skip) {
xhci_dbg(xhci,
@@ -2902,50 +2914,46 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep->skip = false;
}
- ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
- sizeof(*ep_trb)];
-
- trace_xhci_handle_transfer(ep_ring,
- (struct xhci_generic_trb *) ep_trb);
-
- /*
- * No-op TRB could trigger interrupts in a case where
- * a URB was killed and a STALL_ERROR happens right
- * after the endpoint ring stopped. Reset the halted
- * endpoint. Otherwise, the endpoint remains stalled
- * indefinitely.
- */
-
- if (trb_is_noop(ep_trb)) {
- if (trb_comp_code == COMP_STALL_ERROR ||
- xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
- trb_comp_code))
- xhci_handle_halted_endpoint(xhci, ep, td,
- EP_HARD_RESET);
- goto cleanup;
- }
-
- td->status = status;
-
- /* update the urb's actual_length and give back to the core */
- if (usb_endpoint_xfer_control(&td->urb->ep->desc))
- process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
- else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
- process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
- else
- process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
-cleanup:
- handling_skipped_tds = ep->skip &&
- trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
- trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;
-
/*
* If ep->skip is set, it means there are missed tds on the
* endpoint ring need to take care of.
* Process them as short transfer until reach the td pointed by
* the event.
*/
- } while (handling_skipped_tds);
+ } while (ep->skip);
+
+ if (trb_comp_code == COMP_SHORT_PACKET)
+ ep_ring->last_td_was_short = true;
+ else
+ ep_ring->last_td_was_short = false;
+
+ ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];
+ trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb, ep_trb_dma);
+
+ /*
+ * No-op TRB could trigger interrupts in a case where a URB was killed
+ * and a STALL_ERROR happens right after the endpoint ring stopped.
+ * Reset the halted endpoint. Otherwise, the endpoint remains stalled
+ * indefinitely.
+ */
+
+ if (trb_is_noop(ep_trb))
+ goto check_endpoint_halted;
+
+ td->status = status;
+
+ /* update the urb's actual_length and give back to the core */
+ if (usb_endpoint_xfer_control(&td->urb->ep->desc))
+ process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
+ else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
+ process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
+ else
+ process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
+ return 0;
+
+check_endpoint_halted:
+ if (xhci_halted_host_endpoint(ep_ctx, trb_comp_code))
+ xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
return 0;
@@ -2962,32 +2970,20 @@ err_out:
}
/*
- * This function handles all OS-owned events on the event ring. It may drop
+ * This function handles one OS-owned event on the event ring. It may drop
* xhci->lock between event processing (e.g. to pass up port status changes).
- * Returns >0 for "possibly more events to process" (caller should call again),
- * otherwise 0 if done. In future, <0 returns should indicate error code.
*/
-static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
+ union xhci_trb *event)
{
- union xhci_trb *event;
u32 trb_type;
- /* Event ring hasn't been allocated yet. */
- if (!ir || !ir->event_ring || !ir->event_ring->dequeue) {
- xhci_err(xhci, "ERROR interrupter not ready\n");
- return -ENOMEM;
- }
-
- event = ir->event_ring->dequeue;
- /* Does the HC or OS own the TRB? */
- if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
- ir->event_ring->cycle_state)
- return 0;
-
- trace_xhci_handle_event(ir->event_ring, &event->generic);
+ trace_xhci_handle_event(ir->event_ring, &event->generic,
+ xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
+ ir->event_ring->dequeue));
/*
- * Barrier between reading the TRB_CYCLE (valid) flag above and any
+ * Barrier between reading the TRB_CYCLE (valid) flag earlier and any
* speculative reads of the event's flags/data below.
*/
rmb();
@@ -2999,7 +2995,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
handle_cmd_completion(xhci, &event->event_cmd);
break;
case TRB_PORT_STATUS:
- handle_port_status(xhci, ir, event);
+ handle_port_status(xhci, event);
break;
case TRB_TRANSFER:
handle_tx_event(xhci, ir, &event->trans_event);
@@ -3017,18 +3013,11 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
* to make sure a watchdog timer didn't mark the host as non-responsive.
*/
if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_dbg(xhci, "xHCI host dying, returning from "
- "event handler.\n");
- return 0;
+ xhci_dbg(xhci, "xHCI host dying, returning from event handler.\n");
+ return -ENODEV;
}
- /* Update SW event ring dequeue pointer */
- inc_deq(xhci, ir->event_ring);
-
- /* Are there more items on the event ring? Caller will call us again to
- * check.
- */
- return 1;
+ return 0;
}
/*
@@ -3038,30 +3027,26 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
*/
static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
struct xhci_interrupter *ir,
- union xhci_trb *event_ring_deq,
bool clear_ehb)
{
u64 temp_64;
dma_addr_t deq;
temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- /* If necessary, update the HW's version of the event ring deq ptr. */
- if (event_ring_deq != ir->event_ring->dequeue) {
- deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
- ir->event_ring->dequeue);
- if (deq == 0)
- xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
- /*
- * Per 4.9.4, Software writes to the ERDP register shall
- * always advance the Event Ring Dequeue Pointer value.
- */
- if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK))
- return;
+ deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
+ ir->event_ring->dequeue);
+ if (deq == 0)
+ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
+ /*
+ * Per 4.9.4, Software writes to the ERDP register shall always advance
+ * the Event Ring Dequeue Pointer value.
+ */
+ if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK) && !clear_ehb)
+ return;
- /* Update HC event ring dequeue pointer */
- temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
- temp_64 |= deq & ERST_PTR_MASK;
- }
+ /* Update HC event ring dequeue pointer */
+ temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
+ temp_64 |= deq & ERST_PTR_MASK;
/* Clear the event handler busy flag (RW1C) */
if (clear_ehb)
@@ -3069,6 +3054,75 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
}
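The ERDP value written here packs the ERST segment index into the low bits, the 16-byte-aligned dequeue pointer into the upper bits, and uses bit 3 as the write-1-to-clear event handler busy flag. A minimal standalone sketch of that composition (mask names and values are assumptions about the register layout, not taken from this diff):

#include <stdint.h>
#include <stdio.h>

#define ERDP_DESI_MASK	0x7ULL		/* dequeue ERST segment index (assumed) */
#define ERDP_EHB	(1ULL << 3)	/* event handler busy, write 1 to clear (assumed) */
#define ERDP_PTR_MASK	(~0xfULL)	/* 16-byte aligned dequeue pointer (assumed) */

static uint64_t compose_erdp(uint64_t deq_dma, uint64_t seg_num, int clear_ehb)
{
	uint64_t erdp = (seg_num & ERDP_DESI_MASK) | (deq_dma & ERDP_PTR_MASK);

	if (clear_ehb)
		erdp |= ERDP_EHB;	/* acknowledge the busy flag */
	return erdp;
}

int main(void)
{
	/* segment 2, dequeue TRB at 0x10000040, also clearing EHB */
	printf("0x%llx\n", (unsigned long long)compose_erdp(0x10000040ULL, 2, 1));
	return 0;
}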
+/* Clear the interrupt pending bit for a specific interrupter. */
+static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir)
+{
+ if (!ir->ip_autoclear) {
+ u32 irq_pending;
+
+ irq_pending = readl(&ir->ir_set->irq_pending);
+ irq_pending |= IMAN_IP;
+ writel(irq_pending, &ir->ir_set->irq_pending);
+ }
+}
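The IP bit in IMAN is write-1-to-clear, so the read-modify-write above acknowledges the pending interrupt rather than asserting it. A tiny standalone illustration of that semantic (the bit position is an assumption, not taken from this diff):

#include <stdint.h>
#include <stdio.h>

#define IP_BIT	(1u << 0)	/* interrupt pending, RW1C (bit position assumed) */

int main(void)
{
	uint32_t iman = 0x3;			/* e.g. pending + enabled */
	uint32_t writeback = iman | IP_BIT;	/* writing 1 back clears the pending bit */

	printf("write 0x%x to acknowledge\n", writeback);
	return 0;
}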
+
+/*
+ * Handle all OS-owned events on an interrupter event ring. It may drop
+ * and reacquire xhci->lock between event processing.
+ */
+static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+{
+ int event_loop = 0;
+ int err;
+ u64 temp;
+
+ xhci_clear_interrupt_pending(ir);
+
+ /* Event ring hasn't been allocated yet. */
+ if (!ir->event_ring || !ir->event_ring->dequeue) {
+ xhci_err(xhci, "ERROR interrupter event ring not ready\n");
+ return -ENOMEM;
+ }
+
+ if (xhci->xhc_state & XHCI_STATE_DYING ||
+ xhci->xhc_state & XHCI_STATE_HALTED) {
+ xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n");
+
+ /* Clear the event handler busy flag (RW1C) */
+ temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp | ERST_EHB, &ir->ir_set->erst_dequeue);
+ return -ENODEV;
+ }
+
+ /* Process all OS owned event TRBs on this event ring */
+ while (unhandled_event_trb(ir->event_ring)) {
+ err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue);
+
+ /*
+ * If half a segment of events has been handled in one go then
+ * update ERDP, and force isoc TRBs to interrupt more often
+ */
+ if (event_loop++ > TRBS_PER_SEGMENT / 2) {
+ xhci_update_erst_dequeue(xhci, ir, false);
+
+ if (ir->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
+ ir->isoc_bei_interval = ir->isoc_bei_interval / 2;
+
+ event_loop = 0;
+ }
+
+ /* Update SW event ring dequeue pointer */
+ inc_deq(xhci, ir->event_ring);
+
+ if (err)
+ break;
+ }
+
+ xhci_update_erst_dequeue(xhci, ir, true);
+
+ return 0;
+}
+
/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
@@ -3077,24 +3131,21 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- union xhci_trb *event_ring_deq;
- struct xhci_interrupter *ir;
- irqreturn_t ret = IRQ_NONE;
- u64 temp_64;
+ irqreturn_t ret = IRQ_HANDLED;
u32 status;
- int event_loop = 0;
spin_lock(&xhci->lock);
/* Check if the xHC generated the interrupt, or the irq is shared */
status = readl(&xhci->op_regs->status);
if (status == ~(u32)0) {
xhci_hc_died(xhci);
- ret = IRQ_HANDLED;
goto out;
}
- if (!(status & STS_EINT))
+ if (!(status & STS_EINT)) {
+ ret = IRQ_NONE;
goto out;
+ }
if (status & STS_HCE) {
xhci_warn(xhci, "WARNING: Host Controller Error\n");
@@ -3104,7 +3155,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
- ret = IRQ_HANDLED;
goto out;
}
@@ -3117,48 +3167,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
writel(status, &xhci->op_regs->status);
/* This is the handler of the primary interrupter */
- ir = xhci->interrupters[0];
- if (!hcd->msi_enabled) {
- u32 irq_pending;
- irq_pending = readl(&ir->ir_set->irq_pending);
- irq_pending |= IMAN_IP;
- writel(irq_pending, &ir->ir_set->irq_pending);
- }
-
- if (xhci->xhc_state & XHCI_STATE_DYING ||
- xhci->xhc_state & XHCI_STATE_HALTED) {
- xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
- "Shouldn't IRQs be disabled?\n");
- /* Clear the event handler busy flag (RW1C);
- * the event ring should be empty.
- */
- temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- xhci_write_64(xhci, temp_64 | ERST_EHB,
- &ir->ir_set->erst_dequeue);
- ret = IRQ_HANDLED;
- goto out;
- }
-
- event_ring_deq = ir->event_ring->dequeue;
- /* FIXME this should be a delayed service routine
- * that clears the EHB.
- */
- while (xhci_handle_event(xhci, ir) > 0) {
- if (event_loop++ < TRBS_PER_SEGMENT / 2)
- continue;
- xhci_update_erst_dequeue(xhci, ir, event_ring_deq, false);
- event_ring_deq = ir->event_ring->dequeue;
-
- /* ring is half-full, force isoc trbs to interrupt more often */
- if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
- xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2;
-
- event_loop = 0;
- }
-
- xhci_update_erst_dequeue(xhci, ir, event_ring_deq, true);
- ret = IRQ_HANDLED;
-
+ xhci_handle_events(xhci, xhci->interrupters[0]);
out:
spin_unlock(&xhci->lock);
@@ -3194,7 +3203,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
wmb();
trb->field[3] = cpu_to_le32(field4);
- trace_xhci_queue_trb(ring, trb);
+ trace_xhci_queue_trb(ring, trb,
+ xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue));
inc_enq(xhci, ring, more_trbs_coming);
}
@@ -3240,7 +3250,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
if (ep_ring != xhci->cmd_ring) {
new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs);
- } else if (xhci_num_trbs_free(xhci, ep_ring) <= num_trbs) {
+ } else if (xhci_num_trbs_free(ep_ring) <= num_trbs) {
xhci_err(xhci, "Do not support expand command ring\n");
return -ENOMEM;
}
@@ -3258,9 +3268,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
/* If we're not dealing with 0.95 hardware or isoc rings
* on AMD 0.96 host, clear the chain bit.
*/
- if (!xhci_link_trb_quirk(xhci) &&
- !(ep_ring->type == TYPE_ISOC &&
- (xhci->quirks & XHCI_AMD_0x96_HOST)))
+ if (!xhci_link_chain_quirk(xhci, ep_ring->type))
ep_ring->enqueue->link.control &=
cpu_to_le32(~TRB_CHAIN);
else
@@ -3336,7 +3344,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
/* Add this TD to the tail of the endpoint ring's TD list */
list_add_tail(&td->td_list, &ep_ring->td_list);
td->start_seg = ep_ring->enq_seg;
- td->first_trb = ep_ring->enqueue;
+ td->start_trb = ep_ring->enqueue;
return 0;
}
@@ -3415,8 +3423,7 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
-static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
- struct xhci_ep_ctx *ep_ctx)
+static void check_interval(struct urb *urb, struct xhci_ep_ctx *ep_ctx)
{
int xhci_interval;
int ep_interval;
@@ -3435,8 +3442,8 @@ static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
if (xhci_interval != ep_interval) {
dev_dbg_ratelimited(&urb->dev->dev,
"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
- ep_interval, ep_interval == 1 ? "" : "s",
- xhci_interval, xhci_interval == 1 ? "" : "s");
+ ep_interval, str_plural(ep_interval),
+ xhci_interval, str_plural(xhci_interval));
urb->interval = xhci_interval;
/* Convert back to frames for LS/FS devices */
if (urb->dev->speed == USB_SPEED_LOW ||
@@ -3457,7 +3464,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct xhci_ep_ctx *ep_ctx;
ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
- check_interval(xhci, urb, ep_ctx);
+ check_interval(urb, ep_ctx);
return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
@@ -3676,8 +3683,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field &= ~TRB_CHAIN;
field |= TRB_IOC;
more_trbs_coming = false;
- td->last_trb = ring->enqueue;
- td->last_trb_seg = ring->enq_seg;
+ td->end_trb = ring->enqueue;
+ td->end_seg = ring->enq_seg;
if (xhci_urb_suitable_for_idt(urb)) {
memcpy(&send_addr, urb->transfer_buffer,
trb_buff_len);
@@ -3703,7 +3710,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
upper_32_bits(send_addr),
length_field,
field);
- td->num_trbs++;
addr += trb_buff_len;
sent_len = trb_buff_len;
@@ -3726,11 +3732,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
1, urb, 1, mem_flags);
- urb_priv->td[1].last_trb = ring->enqueue;
- urb_priv->td[1].last_trb_seg = ring->enq_seg;
+ urb_priv->td[1].end_trb = ring->enqueue;
+ urb_priv->td[1].end_seg = ring->enq_seg;
field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
- urb_priv->td[1].num_trbs++;
}
check_trb_math(urb, enqd_len);
@@ -3764,6 +3769,20 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (!urb->setup_packet)
return -EINVAL;
+ if ((xhci->quirks & XHCI_ETRON_HOST) &&
+ urb->dev->speed >= USB_SPEED_SUPER) {
+ /*
+ * If the next available TRB is the Link TRB in the ring segment then
+ * enqueue a No Op TRB; this prevents the Setup and Data Stage
+ * TRBs from being split by the Link TRB.
+ */
+ if (trb_is_link(ep_ring->enqueue + 1)) {
+ field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state;
+ queue_trb(xhci, ep_ring, false, 0, 0,
+ TRB_INTR_TARGET(0), field);
+ }
+ }
+
/* 1 TRB for setup, 1 for status */
num_trbs = 2;
/*
@@ -3781,7 +3800,6 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
urb_priv = urb->hcpriv;
td = &urb_priv->td[0];
- td->num_trbs = num_trbs;
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
@@ -3853,8 +3871,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
/* Save the DMA address of the last TRB in the TD */
- td->last_trb = ep_ring->enqueue;
- td->last_trb_seg = ep_ring->enq_seg;
+ td->end_trb = ep_ring->enqueue;
+ td->end_seg = ep_ring->enq_seg;
/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
/* If the device sent data, the status stage is an OUT transfer */
@@ -3979,10 +3997,6 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
start_frame_id = (start_frame_id >> 3) & 0x7ff;
end_frame_id = (end_frame_id >> 3) & 0x7ff;
- xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
- __func__, index, readl(&xhci->run_regs->microframe_index),
- start_frame_id, end_frame_id, start_frame);
-
if (start_frame_id < end_frame_id) {
if (start_frame > end_frame_id ||
start_frame < start_frame_id)
@@ -4019,7 +4033,8 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
}
/* Check if we should generate event interrupt for a TD in an isoc URB */
-static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
+static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i,
+ struct xhci_interrupter *ir)
{
if (xhci->hci_version < 0x100)
return false;
@@ -4030,8 +4045,8 @@ static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
* If AVOID_BEI is set the host handles full event rings poorly,
* generate an event at least every 8th TD to clear the event ring
*/
- if (i && xhci->quirks & XHCI_AVOID_BEI)
- return !!(i % xhci->isoc_bei_interval);
+ if (i && ir->isoc_bei_interval && xhci->quirks & XHCI_AVOID_BEI)
+ return !!(i % ir->isoc_bei_interval);
return true;
}
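With XHCI_AVOID_BEI set, the modulo above means only every isoc_bei_interval-th TD keeps an un-blocked interrupt (plus the final TD, which the earlier num_tds check always lets interrupt). A quick standalone check of that spacing (the interval of 8 and the last-TD handling being omitted are assumptions for illustration):

#include <stdbool.h>
#include <stdio.h>

static bool block_event_intr(int i, int interval)
{
	/* true -> set TRB_BEI, i.e. no interrupt for this TD */
	return i && interval && (i % interval);
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		if (!block_event_intr(i, 8))
			printf("TD %d interrupts\n", i);	/* TDs 0, 8 and 16 */
	return 0;
}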
@@ -4040,6 +4055,7 @@ static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
+ struct xhci_interrupter *ir;
struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
struct xhci_td *td;
@@ -4057,6 +4073,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
xep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ ir = xhci->interrupters[0];
num_tds = urb->number_of_packets;
if (num_tds < 1) {
@@ -4099,7 +4116,6 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
goto cleanup;
}
td = &urb_priv->td[i];
- td->num_trbs = trbs_per_td;
/* use SIA as default, if frame id is used overwrite it */
sia_frame_id = TRB_SIA;
if (!(urb->transfer_flags & URB_ISO_ASAP) &&
@@ -4141,10 +4157,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field |= TRB_CHAIN;
} else {
more_trbs_coming = false;
- td->last_trb = ep_ring->enqueue;
- td->last_trb_seg = ep_ring->enq_seg;
+ td->end_trb = ep_ring->enqueue;
+ td->end_seg = ep_ring->enq_seg;
field |= TRB_IOC;
- if (trb_block_event_intr(xhci, num_tds, i))
+ if (trb_block_event_intr(xhci, num_tds, i, ir))
field |= TRB_BEI;
}
/* Calculate TRB length */
@@ -4208,14 +4224,14 @@ cleanup:
/* Use the first TD as a temporary variable to turn the TDs we've queued
* into No-ops with a software-owned cycle bit. That way the hardware
* won't accidentally start executing bogus TDs when we partially
- * overwrite them. td->first_trb and td->start_seg are already set.
+ * overwrite them. td->start_trb and td->start_seg are already set.
*/
- urb_priv->td[0].last_trb = ep_ring->enqueue;
+ urb_priv->td[0].end_trb = ep_ring->enqueue;
/* Every TRB except the first & last will have its cycle bit flipped. */
- td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);
+ td_to_noop(&urb_priv->td[0], true);
/* Reset the ring enqueue back to the first TRB and its cycle bit. */
- ep_ring->enqueue = urb_priv->td[0].first_trb;
+ ep_ring->enqueue = urb_priv->td[0].start_trb;
ep_ring->enq_seg = urb_priv->td[0].start_seg;
ep_ring->cycle_state = start_cycle;
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
@@ -4263,7 +4279,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* Check interval value. This should be done before we start to
* calculate the start frame value.
*/
- check_interval(xhci, urb, ep_ctx);
+ check_interval(urb, ep_ctx);
/* Calculate the start frame and put it in urb->start_frame. */
if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
@@ -4416,7 +4432,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
int slot_id, unsigned int ep_index, int suspend)
{
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
- u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+ u32 trb_ep_index = EP_INDEX_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_STOP_RING);
u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
@@ -4429,7 +4445,7 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
enum xhci_ep_reset_type reset_type)
{
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
- u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+ u32 trb_ep_index = EP_INDEX_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_RESET_EP);
if (reset_type == EP_SOFT_RESET)
diff --git a/drivers/usb/host/xhci-rzv2m.c b/drivers/usb/host/xhci-rzv2m.c
index ec65b24eafa8..4f59867d7117 100644
--- a/drivers/usb/host/xhci-rzv2m.c
+++ b/drivers/usb/host/xhci-rzv2m.c
@@ -6,6 +6,7 @@
*/
#include <linux/usb/rzv2m_usb3drd.h>
+#include "xhci.h"
#include "xhci-plat.h"
#include "xhci-rzv2m.h"
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 6246d5ad1468..22dc86fb5254 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -26,6 +26,7 @@
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/usb/otg.h>
#include <linux/usb/phy.h>
#include <linux/usb/role.h>
@@ -724,7 +725,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
if (err < 0) {
dev_err(dev,
"failed to %s LFPS detection on USB3#%u: %d\n",
- enable ? "enable" : "disable", port, err);
+ str_enable_disable(enable), port, err);
rsp.cmd = MBOX_CMD_NAK;
} else {
rsp.cmd = MBOX_CMD_ACK;
@@ -1349,7 +1350,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
u32 status;
int ret;
- dev_dbg(tegra->dev, "host mode %s\n", tegra->host_mode ? "on" : "off");
+ dev_dbg(tegra->dev, "host mode %s\n", str_on_off(tegra->host_mode));
mutex_lock(&tegra->lock);
@@ -1667,7 +1668,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_padctl;
}
- if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
+ if (!of_property_present(pdev->dev.of_node, "power-domains")) {
tegra->host_rst = devm_reset_control_get(&pdev->dev,
"xusb_host");
if (IS_ERR(tegra->host_rst)) {
@@ -2183,7 +2184,7 @@ static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
goto out;
}
- for (i = 0; i < tegra->num_usb_phys; i++) {
+ for (i = 0; i < xhci->usb2_rhub.num_ports; i++) {
if (!xhci->usb2_rhub.ports[i])
continue;
portsc = readl(xhci->usb2_rhub.ports[i]->addr);
@@ -2664,7 +2665,7 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
static struct platform_driver tegra_xusb_driver = {
.probe = tegra_xusb_probe,
- .remove_new = tegra_xusb_remove,
+ .remove = tegra_xusb_remove,
.shutdown = tegra_xusb_shutdown,
.driver = {
.name = "tegra-xusb",
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index ac47b1c0544a..bfb5c5c17012 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -108,9 +108,10 @@ DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
);
DECLARE_EVENT_CLASS(xhci_log_trb,
- TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
- TP_ARGS(ring, trb),
+ TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+ TP_ARGS(ring, trb, dma),
TP_STRUCT__entry(
+ __field(dma_addr_t, dma)
__field(u32, type)
__field(u32, field0)
__field(u32, field1)
@@ -118,51 +119,54 @@ DECLARE_EVENT_CLASS(xhci_log_trb,
__field(u32, field3)
),
TP_fast_assign(
+ __entry->dma = dma;
__entry->type = ring->type;
__entry->field0 = le32_to_cpu(trb->field[0]);
__entry->field1 = le32_to_cpu(trb->field[1]);
__entry->field2 = le32_to_cpu(trb->field[2]);
__entry->field3 = le32_to_cpu(trb->field[3]);
),
- TP_printk("%s: %s", xhci_ring_type_string(__entry->type),
+ TP_printk("%s: @%pad %s",
+ xhci_ring_type_string(__entry->type), &__entry->dma,
xhci_decode_trb(__get_buf(XHCI_MSG_MAX), XHCI_MSG_MAX, __entry->field0,
__entry->field1, __entry->field2, __entry->field3)
)
);
DEFINE_EVENT(xhci_log_trb, xhci_handle_event,
- TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
- TP_ARGS(ring, trb)
+ TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+ TP_ARGS(ring, trb, dma)
);
DEFINE_EVENT(xhci_log_trb, xhci_handle_command,
- TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
- TP_ARGS(ring, trb)
+ TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+ TP_ARGS(ring, trb, dma)
);
DEFINE_EVENT(xhci_log_trb, xhci_handle_transfer,
- TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
- TP_ARGS(ring, trb)
+ TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+ TP_ARGS(ring, trb, dma)
);
DEFINE_EVENT(xhci_log_trb, xhci_queue_trb,
- TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
- TP_ARGS(ring, trb)
+ TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+ TP_ARGS(ring, trb, dma)
+
);
DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_event,
- TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
- TP_ARGS(ring, trb)
+ TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+ TP_ARGS(ring, trb, dma)
);
DEFINE_EVENT(xhci_log_trb, xhci_dbc_handle_transfer,
- TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
- TP_ARGS(ring, trb)
+ TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+ TP_ARGS(ring, trb, dma)
);
DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
- TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
- TP_ARGS(ring, trb)
+ TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb, dma_addr_t dma),
+ TP_ARGS(ring, trb, dma)
);
DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
@@ -172,8 +176,7 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__field(void *, vdev)
__field(unsigned long long, out_ctx)
__field(unsigned long long, in_ctx)
- __field(u8, fake_port)
- __field(u8, real_port)
+ __field(int, slot_id)
__field(u16, current_mel)
),
@@ -181,13 +184,12 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__entry->vdev = vdev;
__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
- __entry->fake_port = (u8) vdev->fake_port;
- __entry->real_port = (u8) vdev->real_port;
+ __entry->slot_id = (int) vdev->slot_id;
__entry->current_mel = (u16) vdev->current_mel;
),
- TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
- __entry->vdev, __entry->in_ctx, __entry->out_ctx,
- __entry->fake_port, __entry->real_port, __entry->current_mel
+ TP_printk("vdev %p slot %d ctx %llx | %llx current_mel %d",
+ __entry->vdev, __entry->slot_id, __entry->in_ctx,
+ __entry->out_ctx, __entry->current_mel
)
);
@@ -252,6 +254,7 @@ DECLARE_EVENT_CLASS(xhci_log_urb,
TP_PROTO(struct urb *urb),
TP_ARGS(urb),
TP_STRUCT__entry(
+ __string(devname, dev_name(&urb->dev->dev))
__field(void *, urb)
__field(unsigned int, pipe)
__field(unsigned int, stream)
@@ -267,6 +270,7 @@ DECLARE_EVENT_CLASS(xhci_log_urb,
__field(int, slot_id)
),
TP_fast_assign(
+ __assign_str(devname);
__entry->urb = urb;
__entry->pipe = urb->pipe;
__entry->stream = urb->stream_id;
@@ -281,7 +285,8 @@ DECLARE_EVENT_CLASS(xhci_log_urb,
__entry->type = usb_endpoint_type(&urb->ep->desc);
__entry->slot_id = urb->dev->slot_id;
),
- TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
+ TP_printk("%s ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
+ __get_str(devname),
__entry->epnum, __entry->dir_in ? "in" : "out",
__print_symbolic(__entry->type,
{ USB_ENDPOINT_XFER_INT, "intr" },
@@ -309,6 +314,37 @@ DEFINE_EVENT(xhci_log_urb, xhci_urb_dequeue,
TP_ARGS(urb)
);
+DECLARE_EVENT_CLASS(xhci_log_stream_ctx,
+ TP_PROTO(struct xhci_stream_info *info, unsigned int stream_id),
+ TP_ARGS(info, stream_id),
+ TP_STRUCT__entry(
+ __field(unsigned int, stream_id)
+ __field(u64, stream_ring)
+ __field(dma_addr_t, ctx_array_dma)
+
+ ),
+ TP_fast_assign(
+ __entry->stream_id = stream_id;
+ __entry->stream_ring = le64_to_cpu(info->stream_ctx_array[stream_id].stream_ring);
+ __entry->ctx_array_dma = info->ctx_array_dma + stream_id * 16;
+
+ ),
+ TP_printk("stream %u ctx @%pad: SCT %llu deq %llx", __entry->stream_id,
+ &__entry->ctx_array_dma, CTX_TO_SCT(__entry->stream_ring),
+ __entry->stream_ring
+ )
+);
+
+DEFINE_EVENT(xhci_log_stream_ctx, xhci_alloc_stream_info_ctx,
+ TP_PROTO(struct xhci_stream_info *info, unsigned int stream_id),
+ TP_ARGS(info, stream_id)
+);
+
+DEFINE_EVENT(xhci_log_stream_ctx, xhci_handle_cmd_set_deq_stream,
+ TP_PROTO(struct xhci_stream_info *info, unsigned int stream_id),
+ TP_ARGS(info, stream_id)
+);
+
DECLARE_EVENT_CLASS(xhci_log_ep_ctx,
TP_PROTO(struct xhci_ep_ctx *ctx),
TP_ARGS(ctx),
@@ -453,8 +489,6 @@ DECLARE_EVENT_CLASS(xhci_log_ring,
__field(void *, ring)
__field(dma_addr_t, enq)
__field(dma_addr_t, deq)
- __field(dma_addr_t, enq_seg)
- __field(dma_addr_t, deq_seg)
__field(unsigned int, num_segs)
__field(unsigned int, stream_id)
__field(unsigned int, cycle_state)
@@ -465,17 +499,15 @@ DECLARE_EVENT_CLASS(xhci_log_ring,
__entry->type = ring->type;
__entry->num_segs = ring->num_segs;
__entry->stream_id = ring->stream_id;
- __entry->enq_seg = ring->enq_seg->dma;
- __entry->deq_seg = ring->deq_seg->dma;
__entry->cycle_state = ring->cycle_state;
__entry->bounce_buf_len = ring->bounce_buf_len;
__entry->enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
__entry->deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
),
- TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d bounce %d cycle %d",
+ TP_printk("%s %p: enq %pad deq %pad segs %d stream %d bounce %d cycle %d",
xhci_ring_type_string(__entry->type), __entry->ring,
- &__entry->enq, &__entry->enq_seg,
- &__entry->deq, &__entry->deq_seg,
+ &__entry->enq,
+ &__entry->deq,
__entry->num_segs,
__entry->stream_id,
__entry->bounce_buf_len,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index c057c42c36f4..1a90ebc8a30e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -8,6 +8,7 @@
* Some code borrowed from the Linux EHCI driver.
*/
+#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
@@ -16,6 +17,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
@@ -40,15 +42,15 @@ MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
- struct xhci_segment *seg = ring->first_seg;
+ struct xhci_segment *seg;
if (!td || !td->start_seg)
return false;
- do {
+
+ xhci_for_each_ring_seg(ring->first_seg, seg) {
if (seg == td->start_seg)
return true;
- seg = seg->next;
- } while (seg && seg != ring->first_seg);
+ }
return false;
}
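xhci_for_each_ring_seg() is not defined in this hunk; presumably it wraps the old do/while segment walk in a for-loop macro along these lines (a sketch of the assumed shape, not the actual definition):

/* visit every segment once, starting at head and stopping before wrapping */
#define xhci_for_each_ring_seg(head, seg)				\
	for (seg = (head); seg;						\
	     seg = ((seg)->next != (head) ? (seg)->next : NULL))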
@@ -346,6 +348,23 @@ static int xhci_disable_interrupter(struct xhci_interrupter *ir)
return 0;
}
+/* interrupt moderation interval imod_interval in nanoseconds */
+int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
+ u32 imod_interval)
+{
+ u32 imod;
+
+ if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250)
+ return -EINVAL;
+
+ imod = readl(&ir->ir_set->irq_control);
+ imod &= ~ER_IRQ_INTERVAL_MASK;
+ imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
+ writel(imod, &ir->ir_set->irq_control);
+
+ return 0;
+}
+
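The helper above stores the moderation interval in 250 ns units in what appears to be a 16-bit field, which is where the U16_MAX * 250 upper bound comes from. A standalone sketch of that encoding (field width and granularity inferred from the bound, not stated in this diff):

#include <stdint.h>
#include <stdio.h>

static uint32_t imod_field(uint32_t interval_ns)
{
	return (interval_ns / 250) & 0xffff;	/* 250 ns granularity, 16-bit field */
}

int main(void)
{
	printf("0x%04x\n", imod_field(40000));	/* 40 us -> 160 (0x00a0) units */
	return 0;
}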
static void compliance_mode_recovery(struct timer_list *t)
{
struct xhci_hcd *xhci;
@@ -456,14 +475,7 @@ static int xhci_init(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
spin_lock_init(&xhci->lock);
- if (xhci->hci_version == 0x95 && link_quirk) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "QUIRK: Not clearing Link TRB chain bits.");
- xhci->quirks |= XHCI_LINK_TRB_QUIRK;
- } else {
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "xHCI doesn't need link TRB QUIRK");
- }
+
retval = xhci_mem_init(xhci, GFP_KERNEL);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
@@ -528,7 +540,6 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
*/
int xhci_run(struct usb_hcd *hcd)
{
- u32 temp;
u64 temp_64;
int ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -538,6 +549,9 @@ int xhci_run(struct usb_hcd *hcd)
*/
hcd->uses_new_polling = 1;
+ if (hcd->msi_enabled)
+ ir->ip_autoclear = true;
+
if (!usb_hcd_is_primary_hcd(hcd))
return xhci_run_finished(xhci);
@@ -548,12 +562,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Set the interrupt modulation register");
- temp = readl(&ir->ir_set->irq_control);
- temp &= ~ER_IRQ_INTERVAL_MASK;
- temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
- writel(temp, &ir->ir_set->irq_control);
+ xhci_set_interrupter_moderation(ir, xhci->imod_interval);
if (xhci->quirks & XHCI_NEC_HOST) {
struct xhci_command *command;
@@ -771,28 +780,14 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
struct xhci_segment *seg;
ring = xhci->cmd_ring;
- seg = ring->deq_seg;
- do {
- memset(seg->trbs, 0,
- sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
- seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
- cpu_to_le32(~TRB_CYCLE);
- seg = seg->next;
- } while (seg != ring->deq_seg);
-
- /* Reset the software enqueue and dequeue pointers */
- ring->deq_seg = ring->first_seg;
- ring->dequeue = ring->first_seg->trbs;
- ring->enq_seg = ring->deq_seg;
- ring->enqueue = ring->dequeue;
-
- ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
- /*
- * Ring is now zeroed, so the HW should look for change of ownership
- * when the cycle bit is set to 1.
- */
- ring->cycle_state = 1;
+ xhci_for_each_ring_seg(ring->first_seg, seg) {
+ /* erase all TRBs before the link */
+ memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ /* clear link cycle bit */
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
+ }
+ xhci_initialize_ring_info(ring);
/*
* Reset the hardware dequeue pointer.
* Yes, this will need to be re-written after resume, but we're paranoid
@@ -1123,10 +1118,20 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
xhci_dbg(xhci, "Start the secondary HCD\n");
retval = xhci_run(xhci->shared_hcd);
}
-
+ if (retval)
+ return retval;
+ /*
+ * Resume roothubs unconditionally as PORTSC change bits are not
+ * immediately visible after xHC reset
+ */
hcd->state = HC_STATE_SUSPENDED;
- if (xhci->shared_hcd)
+
+ if (xhci->shared_hcd) {
xhci->shared_hcd->state = HC_STATE_SUSPENDED;
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
+ }
+ usb_hcd_resume_root_hub(hcd);
+
goto done;
}
@@ -1150,7 +1155,6 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
xhci_dbc_resume(xhci);
- done:
if (retval == 0) {
/*
* Resume roothubs only if there are pending events.
@@ -1176,6 +1180,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
usb_hcd_resume_root_hub(hcd);
}
}
+done:
/*
* If system is subject to the Quirk, Compliance Mode Timer needs to
* be re-initialized Always after a system resume. Ports are subject
@@ -1217,6 +1222,8 @@ static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
temp = kzalloc_node(buf_len, GFP_ATOMIC,
dev_to_node(hcd->self.sysdev));
+ if (!temp)
+ return -ENOMEM;
if (usb_urb_dir_out(urb))
sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
@@ -1742,7 +1749,7 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
urb->ep->desc.bEndpointAddress,
(unsigned long long) xhci_trb_virt_to_dma(
urb_priv->td[i].start_seg,
- urb_priv->td[i].first_trb));
+ urb_priv->td[i].start_trb));
for (; i < urb_priv->num_tds; i++) {
td = &urb_priv->td[i];
@@ -1754,15 +1761,27 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
}
}
- /* Queue a stop endpoint command, but only if this is
- * the first cancellation to be handled.
- */
- if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
+ /* These completion handlers will sort out cancelled TDs for us */
+ if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) {
+ xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n",
+ urb->dev->slot_id, ep_index, ep->ep_state);
+ goto done;
+ }
+
+ /* In this case no commands are pending but the endpoint is stopped */
+ if (ep->ep_state & EP_CLEARING_TT) {
+ /* and cancelled TDs can be given back right away */
+ xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n",
+ urb->dev->slot_id, ep_index, ep->ep_state);
+ xhci_process_cancelled_tds(ep);
+ } else {
+ /* Otherwise, queue a new Stop Endpoint command */
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
ret = -ENOMEM;
goto done;
}
+ ep->stop_time = jiffies;
ep->ep_state |= EP_STOP_CMD_PENDING;
xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
ep_index, 0);
@@ -2259,7 +2278,7 @@ static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
struct xhci_tt_bw_info *tt_info;
/* Find the bandwidth table for the root port this TT is attached to. */
- bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
+ bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table;
tt_info = virt_dev->tt_info;
/* If this TT already had active endpoints, the bandwidth for this TT
* has already been added. Removing all periodic endpoints (and thus
@@ -2377,7 +2396,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
if (virt_dev->tt_info) {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for rootport %u",
- virt_dev->real_port);
+ virt_dev->rhub_port->hw_portnum + 1);
if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
xhci_warn(xhci, "Not enough bandwidth on HS bus for "
"newly activated TT.\n");
@@ -2390,7 +2409,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
} else {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for rootport %u",
- virt_dev->real_port);
+ virt_dev->rhub_port->hw_portnum + 1);
}
/* Add in how much bandwidth will be used for interval zero, or the
@@ -2487,14 +2506,12 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
bw_used += overhead + packet_size;
if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
- unsigned int port_index = virt_dev->real_port - 1;
-
/* OK, we're manipulating a HS device attached to a
* root port bandwidth domain. Include the number of active TTs
* in the bandwidth used.
*/
bw_used += TT_HS_OVERHEAD *
- xhci->rh_bw[port_index].num_active_tts;
+ xhci->rh_bw[virt_dev->rhub_port->hw_portnum].num_active_tts;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
@@ -2681,7 +2698,7 @@ void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
if (!virt_dev->tt_info)
return;
- rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
+ rh_bw_info = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum];
if (old_active_eps == 0 &&
virt_dev->tt_info->active_eps != 0) {
rh_bw_info->num_active_tts += 1;
@@ -2782,6 +2799,51 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
return -ENOMEM;
}
+/*
+ * Synchronous XHCI stop endpoint helper. Issues the stop endpoint command and
+ * waits for the command completion before returning. This does not call
+ * xhci_handle_cmd_stop_ep(), which has additional handling for 'context error'
+ * cases, along with transfer ring cleanup.
+ *
+ * xhci_stop_endpoint_sync() is intended to be used by clients that manage
+ * their own transfer ring, for example in offload scenarios.
+ */
+int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend,
+ gfp_t gfp_flags)
+{
+ struct xhci_command *command;
+ unsigned long flags;
+ int ret;
+
+ command = xhci_alloc_command(xhci, true, gfp_flags);
+ if (!command)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id,
+ ep->ep_index, suspend);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto out;
+ }
+
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ wait_for_completion(command->completion);
+
+ /* No handling for COMP_CONTEXT_STATE_ERROR done at command completion */
+ if (command->status == COMP_COMMAND_ABORTED ||
+ command->status == COMP_COMMAND_RING_STOPPED) {
+ xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
+ ret = -ETIME;
+ }
+out:
+ xhci_free_command(xhci, command);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xhci_stop_endpoint_sync);
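/*
 * Illustrative sketch, not part of this patch: one way a client that manages
 * its own transfer ring (e.g. an audio offload driver) might call the
 * exported xhci_stop_endpoint_sync() above. The wrapper name and warning
 * text are hypothetical.
 */
static int xhci_offload_quiesce_ep(struct xhci_hcd *xhci, struct xhci_virt_ep *ep)
{
	int ret;

	/* Not suspending the endpoint; the call may sleep, so GFP_KERNEL is fine */
	ret = xhci_stop_endpoint_sync(xhci, ep, 0, GFP_KERNEL);
	if (ret)
		xhci_warn(xhci, "stop endpoint failed on ep %d: %d\n",
			  ep->ep_index, ret);
	return ret;
}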
/* Issue a configure endpoint command or evaluate context command
* and wait for it to finish.
@@ -2825,7 +2887,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
xhci->num_active_eps);
return -ENOMEM;
}
- if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
+ if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change &&
xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
xhci_free_host_resources(xhci, ctrl_ctx);
@@ -3680,6 +3742,8 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
xhci->num_active_eps);
}
+static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+
/*
* This submits a Reset Device Command, which will set the device state to 0,
* set the device address to 0, and disable all the endpoints except the default
@@ -3750,6 +3814,23 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
SLOT_STATE_DISABLED)
return 0;
+ if (xhci->quirks & XHCI_ETRON_HOST) {
+ /*
+ * Obtain a new device slot to inform the xHCI host that
+ * the USB device has been reset.
+ */
+ ret = xhci_disable_slot(xhci, udev->slot_id);
+ xhci_free_virt_device(xhci, udev->slot_id);
+ if (!ret) {
+ ret = xhci_alloc_dev(hcd, udev);
+ if (ret == 1)
+ ret = 0;
+ else
+ ret = -EINVAL;
+ }
+ return ret;
+ }
+
trace_xhci_discover_or_reset_device(slot_ctx);
xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
@@ -4188,8 +4269,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_unlock(&xhci->mutex);
ret = xhci_disable_slot(xhci, udev->slot_id);
xhci_free_virt_device(xhci, udev->slot_id);
- if (!ret)
- xhci_alloc_dev(hcd, udev);
+ if (!ret) {
+ if (xhci_alloc_dev(hcd, udev) == 1)
+ xhci_setup_addressable_virt_dev(xhci, udev);
+ }
kfree(command->completion);
kfree(command);
return -EPROTO;
@@ -4445,7 +4528,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
hlpm_addr = ports[port_num]->addr + PORTHLPMC;
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
- enable ? "enable" : "disable", port_num + 1);
+ str_enable_disable(enable), port_num + 1);
if (enable) {
/* Host supports BESL timeout instead of HIRD */
@@ -4505,35 +4588,27 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
return 0;
}
-/* check if a usb2 port supports a given extened capability protocol
- * only USB2 ports extended protocol capability values are cached.
- * Return 1 if capability is supported
- */
-static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
- unsigned capability)
-{
- u32 port_offset, port_count;
- int i;
-
- for (i = 0; i < xhci->num_ext_caps; i++) {
- if (xhci->ext_caps[i] & capability) {
- /* port offsets starts at 1 */
- port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
- port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
- if (port >= port_offset &&
- port < port_offset + port_count)
- return 1;
- }
- }
- return 0;
-}
-
static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- int portnum = udev->portnum - 1;
+ struct xhci_port *port;
+ u32 capability;
+
+ /* Check if USB3 device at root port is tunneled over USB4 */
+ if (hcd->speed >= HCD_USB3 && !udev->parent->parent) {
+ port = xhci->usb3_rhub.ports[udev->portnum - 1];
+
+ udev->tunnel_mode = xhci_port_is_tunneled(xhci, port);
+ if (udev->tunnel_mode == USB_LINK_UNKNOWN)
+ dev_dbg(&udev->dev, "link tunnel state unknown\n");
+ else if (udev->tunnel_mode == USB_LINK_TUNNELED)
+ dev_dbg(&udev->dev, "tunneled over USB4 link\n");
+ else if (udev->tunnel_mode == USB_LINK_NATIVE)
+ dev_dbg(&udev->dev, "native USB 3.x link\n");
+ return 0;
+ }
- if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
+ if (hcd->speed >= HCD_USB3 || !udev->lpm_capable || !xhci->hw_lpm_support)
return 0;
/* we only support lpm for non-hub device connected to root hub yet */
@@ -4541,14 +4616,14 @@ static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
udev->descriptor.bDeviceClass == USB_CLASS_HUB)
return 0;
- if (xhci->hw_lpm_support == 1 &&
- xhci_check_usb2_port_capability(
- xhci, portnum, XHCI_HLC)) {
+ port = xhci->usb2_rhub.ports[udev->portnum - 1];
+ capability = port->port_cap->protocol_caps;
+
+ if (capability & XHCI_HLC) {
udev->usb2_hw_lpm_capable = 1;
udev->l1_params.timeout = XHCI_L1_TIMEOUT;
udev->l1_params.besl = XHCI_DEFAULT_BESL;
- if (xhci_check_usb2_port_capability(xhci, portnum,
- XHCI_BLC))
+ if (capability & XHCI_BLC)
udev->usb2_hw_lpm_besl_capable = 1;
}
@@ -5245,6 +5320,11 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
if (xhci->hci_version > 0x96)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ if (xhci->hci_version == 0x95 && link_quirk) {
+ xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits");
+ xhci->quirks |= XHCI_LINK_TRB_QUIRK;
+ }
+
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6f82d404883f..779b01dee068 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -17,11 +17,15 @@
#include <linux/kernel.h>
#include <linux/usb/hcd.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
/* Code sharing between pci-quirks and xhci hcd */
#include "xhci-ext-caps.h"
#include "pci-quirks.h"
+#include "xhci-port.h"
+#include "xhci-caps.h"
+
/* max buffer size for trace and debug messages */
#define XHCI_MSG_MAX 500
@@ -62,90 +66,6 @@ struct xhci_cap_regs {
/* Reserved up to (CAPLENGTH - 0x1C) */
};
-/* hc_capbase bitmasks */
-/* bits 7:0 - how long is the Capabilities register */
-#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
-/* bits 31:16 */
-#define HC_VERSION(p) (((p) >> 16) & 0xffff)
-
-/* HCSPARAMS1 - hcs_params1 - bitmasks */
-/* bits 0:7, Max Device Slots */
-#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
-#define HCS_SLOTS_MASK 0xff
-/* bits 8:18, Max Interrupters */
-#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
-/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
-#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
-
-/* HCSPARAMS2 - hcs_params2 - bitmasks */
-/* bits 0:3, frames or uframes that SW needs to queue transactions
- * ahead of the HW to meet periodic deadlines */
-#define HCS_IST(p) (((p) >> 0) & 0xf)
-/* bits 4:7, max number of Event Ring segments */
-#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
-/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
-/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
-/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
-#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
-
-/* HCSPARAMS3 - hcs_params3 - bitmasks */
-/* bits 0:7, Max U1 to U0 latency for the roothub ports */
-#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
-/* bits 16:31, Max U2 to U0 latency for the roothub ports */
-#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
-
-/* HCCPARAMS - hcc_params - bitmasks */
-/* true: HC can use 64-bit address pointers */
-#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
-/* true: HC can do bandwidth negotiation */
-#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
-/* true: HC uses 64-byte Device Context structures
- * FIXME 64-byte context structures aren't supported yet.
- */
-#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
-/* true: HC has port power switches */
-#define HCC_PPC(p) ((p) & (1 << 3))
-/* true: HC has port indicators */
-#define HCS_INDICATOR(p) ((p) & (1 << 4))
-/* true: HC has Light HC Reset Capability */
-#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
-/* true: HC supports latency tolerance messaging */
-#define HCC_LTC(p) ((p) & (1 << 6))
-/* true: no secondary Stream ID Support */
-#define HCC_NSS(p) ((p) & (1 << 7))
-/* true: HC supports Stopped - Short Packet */
-#define HCC_SPC(p) ((p) & (1 << 9))
-/* true: HC has Contiguous Frame ID Capability */
-#define HCC_CFC(p) ((p) & (1 << 11))
-/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
-#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
-/* Extended Capabilities pointer from PCI base - section 5.3.6 */
-#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
-
-#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
-
-/* db_off bitmask - bits 0:1 reserved */
-#define DBOFF_MASK (~0x3)
-
-/* run_regs_off bitmask - bits 0:4 reserved */
-#define RTSOFF_MASK (~0x1f)
-
-/* HCCPARAMS2 - hcc_params2 - bitmasks */
-/* true: HC supports U3 entry Capability */
-#define HCC2_U3C(p) ((p) & (1 << 0))
-/* true: HC supports Configure endpoint command Max exit latency too large */
-#define HCC2_CMC(p) ((p) & (1 << 1))
-/* true: HC supports Force Save context Capability */
-#define HCC2_FSC(p) ((p) & (1 << 2))
-/* true: HC supports Compliance Transition Capability */
-#define HCC2_CTC(p) ((p) & (1 << 3))
-/* true: HC support Large ESIT payload Capability > 48k */
-#define HCC2_LEC(p) ((p) & (1 << 4))
-/* true: HC support Configuration Information Capability */
-#define HCC2_CIC(p) ((p) & (1 << 5))
-/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */
-#define HCC2_ETC(p) ((p) & (1 << 6))
-
/* Number of registers per port */
#define NUM_PORT_REGS 4
@@ -291,181 +211,6 @@ struct xhci_op_regs {
#define CONFIG_CIE (1 << 9)
/* bits 10:31 - reserved and should be preserved */
-/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
-/* true: device connected */
-#define PORT_CONNECT (1 << 0)
-/* true: port enabled */
-#define PORT_PE (1 << 1)
-/* bit 2 reserved and zeroed */
-/* true: port has an over-current condition */
-#define PORT_OC (1 << 3)
-/* true: port reset signaling asserted */
-#define PORT_RESET (1 << 4)
-/* Port Link State - bits 5:8
- * A read gives the current link PM state of the port,
- * a write with Link State Write Strobe set sets the link state.
- */
-#define PORT_PLS_MASK (0xf << 5)
-#define XDEV_U0 (0x0 << 5)
-#define XDEV_U1 (0x1 << 5)
-#define XDEV_U2 (0x2 << 5)
-#define XDEV_U3 (0x3 << 5)
-#define XDEV_DISABLED (0x4 << 5)
-#define XDEV_RXDETECT (0x5 << 5)
-#define XDEV_INACTIVE (0x6 << 5)
-#define XDEV_POLLING (0x7 << 5)
-#define XDEV_RECOVERY (0x8 << 5)
-#define XDEV_HOT_RESET (0x9 << 5)
-#define XDEV_COMP_MODE (0xa << 5)
-#define XDEV_TEST_MODE (0xb << 5)
-#define XDEV_RESUME (0xf << 5)
-
-/* true: port has power (see HCC_PPC) */
-#define PORT_POWER (1 << 9)
-/* bits 10:13 indicate device speed:
- * 0 - undefined speed - port hasn't be initialized by a reset yet
- * 1 - full speed
- * 2 - low speed
- * 3 - high speed
- * 4 - super speed
- * 5-15 reserved
- */
-#define DEV_SPEED_MASK (0xf << 10)
-#define XDEV_FS (0x1 << 10)
-#define XDEV_LS (0x2 << 10)
-#define XDEV_HS (0x3 << 10)
-#define XDEV_SS (0x4 << 10)
-#define XDEV_SSP (0x5 << 10)
-#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
-#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
-#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
-#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
-#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
-#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
-#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
-#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
-
-/* Bits 20:23 in the Slot Context are the speed for the device */
-#define SLOT_SPEED_FS (XDEV_FS << 10)
-#define SLOT_SPEED_LS (XDEV_LS << 10)
-#define SLOT_SPEED_HS (XDEV_HS << 10)
-#define SLOT_SPEED_SS (XDEV_SS << 10)
-#define SLOT_SPEED_SSP (XDEV_SSP << 10)
-/* Port Indicator Control */
-#define PORT_LED_OFF (0 << 14)
-#define PORT_LED_AMBER (1 << 14)
-#define PORT_LED_GREEN (2 << 14)
-#define PORT_LED_MASK (3 << 14)
-/* Port Link State Write Strobe - set this when changing link state */
-#define PORT_LINK_STROBE (1 << 16)
-/* true: connect status change */
-#define PORT_CSC (1 << 17)
-/* true: port enable change */
-#define PORT_PEC (1 << 18)
-/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
- * into an enabled state, and the device into the default state. A "warm" reset
- * also resets the link, forcing the device through the link training sequence.
- * SW can also look at the Port Reset register to see when warm reset is done.
- */
-#define PORT_WRC (1 << 19)
-/* true: over-current change */
-#define PORT_OCC (1 << 20)
-/* true: reset change - 1 to 0 transition of PORT_RESET */
-#define PORT_RC (1 << 21)
-/* port link status change - set on some port link state transitions:
- * Transition Reason
- * ------------------------------------------------------------------------------
- * - U3 to Resume Wakeup signaling from a device
- * - Resume to Recovery to U0 USB 3.0 device resume
- * - Resume to U0 USB 2.0 device resume
- * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
- * - U3 to U0 Software resume of USB 2.0 device complete
- * - U2 to U0 L1 resume of USB 2.1 device complete
- * - U0 to U0 (???) L1 entry rejection by USB 2.1 device
- * - U0 to disabled L1 entry error with USB 2.1 device
- * - Any state to inactive Error on USB 3.0 port
- */
-#define PORT_PLC (1 << 22)
-/* port configure error change - port failed to configure its link partner */
-#define PORT_CEC (1 << 23)
-#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
- PORT_RC | PORT_PLC | PORT_CEC)
-
-
-/* Cold Attach Status - xHC can set this bit to report device attached during
- * Sx state. Warm port reset should be perfomed to clear this bit and move port
- * to connected state.
- */
-#define PORT_CAS (1 << 24)
-/* wake on connect (enable) */
-#define PORT_WKCONN_E (1 << 25)
-/* wake on disconnect (enable) */
-#define PORT_WKDISC_E (1 << 26)
-/* wake on over-current (enable) */
-#define PORT_WKOC_E (1 << 27)
-/* bits 28:29 reserved */
-/* true: device is non-removable - for USB 3.0 roothub emulation */
-#define PORT_DEV_REMOVE (1 << 30)
-/* Initiate a warm port reset - complete when PORT_WRC is '1' */
-#define PORT_WR (1 << 31)
-
-/* We mark duplicate entries with -1 */
-#define DUPLICATE_ENTRY ((u8)(-1))
-
-/* Port Power Management Status and Control - port_power_base bitmasks */
-/* Inactivity timer value for transitions into U1, in microseconds.
- * Timeout can be up to 127us. 0xFF means an infinite timeout.
- */
-#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
-#define PORT_U1_TIMEOUT_MASK 0xff
-/* Inactivity timer value for transitions into U2 */
-#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
-#define PORT_U2_TIMEOUT_MASK (0xff << 8)
-/* Bits 24:31 for port testing */
-
-/* USB2 Protocol PORTSPMSC */
-#define PORT_L1S_MASK 7
-#define PORT_L1S_SUCCESS 1
-#define PORT_RWE (1 << 3)
-#define PORT_HIRD(p) (((p) & 0xf) << 4)
-#define PORT_HIRD_MASK (0xf << 4)
-#define PORT_L1DS_MASK (0xff << 8)
-#define PORT_L1DS(p) (((p) & 0xff) << 8)
-#define PORT_HLE (1 << 16)
-#define PORT_TEST_MODE_SHIFT 28
-
-/* USB3 Protocol PORTLI Port Link Information */
-#define PORT_RX_LANES(p) (((p) >> 16) & 0xf)
-#define PORT_TX_LANES(p) (((p) >> 20) & 0xf)
-
-/* USB2 Protocol PORTHLPMC */
-#define PORT_HIRDM(p)((p) & 3)
-#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
-#define PORT_BESLD(p)(((p) & 0xf) << 10)
-
-/* use 512 microseconds as USB2 LPM L1 default timeout. */
-#define XHCI_L1_TIMEOUT 512
-
-/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
- * Safe to use with mixed HIRD and BESL systems (host and device) and is used
- * by other operating systems.
- *
- * XHCI 1.0 errata 8/14/12 Table 13 notes:
- * "Software should choose xHC BESL/BESLD field values that do not violate a
- * device's resume latency requirements,
- * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
- * or not program values < '4' if BLC = '0' and a BESL device is attached.
- */
-#define XHCI_DEFAULT_BESL 4
-
-/*
- * USB3 specification define a 360ms tPollingLFPSTiemout for USB3 ports
- * to complete link training. usually link trainig completes much faster
- * so check status 10 times with 36ms sleep in places we need to wait for
- * polling to complete.
- */
-#define XHCI_PORT_POLLING_LFPS_TIME 36
-
/**
* struct xhci_intr_reg - Interrupt Register Set
* @irq_pending: IMAN - Interrupt Management Register. Used to enable
@@ -784,6 +529,7 @@ struct xhci_command {
/* Input context for changing device state */
struct xhci_container_ctx *in_ctx;
u32 status;
+ u32 comp_param;
int slot_id;
/* If completion is null, no one is waiting on this command
* and the structure can be freed after the command completes.
@@ -809,6 +555,7 @@ struct xhci_stream_ctx {
/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
#define SCT_FOR_CTX(p) (((p) & 0x7) << 1)
+#define CTX_TO_SCT(p) (((p) >> 1) & 0x7)
/* Secondary stream array type, dequeue pointer is to a transfer ring */
#define SCT_SEC_TR 0
/* Primary stream array type, dequeue pointer is to a transfer ring */
@@ -945,6 +692,7 @@ struct xhci_virt_ep {
/* Bandwidth checking storage */
struct xhci_bw_info bw_info;
struct list_head bw_endpoint_list;
+ unsigned long stop_time;
/* Isoch Frame ID checking storage */
int next_frame_id;
/* Use new Isoch TRB layout needed for extended TBC support */
@@ -995,8 +743,7 @@ struct xhci_virt_device {
/* Used for addressing devices and configuration changes */
struct xhci_container_ctx *in_ctx;
struct xhci_virt_ep eps[EP_CTX_PER_DEV];
- u8 fake_port;
- u8 real_port;
+ struct xhci_port *rhub_port;
struct xhci_interval_bw_table *bw_table;
struct xhci_tt_bw_info *tt_info;
/*
@@ -1062,13 +809,19 @@ struct xhci_transfer_event {
__le32 flags;
};
+/* Transfer event flags bitfield, also for select command completion events */
+#define TRB_TO_SLOT_ID(p) (((p) >> 24) & 0xff)
+#define SLOT_ID_FOR_TRB(p) (((p) & 0xff) << 24)
+
+#define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f) /* Endpoint ID 1 - 31 */
+#define EP_ID_FOR_TRB(p) (((p) & 0x1f) << 16)
+
+#define TRB_TO_EP_INDEX(p) (TRB_TO_EP_ID(p) - 1) /* Endpoint index 0 - 30 */
+#define EP_INDEX_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16)
+
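/*
 * Illustrative example, not part of this patch: the macros above convert
 * between the 1-based endpoint ID (DCI) carried in TRB fields and the
 * 0-based index into xhci_virt_device->eps[]. For EP1 OUT (ID 2) on slot 5:
 *
 *	u32 field3 = EP_ID_FOR_TRB(2) | SLOT_ID_FOR_TRB(5);
 *
 *	TRB_TO_EP_ID(field3)	== 2
 *	TRB_TO_EP_INDEX(field3)	== 1
 *	TRB_TO_SLOT_ID(field3)	== 5
 */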
/* Transfer event TRB length bit mask */
-/* bits 0:23 */
#define EVENT_TRB_LEN(p) ((p) & 0xffffff)
-/** Transfer Event bit fields **/
-#define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f)
-
/* Completion Code - only applicable for some types of TRBs */
#define COMP_CODE_MASK (0xff << 24)
#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24)
@@ -1207,7 +960,8 @@ struct xhci_event_cmd {
__le32 flags;
};
-/* flags bitmasks */
+/* status bitmasks */
+#define COMP_PARAM(p) ((p) & 0xffffff) /* Command Completion Parameter */
/* Address device - disable SetAddress */
#define TRB_BSR (1<<9)
@@ -1244,13 +998,8 @@ enum xhci_setup_dev {
/* bits 16:23 are the virtual function ID */
/* bits 24:31 are the slot ID */
-#define TRB_TO_SLOT_ID(p) (((p) & (0xff<<24)) >> 24)
-#define SLOT_ID_FOR_TRB(p) (((p) & 0xff) << 24)
/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
-#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1)
-#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16)
-
#define SUSPEND_PORT_FOR_TRB(p) (((p) & 1) << 23)
#define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23)
#define LAST_EP_INDEX 30
@@ -1258,7 +1007,7 @@ enum xhci_setup_dev {
/* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
-#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
+#define SCT_FOR_TRB(p) (((p) & 0x7) << 1)
/* Link TRB specific fields */
#define TRB_TC (1<<1)
@@ -1280,9 +1029,6 @@ enum xhci_setup_dev {
/* Interrupter Target - which MSI-X vector to target the completion event at */
#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
-/* Total burst count field, Rsvdz on xhci 1.1 with Extended TBC enabled (ETE) */
-#define TRB_TBC(p) (((p) & 0x3) << 7)
-#define TRB_TLBPC(p) (((p) & 0xf) << 16)
/* Cycle bit - indicates TRB ownership by HC or HCD */
#define TRB_CYCLE (1<<0)
@@ -1316,6 +1062,12 @@ enum xhci_setup_dev {
/* Isochronous TRB specific fields */
#define TRB_SIA (1<<31)
#define TRB_FRAME_ID(p) (((p) & 0x7ff) << 20)
+#define GET_FRAME_ID(p) (((p) >> 20) & 0x7ff)
+/* Total burst count field, Rsvdz on xhci 1.1 with Extended TBC enabled (ETE) */
+#define TRB_TBC(p) (((p) & 0x3) << 7)
+#define GET_TBC(p) (((p) >> 7) & 0x3)
+#define TRB_TLBPC(p) (((p) & 0xf) << 16)
+#define GET_TLBPC(p) (((p) >> 16) & 0xf)
/* TRB cache size for xHC with TRB cache */
#define TRB_CACHE_SIZE_HS 8
@@ -1516,6 +1268,9 @@ static inline const char *xhci_trb_type_string(u8 type)
#define AVOID_BEI_INTERVAL_MIN 8
#define AVOID_BEI_INTERVAL_MAX 32
+#define xhci_for_each_ring_seg(head, seg) \
+ for (seg = head; seg != NULL; seg = (seg->next != head ? seg->next : NULL))
+
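/*
 * Illustrative sketch, not part of this patch: xhci_for_each_ring_seg() walks
 * the circular, singly linked segment list exactly once, starting at 'head'
 * and stopping once the next pointer wraps back to it (as used by
 * xhci_clear_command_ring() earlier in this diff). A hypothetical helper
 * counting a ring's segments could look like this:
 */
static inline unsigned int xhci_count_ring_segs(struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	unsigned int count = 0;

	xhci_for_each_ring_seg(ring->first_seg, seg)
		count++;

	return count;
}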
struct xhci_segment {
union xhci_trb *trbs;
/* private to HCD */
@@ -1533,6 +1288,7 @@ enum xhci_cancelled_td_status {
TD_DIRTY = 0,
TD_HALTED,
TD_CLEARING_CACHE,
+ TD_CLEARING_CACHE_DEFERRED,
TD_CLEARED,
};
@@ -1543,14 +1299,13 @@ struct xhci_td {
enum xhci_cancelled_td_status cancel_status;
struct urb *urb;
struct xhci_segment *start_seg;
- union xhci_trb *first_trb;
- union xhci_trb *last_trb;
- struct xhci_segment *last_trb_seg;
+ union xhci_trb *start_trb;
+ struct xhci_segment *end_seg;
+ union xhci_trb *end_trb;
struct xhci_segment *bounce_seg;
/* actual_length of the URB has already been set */
bool urb_length_set;
bool error_mid_td;
- unsigned int num_trbs;
};
/*
@@ -1633,8 +1388,6 @@ struct xhci_erst {
unsigned int num_entries;
/* xhci->event_ring keeps track of segment dma addresses */
dma_addr_t erst_dma_addr;
- /* Num entries the ERST can contain */
- unsigned int erst_size;
};
struct xhci_scratchpad {
@@ -1649,8 +1402,8 @@ struct urb_priv {
struct xhci_td td[] __counted_by(num_tds);
};
-/* Reasonable limit for number of Event Ring segments (spec allows 32k) */
-#define ERST_MAX_SEGS 2
+/* Number of Event Ring segments to allocate when the amount is not specified (spec allows 32k) */
+#define ERST_DEFAULT_SEGS 2
/* Poll every 60 seconds */
#define POLL_TIMEOUT 60
/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
@@ -1688,6 +1441,8 @@ struct xhci_interrupter {
struct xhci_erst erst;
struct xhci_intr_reg __iomem *ir_set;
unsigned int intr_num;
+ bool ip_autoclear;
+ u32 isoc_bei_interval;
/* For interrupter registers save and restore over suspend/resume */
u32 s3_irq_pending;
u32 s3_irq_control;
@@ -1706,6 +1461,7 @@ struct xhci_port_cap {
u8 psi_uid_count;
u8 maj_rev;
u8 min_rev;
+ u32 protocol_caps;
};
struct xhci_port {
@@ -1717,6 +1473,8 @@ struct xhci_port {
unsigned int lpm_incapable:1;
unsigned long resume_timestamp;
bool rexit_active;
+ /* Slot ID is the index of the device directly connected to the port */
+ int slot_id;
struct completion rexit_done;
struct completion u3exit_done;
};
@@ -1752,16 +1510,10 @@ struct xhci_hcd {
spinlock_t lock;
/* packed release number */
- u8 sbrn;
u16 hci_version;
- u8 max_slots;
u16 max_interrupters;
- u8 max_ports;
- u8 isoc_threshold;
/* imod_interval in ns (I * 250ns) */
u32 imod_interval;
- u32 isoc_bei_interval;
- int event_ring_max;
/* 4KB min, 128MB max */
int page_size;
/* Valid values are 12 to 20, inclusive */
@@ -1843,7 +1595,7 @@ struct xhci_hcd {
#define XHCI_RESET_ON_RESUME BIT_ULL(7)
#define XHCI_SW_BW_CHECKING BIT_ULL(8)
#define XHCI_AMD_0x96_HOST BIT_ULL(9)
-#define XHCI_TRUST_TX_LENGTH BIT_ULL(10)
+#define XHCI_TRUST_TX_LENGTH BIT_ULL(10) /* Deprecated */
#define XHCI_LPM_SUPPORT BIT_ULL(11)
#define XHCI_INTEL_HOST BIT_ULL(12)
#define XHCI_SPURIOUS_REBOOT BIT_ULL(13)
@@ -1871,7 +1623,7 @@ struct xhci_hcd {
#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
-#define XHCI_RENESAS_FW_QUIRK BIT_ULL(36)
+/* Reserved. It was XHCI_RENESAS_FW_QUIRK */
#define XHCI_SKIP_PHY_INIT BIT_ULL(37)
#define XHCI_DISABLE_SPARSE BIT_ULL(38)
#define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39)
@@ -1880,8 +1632,11 @@ struct xhci_hcd {
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
#define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43)
#define XHCI_RESET_TO_DEFAULT BIT_ULL(44)
-#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45)
+#define XHCI_TRB_OVERFETCH BIT_ULL(45)
#define XHCI_ZHAOXIN_HOST BIT_ULL(46)
+#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
+#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
+#define XHCI_ETRON_HOST BIT_ULL(49)
unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -1894,9 +1649,6 @@ struct xhci_hcd {
unsigned broken_suspend:1;
/* Indicates that omitting hcd is supported if root hub has no ports */
unsigned allow_single_roothub:1;
- /* cached usb2 extened protocol capabilites */
- u32 *ext_caps;
- unsigned int num_ext_caps;
/* cached extended protocol port capabilities */
struct xhci_port_cap *port_caps;
unsigned int num_port_caps;
@@ -1983,8 +1735,6 @@ static inline bool xhci_has_one_roothub(struct xhci_hcd *xhci)
dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
#define xhci_warn(xhci, fmt, args...) \
dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
-#define xhci_warn_ratelimited(xhci, fmt, args...) \
- dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
#define xhci_info(xhci, fmt, args...) \
dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
@@ -2008,9 +1758,12 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
lo_hi_writeq(val, regs);
}
-static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
+
+/* Link TRB chain should always be set on 0.95 hosts, and AMD 0.96 ISOC rings */
+static inline bool xhci_link_chain_quirk(struct xhci_hcd *xhci, enum xhci_ring_type type)
{
- return xhci->quirks & XHCI_LINK_TRB_QUIRK;
+ return (xhci->quirks & XHCI_LINK_TRB_QUIRK) ||
+ (type == TYPE_ISOC && (xhci->quirks & XHCI_AMD_0x96_HOST));
}
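/*
 * Illustrative sketch, not part of this patch: the segment-linking code in
 * xhci-mem.c is what actually consults xhci_link_chain_quirk(); a simplified,
 * hypothetical version of that decision looks like this.
 */
static inline void xhci_sketch_link_chain(struct xhci_hcd *xhci,
					  union xhci_trb *link_trb,
					  enum xhci_ring_type type)
{
	if (xhci_link_chain_quirk(xhci, type))
		link_trb->link.control |= cpu_to_le32(TRB_CHAIN);
	else
		link_trb->link.control &= cpu_to_le32(~TRB_CHAIN);
}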
/* xHCI debugging */
@@ -2048,14 +1801,12 @@ void xhci_slot_copy(struct xhci_hcd *xhci,
int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
struct usb_device *udev, struct usb_host_endpoint *ep,
gfp_t mem_flags);
-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
- unsigned int num_segs, unsigned int cycle_state,
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs, gfp_t flags);
-void xhci_initialize_ring_info(struct xhci_ring *ring,
- unsigned int cycle_state);
+void xhci_initialize_ring_info(struct xhci_ring *ring);
void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index);
@@ -2087,7 +1838,8 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx);
struct xhci_interrupter *
-xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg);
+xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
+ u32 imod_interval);
void xhci_remove_secondary_interrupter(struct usb_hcd
*hcd, struct xhci_interrupter *ir);
@@ -2127,12 +1879,13 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
+int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
+ u32 imod_interval);
/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
-struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
- struct xhci_segment *start_seg, union xhci_trb *start_trb,
- union xhci_trb *end_trb, dma_addr_t suspect_dma, bool debug);
+struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ dma_addr_t suspect_dma, bool debug);
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
@@ -2161,10 +1914,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
enum xhci_ep_reset_type reset_type);
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 slot_id);
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
- unsigned int ep_index, unsigned int stream_id,
- struct xhci_td *td);
-void xhci_stop_endpoint_command_watchdog(struct timer_list *t);
void xhci_handle_command_timeout(struct work_struct *work);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
@@ -2175,6 +1924,9 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring);
unsigned int count_trbs(u64 addr, u64 len);
+int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ int suspend, gfp_t gfp_flags);
+void xhci_process_cancelled_tds(struct xhci_virt_ep *ep);
/* xHCI roothub code */
void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
@@ -2186,7 +1938,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
-
+enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci,
+ struct xhci_port *port);
void xhci_hc_died(struct xhci_hcd *xhci);
#ifdef CONFIG_PM
@@ -2200,8 +1953,6 @@ unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd);
#endif /* CONFIG_PM */
u32 xhci_port_state_to_neutral(u32 state);
-int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
- u16 port);
void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);
/* xHCI contexts */
@@ -2283,8 +2034,7 @@ static inline const char *xhci_decode_trb(char *str, size_t size,
field1, field0,
xhci_trb_comp_code_string(GET_COMP_CODE(field2)),
EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
- /* Macro decrements 1, maybe it shouldn't?!? */
- TRB_TO_EP_INDEX(field3) + 1,
+ TRB_TO_EP_ID(field3),
xhci_trb_type_string(type),
field3 & EVENT_DATA ? 'E' : 'e',
field3 & TRB_CYCLE ? 'C' : 'c');
@@ -2334,7 +2084,6 @@ static inline const char *xhci_decode_trb(char *str, size_t size,
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_NORMAL:
- case TRB_ISOC:
case TRB_EVENT_DATA:
case TRB_TR_NOOP:
snprintf(str, size,
@@ -2351,7 +2100,25 @@ static inline const char *xhci_decode_trb(char *str, size_t size,
field3 & TRB_ENT ? 'E' : 'e',
field3 & TRB_CYCLE ? 'C' : 'c');
break;
-
+ case TRB_ISOC:
+ snprintf(str, size,
+ "Buffer %08x%08x length %d TD size/TBC %d intr %d type '%s' TBC %u TLBPC %u frame_id %u flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
+ field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ xhci_trb_type_string(type),
+ GET_TBC(field3),
+ GET_TLBPC(field3),
+ GET_FRAME_ID(field3),
+ field3 & TRB_SIA ? 'S' : 's',
+ field3 & TRB_BEI ? 'B' : 'b',
+ field3 & TRB_IDT ? 'I' : 'i',
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_NO_SNOOP ? 'S' : 's',
+ field3 & TRB_ISP ? 'I' : 'i',
+ field3 & TRB_ENT ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
case TRB_CMD_NOOP:
case TRB_ENABLE_SLOT:
snprintf(str, size,
@@ -2399,8 +2166,7 @@ static inline const char *xhci_decode_trb(char *str, size_t size,
xhci_trb_type_string(type),
field1, field0,
TRB_TO_SLOT_ID(field3),
- /* Macro decrements 1, maybe it shouldn't?!? */
- TRB_TO_EP_INDEX(field3) + 1,
+ TRB_TO_EP_ID(field3),
field3 & TRB_TSP ? 'T' : 't',
field3 & TRB_CYCLE ? 'C' : 'c');
break;
@@ -2410,8 +2176,7 @@ static inline const char *xhci_decode_trb(char *str, size_t size,
xhci_trb_type_string(type),
TRB_TO_SLOT_ID(field3),
TRB_TO_SUSPEND_PORT(field3),
- /* Macro decrements 1, maybe it shouldn't?!? */
- TRB_TO_EP_INDEX(field3) + 1,
+ TRB_TO_EP_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_SET_DEQ:
@@ -2421,8 +2186,7 @@ static inline const char *xhci_decode_trb(char *str, size_t size,
field1, field0,
TRB_TO_STREAM_ID(field2),
TRB_TO_SLOT_ID(field3),
- /* Macro decrements 1, maybe it shouldn't?!? */
- TRB_TO_EP_INDEX(field3) + 1,
+ TRB_TO_EP_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_RESET_DEV:
@@ -2596,7 +2360,12 @@ static inline const char *xhci_decode_portsc(char *str, u32 portsc)
{
int ret;
- ret = sprintf(str, "%s %s %s Link:%s PortSpeed:%d ",
+ ret = sprintf(str, "0x%08x ", portsc);
+
+ if (portsc == ~(u32)0)
+ return str;
+
+ ret += sprintf(str + ret, "%s %s %s Link:%s PortSpeed:%d ",
portsc & PORT_POWER ? "Powered" : "Powered-off",
portsc & PORT_CONNECT ? "Connected" : "Not-connected",
portsc & PORT_PE ? "Enabled" : "Disabled",