Diffstat (limited to 'drivers/usb/host/xhci.c')
 drivers/usb/host/xhci.c | 495 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 343 insertions(+), 152 deletions(-)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 8579603edaff..8a819e853288 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -8,6 +8,7 @@
* Some code borrowed from the Linux EHCI driver.
*/
+#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
@@ -16,8 +17,10 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
+#include <linux/usb/xhci-sideband.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -40,15 +43,15 @@ MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
- struct xhci_segment *seg = ring->first_seg;
+ struct xhci_segment *seg;
if (!td || !td->start_seg)
return false;
- do {
+
+ xhci_for_each_ring_seg(ring->first_seg, seg) {
if (seg == td->start_seg)
return true;
- seg = seg->next;
- } while (seg && seg != ring->first_seg);
+ }
return false;
}
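The open-coded do/while walk is replaced by a segment iterator. Its definition lives in xhci.h and is not part of this hunk; a sketch of what such a macro plausibly looks like, stopping once ->next wraps back around to the head segment:

/* Plausible shape of the xhci.h iterator (not shown in this diff) */
#define xhci_for_each_ring_seg(head, seg) \
	for (seg = (head); (seg); \
	     seg = ((seg)->next != (head) ? (seg)->next : NULL))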
@@ -82,29 +85,6 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
}
/*
- * xhci_handshake_check_state - same as xhci_handshake but takes an additional
- * exit_state parameter, and bails out with an error immediately when xhc_state
- * has exit_state flag set.
- */
-int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
- u32 mask, u32 done, int usec, unsigned int exit_state)
-{
- u32 result;
- int ret;
-
- ret = readl_poll_timeout_atomic(ptr, result,
- (result & mask) == done ||
- result == U32_MAX ||
- xhci->xhc_state & exit_state,
- 1, usec);
-
- if (result == U32_MAX || xhci->xhc_state & exit_state)
- return -ENODEV;
-
- return ret;
-}
-
-/*
* Disable interrupts and begin the xHCI halting process.
*/
void xhci_quiesce(struct xhci_hcd *xhci)
@@ -224,8 +204,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
if (xhci->quirks & XHCI_INTEL_HOST)
udelay(1000);
- ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
- CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING);
+ ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
if (ret)
return ret;
@@ -320,45 +299,56 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
xhci_info(xhci, "Fault detected\n");
}
-static int xhci_enable_interrupter(struct xhci_interrupter *ir)
+int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
u32 iman;
if (!ir || !ir->ir_set)
return -EINVAL;
- iman = readl(&ir->ir_set->irq_pending);
- writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending);
+ iman = readl(&ir->ir_set->iman);
+ iman |= IMAN_IE;
+ writel(iman, &ir->ir_set->iman);
+ /* Read operation to guarantee the write has been flushed from posted buffers */
+ readl(&ir->ir_set->iman);
return 0;
}
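The read-back of iman is the standard defence against PCI posted writes: a write can linger in bridge buffers, and a read from the same device forces it to complete. A self-contained sketch of the idiom, using a hypothetical register block and enable bit:

#include <linux/io.h>
#include <linux/bits.h>

#define CTRL_ENABLE	BIT(0)			/* hypothetical enable bit */

struct demo_regs { u32 ctrl; };			/* hypothetical MMIO layout */

static void demo_enable(struct demo_regs __iomem *regs)
{
	u32 ctrl = readl(&regs->ctrl);

	writel(ctrl | CTRL_ENABLE, &regs->ctrl);
	readl(&regs->ctrl);	/* flush the posted write before relying on it */
}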
-static int xhci_disable_interrupter(struct xhci_interrupter *ir)
+int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
u32 iman;
if (!ir || !ir->ir_set)
return -EINVAL;
- iman = readl(&ir->ir_set->irq_pending);
- writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending);
+ iman = readl(&ir->ir_set->iman);
+ iman &= ~IMAN_IE;
+ writel(iman, &ir->ir_set->iman);
+
+ iman = readl(&ir->ir_set->iman);
+ if (iman & IMAN_IP)
+ xhci_dbg(xhci, "%s: Interrupt pending\n", __func__);
return 0;
}
/* interrupt moderation interval imod_interval in nanoseconds */
-static int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
- u32 imod_interval)
+int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
+ u32 imod_interval)
{
u32 imod;
- if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250)
+ if (!ir || !ir->ir_set)
return -EINVAL;
- imod = readl(&ir->ir_set->irq_control);
- imod &= ~ER_IRQ_INTERVAL_MASK;
- imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
- writel(imod, &ir->ir_set->irq_control);
+ /* IMODI value in IMOD register is in 250ns increments */
+ imod_interval = umin(imod_interval / 250, IMODI_MASK);
+
+ imod = readl(&ir->ir_set->imod);
+ imod &= ~IMODI_MASK;
+ imod |= imod_interval;
+ writel(imod, &ir->ir_set->imod);
return 0;
}
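The IMODI field counts 250 ns ticks, so the new code converts and clamps instead of rejecting large requests: 40000 ns becomes 40000 / 250 = 160 ticks, and anything above IMODI_MASK (0xffff) ticks, roughly 16.4 ms, saturates at the field maximum. The arithmetic in isolation:

#include <linux/minmax.h>

#define IMODI_MASK	0xffff	/* 16-bit moderation interval field */

/* Nanoseconds to IMODI ticks of 250 ns, saturating at the field width */
static u32 ns_to_imodi(u32 interval_ns)
{
	return umin(interval_ns / 250, IMODI_MASK);
}
/* ns_to_imodi(40000) == 160; ns_to_imodi(U32_MAX) == 0xffff */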
@@ -371,7 +361,7 @@ static void compliance_mode_recovery(struct timer_list *t)
u32 temp;
int i;
- xhci = from_timer(xhci, t, comp_mode_recovery_timer);
+ xhci = timer_container_of(xhci, t, comp_mode_recovery_timer);
rhub = &xhci->usb3_rhub;
hcd = rhub->hcd;
@@ -458,6 +448,82 @@ static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}
+static void xhci_hcd_page_size(struct xhci_hcd *xhci)
+{
+ u32 page_size;
+
+ page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;
+ if (!is_power_of_2(page_size)) {
+ xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size);
+ /* Fallback to 4K page size, since that's common */
+ page_size = 1;
+ }
+
+ xhci->page_size = page_size << 12;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK",
+ xhci->page_size >> 10);
+}
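The PAGESIZE register encodes the supported page size as a single set bit: bit n set means 2^(n+12) bytes, so a raw value of 0x1 is 4 KiB and 0x8 would be 32 KiB. That is why a valid value must be a power of two and why shifting by 12 recovers the size in bytes:

#include <linux/log2.h>

/* Decode PAGESIZE: bit n set => 2^(n+12) bytes; fall back to 4K if garbled */
static unsigned int decode_xhci_page_size(u32 reg)
{
	if (!is_power_of_2(reg))	/* zero or more than one bit set */
		reg = 1;
	return reg << 12;
}
/* decode_xhci_page_size(0x1) == 4096; decode_xhci_page_size(0x8) == 32768 */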
+
+static void xhci_enable_max_dev_slots(struct xhci_hcd *xhci)
+{
+ u32 config_reg;
+ u32 max_slots;
+
+ max_slots = HCS_MAX_SLOTS(xhci->hcs_params1);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHC can handle at most %d device slots",
+ max_slots);
+
+ config_reg = readl(&xhci->op_regs->config_reg);
+ config_reg &= ~HCS_SLOTS_MASK;
+ config_reg |= max_slots;
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting Max device slots reg = 0x%x",
+ config_reg);
+ writel(config_reg, &xhci->op_regs->config_reg);
+}
+
+static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
+{
+ dma_addr_t deq_dma;
+ u64 crcr;
+
+ deq_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue);
+ deq_dma &= CMD_RING_PTR_MASK;
+
+ crcr = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ crcr &= ~CMD_RING_PTR_MASK;
+ crcr |= deq_dma;
+
+ crcr &= ~CMD_RING_CYCLE;
+ crcr |= xhci->cmd_ring->cycle_state;
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting command ring address to 0x%llx", crcr);
+ xhci_write_64(xhci, crcr, &xhci->op_regs->cmd_ring);
+}
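For reference, the register layout the helper relies on: the dequeue pointer occupies the 64-byte aligned high bits of CRCR (CMD_RING_PTR_MASK) and bit 0 (CMD_RING_CYCLE) is the ring cycle state, so the update is two masked splices. A standalone sketch, assuming those mask definitions:

/* Compose CRCR from a prior value, a 64-byte aligned dequeue DMA address,
 * and the ring cycle state (sketch; masks as used in the helper above).
 */
static u64 crcr_compose(u64 old_crcr, dma_addr_t deq_dma, u32 cycle_state)
{
	u64 crcr = old_crcr & ~(CMD_RING_PTR_MASK | CMD_RING_CYCLE);

	return crcr | (deq_dma & CMD_RING_PTR_MASK) |
	       (cycle_state & CMD_RING_CYCLE);
}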
+
+static void xhci_set_doorbell_ptr(struct xhci_hcd *xhci)
+{
+ u32 offset;
+
+ offset = readl(&xhci->cap_regs->db_off) & DBOFF_MASK;
+ xhci->dba = (void __iomem *)xhci->cap_regs + offset;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Doorbell array is located at offset 0x%x from cap regs base addr", offset);
+}
+
+/*
+ * Enable USB 3.0 device notifications for function remote wake, which is necessary
+ * for allowing USB 3.0 devices to do remote wakeup from U3 (device suspend).
+ */
+static void xhci_set_dev_notifications(struct xhci_hcd *xhci)
+{
+ u32 dev_notf;
+
+ dev_notf = readl(&xhci->op_regs->dev_notification);
+ dev_notf &= ~DEV_NOTE_MASK;
+ dev_notf |= DEV_NOTE_FWAKE;
+ writel(dev_notf, &xhci->op_regs->dev_notification);
+}
/*
* Initialize memory for HCD and xHC (one-time init).
@@ -471,18 +537,37 @@ static int xhci_init(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int retval;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Starting %s", __func__);
spin_lock_init(&xhci->lock);
- if (xhci->hci_version == 0x95 && link_quirk) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "QUIRK: Not clearing Link TRB chain bits.");
- xhci->quirks |= XHCI_LINK_TRB_QUIRK;
- } else {
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "xHCI doesn't need link TRB QUIRK");
- }
+
+ INIT_LIST_HEAD(&xhci->cmd_list);
+ INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
+ init_completion(&xhci->cmd_ring_stop_completion);
+ xhci_hcd_page_size(xhci);
+ memset(xhci->devs, 0, MAX_HC_SLOTS * sizeof(*xhci->devs));
+
retval = xhci_mem_init(xhci, GFP_KERNEL);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
+ if (retval)
+ return retval;
+
+ /* Set the Number of Device Slots Enabled to the maximum supported value */
+ xhci_enable_max_dev_slots(xhci);
+
+ /* Set the address in the Command Ring Control register */
+ xhci_set_cmd_ring_deq(xhci);
+
+ /* Set Device Context Base Address Array pointer */
+ xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);
+
+ /* Set Doorbell array pointer */
+ xhci_set_doorbell_ptr(xhci);
+
+ /* Set USB 3.0 device notifications for function remote wake */
+ xhci_set_dev_notifications(xhci);
+
+ /* Initialize the Primary interrupter */
+ xhci_add_interrupter(xhci, 0);
+ xhci->interrupters[0]->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
/* Initializing Compliance Mode Recovery Data If Needed */
if (xhci_compliance_mode_recovery_timer_quirk_check()) {
@@ -490,7 +575,8 @@ static int xhci_init(struct usb_hcd *hcd)
compliance_mode_recovery_timer_init(xhci);
}
- return retval;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished %s", __func__);
+ return 0;
}
/*-------------------------------------------------------------------------*/
@@ -632,7 +718,7 @@ void xhci_stop(struct usb_hcd *hcd)
/* Deleting Compliance Mode Recovery Timer */
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
(!(xhci_all_ports_seen_u0(xhci)))) {
- del_timer_sync(&xhci->comp_mode_recovery_timer);
+ timer_delete_sync(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"%s: compliance mode recovery timer deleted",
__func__);
@@ -645,7 +731,7 @@ void xhci_stop(struct usb_hcd *hcd)
"// Disabling event ring interrupts");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
- xhci_disable_interrupter(ir);
+ xhci_disable_interrupter(xhci, ir);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
xhci_mem_cleanup(xhci);
@@ -677,11 +763,11 @@ void xhci_shutdown(struct usb_hcd *hcd)
xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
__func__, hcd->self.busnum);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- del_timer_sync(&hcd->rh_timer);
+ timer_delete_sync(&hcd->rh_timer);
if (xhci->shared_hcd) {
clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
- del_timer_sync(&xhci->shared_hcd->rh_timer);
+ timer_delete_sync(&xhci->shared_hcd->rh_timer);
}
spin_lock_irq(&xhci->lock);
@@ -724,8 +810,8 @@ static void xhci_save_registers(struct xhci_hcd *xhci)
ir->s3_erst_size = readl(&ir->ir_set->erst_size);
ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
- ir->s3_irq_control = readl(&ir->ir_set->irq_control);
+ ir->s3_iman = readl(&ir->ir_set->iman);
+ ir->s3_imod = readl(&ir->ir_set->imod);
}
}
@@ -748,28 +834,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
writel(ir->s3_erst_size, &ir->ir_set->erst_size);
xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
- writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
- writel(ir->s3_irq_control, &ir->ir_set->irq_control);
+ writel(ir->s3_iman, &ir->ir_set->iman);
+ writel(ir->s3_imod, &ir->ir_set->imod);
}
}
-static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
-{
- u64 val_64;
-
- /* step 2: initialize command ring buffer */
- val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
- (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
- xhci->cmd_ring->dequeue) &
- (u64) ~CMD_RING_RSVD_BITS) |
- xhci->cmd_ring->cycle_state;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Setting command ring address to 0x%llx",
- (long unsigned long) val_64);
- xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
-}
-
/*
* The whole command ring must be cleared to zero when we suspend the host.
*
@@ -785,16 +854,14 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
struct xhci_segment *seg;
ring = xhci->cmd_ring;
- seg = ring->deq_seg;
- do {
- memset(seg->trbs, 0,
- sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
- seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
- cpu_to_le32(~TRB_CYCLE);
- seg = seg->next;
- } while (seg != ring->deq_seg);
-
- xhci_initialize_ring_info(ring, 1);
+ xhci_for_each_ring_seg(ring->first_seg, seg) {
+ /* erase all TRBs before the link */
+ memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ /* clear link cycle bit */
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
+ }
+
+ xhci_initialize_ring_info(ring);
/*
* Reset the hardware dequeue pointer.
* Yes, this will need to be re-written after resume, but we're paranoid
@@ -915,10 +982,10 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
__func__, hcd->self.busnum);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- del_timer_sync(&hcd->rh_timer);
+ timer_delete_sync(&hcd->rh_timer);
if (xhci->shared_hcd) {
clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
- del_timer_sync(&xhci->shared_hcd->rh_timer);
+ timer_delete_sync(&xhci->shared_hcd->rh_timer);
}
if (xhci->quirks & XHCI_SUSPEND_DELAY)
@@ -985,7 +1052,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
*/
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
(!(xhci_all_ports_seen_u0(xhci)))) {
- del_timer_sync(&xhci->comp_mode_recovery_timer);
+ timer_delete_sync(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"%s: compliance mode recovery timer deleted",
__func__);
@@ -1001,16 +1068,14 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
* This is called when the machine transition from S3/S4 mode.
*
*/
-int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume)
{
- bool hibernated = (msg.event == PM_EVENT_RESTORE);
u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
int retval = 0;
bool comp_timer_running = false;
bool pending_portevent = false;
bool suspended_usb3_devs = false;
- bool reinit_xhc = false;
if (!hcd->state)
return 0;
@@ -1029,10 +1094,10 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
spin_lock_irq(&xhci->lock);
- if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
- reinit_xhc = true;
+ if (xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
+ power_lost = true;
- if (!reinit_xhc) {
+ if (!power_lost) {
/*
* Some controllers might lose power during suspend, so wait
* for controller not ready bit to clear, just as in xHC init.
@@ -1072,15 +1137,15 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
/* re-initialize the HC on Restore Error, or Host Controller Error */
if ((temp & (STS_SRE | STS_HCE)) &&
!(xhci->xhc_state & XHCI_STATE_REMOVING)) {
- reinit_xhc = true;
- if (!xhci->broken_suspend)
+ if (!power_lost)
xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+ power_lost = true;
}
- if (reinit_xhc) {
+ if (power_lost) {
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
!(xhci_all_ports_seen_u0(xhci))) {
- del_timer_sync(&xhci->comp_mode_recovery_timer);
+ timer_delete_sync(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Compliance Mode Recovery Timer deleted!");
}
@@ -1093,7 +1158,10 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
xhci_zero_64b_regs(xhci);
- retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
+ if (xhci->xhc_state & XHCI_STATE_REMOVING)
+ retval = -ENODEV;
+ else
+ retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
spin_unlock_irq(&xhci->lock);
if (retval)
return retval;
@@ -1101,7 +1169,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
- xhci_disable_interrupter(xhci->interrupters[0]);
+ xhci_disable_interrupter(xhci, xhci->interrupters[0]);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -1125,10 +1193,20 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
xhci_dbg(xhci, "Start the secondary HCD\n");
retval = xhci_run(xhci->shared_hcd);
}
-
+ if (retval)
+ return retval;
+ /*
+ * Resume roothubs unconditionally as PORTSC change bits are not
+ * immediately visible after xHC reset
+ */
hcd->state = HC_STATE_SUSPENDED;
- if (xhci->shared_hcd)
+
+ if (xhci->shared_hcd) {
xhci->shared_hcd->state = HC_STATE_SUSPENDED;
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
+ }
+ usb_hcd_resume_root_hub(hcd);
+
goto done;
}
@@ -1152,7 +1230,6 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
xhci_dbc_resume(xhci);
- done:
if (retval == 0) {
/*
* Resume roothubs only if there are pending events.
@@ -1166,8 +1243,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
pending_portevent = xhci_pending_portevent(xhci);
- if (suspended_usb3_devs && !pending_portevent &&
- msg.event == PM_EVENT_AUTO_RESUME) {
+ if (suspended_usb3_devs && !pending_portevent && is_auto_resume) {
msleep(120);
pending_portevent = xhci_pending_portevent(xhci);
}
@@ -1178,6 +1254,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
usb_hcd_resume_root_hub(hcd);
}
}
+done:
/*
* If system is subject to the Quirk, Compliance Mode Timer needs to
* be re-initialized Always after a system resume. Ports are subject
@@ -1359,6 +1436,7 @@ static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
* xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
* HCDs. Find the index for an endpoint given its descriptor. Use the return
* value to right shift 1 for the bitmask.
+ * @desc: USB endpoint descriptor to determine index for
*
* Index = (epnum * 2) + direction - 1,
* where direction = 0 for OUT, 1 for IN.
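Worked examples of the formula: EP1 OUT is (1 * 2) + 0 - 1 = 1, EP1 IN is (1 * 2) + 1 - 1 = 2, and EP15 IN is 30; endpoint 0 (the control endpoint) maps to index 0 rather than going through the formula. As a standalone sketch:

/* Index math for non-control endpoints; dir_in is 1 for IN, 0 for OUT */
static unsigned int ep_index(unsigned int ep_num, unsigned int dir_in)
{
	return ep_num * 2 + dir_in - 1;
}
/* ep_index(1, 0) == 1, ep_index(1, 1) == 2, ep_index(15, 1) == 30 */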
@@ -1746,7 +1824,7 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
urb->ep->desc.bEndpointAddress,
(unsigned long long) xhci_trb_virt_to_dma(
urb_priv->td[i].start_seg,
- urb_priv->td[i].first_trb));
+ urb_priv->td[i].start_trb));
for (; i < urb_priv->num_tds; i++) {
td = &urb_priv->td[i];
@@ -1758,15 +1836,27 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
}
}
- /* Queue a stop endpoint command, but only if this is
- * the first cancellation to be handled.
- */
- if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
+ /* These completion handlers will sort out cancelled TDs for us */
+ if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) {
+ xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n",
+ urb->dev->slot_id, ep_index, ep->ep_state);
+ goto done;
+ }
+
+ /* In this case no commands are pending but the endpoint is stopped */
+ if (ep->ep_state & EP_CLEARING_TT) {
+ /* and cancelled TDs can be given back right away */
+ xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n",
+ urb->dev->slot_id, ep_index, ep->ep_state);
+ xhci_process_cancelled_tds(ep);
+ } else {
+ /* Otherwise, queue a new Stop Endpoint command */
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
ret = -ENOMEM;
goto done;
}
+ ep->stop_time = jiffies;
ep->ep_state |= EP_STOP_CMD_PENDING;
xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
ep_index, 0);
@@ -2784,6 +2874,51 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
return -ENOMEM;
}
+/*
+ * Synchronous XHCI stop endpoint helper. Issues the stop endpoint command and
+ * waits for the command completion before returning. This does not call
+ * xhci_handle_cmd_stop_ep(), which has additional handling for 'context error'
+ * cases, along with transfer ring cleanup.
+ *
+ * xhci_stop_endpoint_sync() is intended to be utilized by clients that manage
+ * their own transfer ring, such as offload situations.
+ */
+int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend,
+ gfp_t gfp_flags)
+{
+ struct xhci_command *command;
+ unsigned long flags;
+ int ret;
+
+ command = xhci_alloc_command(xhci, true, gfp_flags);
+ if (!command)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id,
+ ep->ep_index, suspend);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto out;
+ }
+
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ wait_for_completion(command->completion);
+
+ /* No handling for COMP_CONTEXT_STATE_ERROR done at command completion */
+ if (command->status == COMP_COMMAND_ABORTED ||
+ command->status == COMP_COMMAND_RING_STOPPED) {
+ xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
+ ret = -ETIME;
+ }
+out:
+ xhci_free_command(xhci, command);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xhci_stop_endpoint_sync);
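A hypothetical caller-side sketch, e.g. an offload driver that owns its transfer ring; the endpoint argument, the suspend = 0 choice, and the error handling here are illustrative only:

static int demo_offload_stop_ep(struct xhci_hcd *xhci, struct xhci_virt_ep *ep)
{
	int ret;

	/* suspend = 0: plain Stop Endpoint, no suspend semantics */
	ret = xhci_stop_endpoint_sync(xhci, ep, 0, GFP_KERNEL);
	if (ret)
		xhci_warn(xhci, "failed to stop offloaded endpoint: %d\n", ret);
	return ret;
}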
/* Issue a configure endpoint command or evaluate context command
* and wait for it to finish.
@@ -2827,7 +2962,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
xhci->num_active_eps);
return -ENOMEM;
}
- if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
+ if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change &&
xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
xhci_free_host_resources(xhci, ctrl_ctx);
@@ -3032,6 +3167,42 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
+/* Get the available bandwidth of the ports under the xhci roothub */
+int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ u8 dev_speed)
+{
+ struct xhci_command *cmd;
+ unsigned long flags;
+ int ret;
+
+ if (!ctx || !xhci)
+ return -EINVAL;
+
+ cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->in_ctx = ctx;
+
+ /* Get xhci port bandwidth; refer to xHCI rev 1.2, section 4.6.15 */
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ ret = xhci_queue_get_port_bw(xhci, cmd, ctx->dma, dev_speed, 0);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto err_out;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ wait_for_completion(cmd->completion);
+err_out:
+ kfree(cmd->completion);
+ kfree(cmd);
+
+ return ret;
+}
+
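How the filled context is consumed is not shown here; if the Port Bandwidth Context of xHCI 1.2 section 6.2.6 is the layout in use, it is one byte per root-hub port (entry 0 reserved), each byte a percentage of bandwidth still available, roughly:

/* Assumed consumer of the filled Port Bandwidth Context (xHCI 1.2, 6.2.6):
 * bw_ctx[1..num_ports] each hold a percentage of available bandwidth.
 */
static void demo_dump_port_bw(struct xhci_hcd *xhci, const u8 *bw_ctx,
			      unsigned int num_ports)
{
	unsigned int i;

	for (i = 1; i <= num_ports; i++)
		xhci_dbg(xhci, "port %u: %u%% bandwidth available\n",
			 i, bw_ctx[i]);
}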
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
@@ -3682,6 +3853,8 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
xhci->num_active_eps);
}
+static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+
/*
* This submits a Reset Device Command, which will set the device state to 0,
* set the device address to 0, and disable all the endpoints except the default
@@ -3752,6 +3925,23 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
SLOT_STATE_DISABLED)
return 0;
+ if (xhci->quirks & XHCI_ETRON_HOST) {
+ /*
+ * Obtaining a new device slot to inform the xHCI host that
+ * the USB device has been reset.
+ */
+ ret = xhci_disable_slot(xhci, udev->slot_id);
+ xhci_free_virt_device(xhci, udev->slot_id);
+ if (!ret) {
+ ret = xhci_alloc_dev(hcd, udev);
+ if (ret == 1)
+ ret = 0;
+ else
+ ret = -EINVAL;
+ }
+ return ret;
+ }
+
trace_xhci_discover_or_reset_device(slot_ctx);
xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
@@ -3835,6 +4025,8 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
}
if (ep->ring) {
+ if (ep->sideband)
+ xhci_sideband_notify_ep_ring_free(ep->sideband, i);
xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
xhci_free_endpoint_ring(xhci, virt_dev, i);
}
@@ -4190,8 +4382,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_unlock(&xhci->mutex);
ret = xhci_disable_slot(xhci, udev->slot_id);
xhci_free_virt_device(xhci, udev->slot_id);
- if (!ret)
- xhci_alloc_dev(hcd, udev);
+ if (!ret) {
+ if (xhci_alloc_dev(hcd, udev) == 1)
+ xhci_setup_addressable_virt_dev(xhci, udev);
+ }
kfree(command->completion);
kfree(command);
return -EPROTO;
@@ -4447,7 +4641,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
hlpm_addr = ports[port_num]->addr + PORTHLPMC;
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
- enable ? "enable" : "disable", port_num + 1);
+ str_enable_disable(enable), port_num + 1);
if (enable) {
/* Host supports BESL timeout instead of HIRD */
@@ -4507,35 +4701,27 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
return 0;
}
-/* check if a usb2 port supports a given extened capability protocol
- * only USB2 ports extended protocol capability values are cached.
- * Return 1 if capability is supported
- */
-static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
- unsigned capability)
-{
- u32 port_offset, port_count;
- int i;
-
- for (i = 0; i < xhci->num_ext_caps; i++) {
- if (xhci->ext_caps[i] & capability) {
- /* port offsets starts at 1 */
- port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
- port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
- if (port >= port_offset &&
- port < port_offset + port_count)
- return 1;
- }
- }
- return 0;
-}
-
static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- int portnum = udev->portnum - 1;
+ struct xhci_port *port;
+ u32 capability;
- if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
+ /* Check if USB3 device at root port is tunneled over USB4 */
+ if (hcd->speed >= HCD_USB3 && !udev->parent->parent) {
+ port = xhci->usb3_rhub.ports[udev->portnum - 1];
+
+ udev->tunnel_mode = xhci_port_is_tunneled(xhci, port);
+ if (udev->tunnel_mode == USB_LINK_UNKNOWN)
+ dev_dbg(&udev->dev, "link tunnel state unknown\n");
+ else if (udev->tunnel_mode == USB_LINK_TUNNELED)
+ dev_dbg(&udev->dev, "tunneled over USB4 link\n");
+ else if (udev->tunnel_mode == USB_LINK_NATIVE)
+ dev_dbg(&udev->dev, "native USB 3.x link\n");
+ return 0;
+ }
+
+ if (hcd->speed >= HCD_USB3 || !udev->lpm_capable || !xhci->hw_lpm_support)
return 0;
/* we only support lpm for non-hub device connected to root hub yet */
@@ -4543,14 +4729,14 @@ static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
udev->descriptor.bDeviceClass == USB_CLASS_HUB)
return 0;
- if (xhci->hw_lpm_support == 1 &&
- xhci_check_usb2_port_capability(
- xhci, portnum, XHCI_HLC)) {
+ port = xhci->usb2_rhub.ports[udev->portnum - 1];
+ capability = port->port_cap->protocol_caps;
+
+ if (capability & XHCI_HLC) {
udev->usb2_hw_lpm_capable = 1;
udev->l1_params.timeout = XHCI_L1_TIMEOUT;
udev->l1_params.besl = XHCI_DEFAULT_BESL;
- if (xhci_check_usb2_port_capability(xhci, portnum,
- XHCI_BLC))
+ if (capability & XHCI_BLC)
udev->usb2_hw_lpm_besl_capable = 1;
}
@@ -4686,8 +4872,8 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
*/
if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
return timeout_ns;
- dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
- "due to long timeout %llu ms\n", timeout_ns);
+ dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %lluus\n",
+ timeout_ns);
return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}
@@ -4744,8 +4930,8 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
*/
if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
return timeout_ns;
- dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
- "due to long timeout %llu ms\n", timeout_ns);
+ dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %lluus\n",
+ timeout_ns * 256);
return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}
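The message changes above are unit corrections: at these points the U1 value is already in microseconds, while the U2 value counts the field's 256 us granularity, hence the extra '* 256' only in the U2 message. The conversion, spelled out:

/* U2 timeout fields count 256 us units (USB 3.x U2 inactivity granularity) */
static u64 u2_timeout_to_us(u64 u2_timeout)
{
	return u2_timeout * 256;	/* e.g. 32 units -> 8192 us */
}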
@@ -5247,6 +5433,11 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
if (xhci->hci_version > 0x96)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ if (xhci->hci_version == 0x95 && link_quirk) {
+ xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits");
+ xhci->quirks |= XHCI_LINK_TRB_QUIRK;
+ }
+
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)