Diffstat (limited to 'drivers/usb/host/ohci-q.c')
| -rw-r--r-- | drivers/usb/host/ohci-q.c | 307 |
1 file changed, 193 insertions, 114 deletions
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index df4a6707322d..3b445312beea 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
 /*
  * OHCI HCD (Host Controller Driver) for USB.
  *
@@ -41,9 +42,13 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
 __releases(ohci->lock)
 __acquires(ohci->lock)
 {
-        struct device *dev = ohci_to_hcd(ohci)->self.controller;
+        struct device *dev = ohci_to_hcd(ohci)->self.controller;
+        struct usb_host_endpoint *ep = urb->ep;
+        struct urb_priv *urb_priv;
+
         // ASSERT (urb->hcpriv != 0);
 
+ restart:
         urb_free_priv (ohci, urb->hcpriv);
         urb->hcpriv = NULL;
         if (likely(status == -EINPROGRESS))
@@ -64,10 +69,6 @@ __acquires(ohci->lock)
                 break;
         }
 
-#ifdef OHCI_VERBOSE_DEBUG
-        urb_print(urb, "RET", usb_pipeout (urb->pipe), status);
-#endif
-
         /* urb->complete() can reenter this HCD */
         usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
         spin_unlock (&ohci->lock);
@@ -80,6 +81,21 @@ __acquires(ohci->lock)
                 ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
                 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
         }
+
+        /*
+         * An isochronous URB that is sumitted too late won't have any TDs
+         * (marked by the fact that the td_cnt value is larger than the
+         * actual number of TDs).  If the next URB on this endpoint is like
+         * that, give it back now.
+         */
+        if (!list_empty(&ep->urb_list)) {
+                urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+                urb_priv = urb->hcpriv;
+                if (urb_priv->td_cnt > urb_priv->length) {
+                        status = 0;
+                        goto restart;
+                }
+        }
 }
@@ -128,7 +144,7 @@ static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
 {
         unsigned i;
 
-        ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
+        ohci_dbg(ohci, "link %sed %p branch %d [%dus.], interval %d\n",
                 (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
                 ed, ed->branch, ed->load, ed->interval);
@@ -168,14 +184,9 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
 {
         int branch;
 
-        ed->state = ED_OPER;
         ed->ed_prev = NULL;
         ed->ed_next = NULL;
         ed->hwNextED = 0;
-        if (quirk_zfmicro(ohci)
-                        && (ed->type == PIPE_INTERRUPT)
-                        && !(ohci->eds_scheduled++))
-                mod_timer(&ohci->unlink_watchdog, round_jiffies(jiffies + HZ));
         wmb ();
 
         /* we care about rm_list when setting CLE/BLE in case the HC was at
@@ -248,6 +259,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
         /* the HC may not see the schedule updates yet, but if it does
          * then they'll be properly ordered.
          */
+
+        ed->state = ED_OPER;
         return 0;
 }
@@ -275,7 +288,7 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
         }
         ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;
-        ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
+        ohci_dbg(ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
                 (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
                 ed, ed->branch, ed->load, ed->interval);
 }
@@ -296,8 +309,7 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
  * - ED_OPER: when there's any request queued, the ED gets rescheduled
  *   immediately.  HC should be working on them.
  *
- * - ED_IDLE: when there's no TD queue. there's no reason for the HC
- *   to care about this ED; safe to disable the endpoint.
+ * - ED_IDLE: when there's no TD queue or the HC isn't running.
  *
  * When finish_unlinks() runs later, after SOF interrupt, it will often
  * complete one or more URB unlinks before making that state change.
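The finish_urb() hunks above add a restart path for isochronous URBs that were submitted too late: such URBs carry no usable TDs (their td_cnt ends up larger than length), so after the current URB is given back, the next URB queued on the same endpoint is examined and, if it can never complete, given back immediately rather than left waiting for the controller. A minimal standalone sketch of that control flow follows; the names (struct req, queue_head, give_back, dead) are invented for illustration and this is not the HCD code itself.

/*
 * Standalone sketch of the "goto restart" give-back pattern above,
 * with invented names; only the control flow is the point.
 */
#include <stdio.h>
#include <stddef.h>

struct req {
        int id;
        int dead;               /* analogous to td_cnt > length   */
        struct req *next;       /* analogous to the endpoint list */
};

static struct req *queue_head;

static void give_back(struct req *r, int status)
{
restart:
        printf("request %d done, status %d\n", r->id, status);
        queue_head = r->next;

        /* If the next queued request can never make progress, finish it
         * now instead of waiting for hardware that will not touch it. */
        r = queue_head;
        if (r && r->dead) {
                status = 0;
                goto restart;
        }
}

int main(void)
{
        struct req c = { 3, 0, NULL };
        struct req b = { 2, 1, &c };    /* submitted too late */
        struct req a = { 1, 0, &b };

        queue_head = &a;
        give_back(&a, 0);       /* finishes 1, then the dead 2; 3 remains */
        return 0;
}

Running it finishes request 1, gives back the dead request 2 from inside the same call, and leaves request 3 queued.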
@@ -397,7 +409,8 @@ static struct ed *ed_get (
         spin_lock_irqsave (&ohci->lock, flags);
 
-        if (!(ed = ep->hcpriv)) {
+        ed = ep->hcpriv;
+        if (!ed) {
                 struct td *td;
                 int is_out;
                 u32 info;
@@ -546,7 +559,6 @@ td_fill (struct ohci_hcd *ohci, u32 info,
                 td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
                 *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
                                                 (data & 0x0FFF) | 0xE000);
-                td->ed->last_iso = info & 0xffff;
         } else {
                 td->hwCBP = cpu_to_hc32 (ohci, data);
         }
@@ -588,6 +600,8 @@ static void td_submit_urb (
         u32 info = 0;
         int is_out = usb_pipeout (urb->pipe);
         int periodic = 0;
+        int i, this_sg_len, n;
+        struct scatterlist *sg;
 
         /* OHCI handles the bulk/interrupt data toggles itself.  We just
          * use the device toggle bits for resetting, and rely on the fact
@@ -601,10 +615,24 @@ static void td_submit_urb (
         list_add (&urb_priv->pending, &ohci->pending);
 
-        if (data_len)
-                data = urb->transfer_dma;
-        else
-                data = 0;
+        i = urb->num_mapped_sgs;
+        if (data_len > 0 && i > 0) {
+                sg = urb->sg;
+                data = sg_dma_address(sg);
+
+                /*
+                 * urb->transfer_buffer_length may be smaller than the
+                 * size of the scatterlist (or vice versa)
+                 */
+                this_sg_len = min_t(int, sg_dma_len(sg), data_len);
+        } else {
+                sg = NULL;
+                if (data_len)
+                        data = urb->transfer_dma;
+                else
+                        data = 0;
+                this_sg_len = data_len;
+        }
 
         /* NOTE: TD_CC is set so we can tell which TDs the HC processed by
          * using TD_CC_GET, as well as by seeing them on the done list.
@@ -619,23 +647,35 @@ static void td_submit_urb (
                 /* ... and periodic urbs have extra accounting */
                 periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
                         && ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
-                /* FALLTHROUGH */
+                fallthrough;
         case PIPE_BULK:
                 info = is_out
                         ? TD_T_TOGGLE | TD_CC | TD_DP_OUT
                         : TD_T_TOGGLE | TD_CC | TD_DP_IN;
                 /* TDs _could_ transfer up to 8K each */
-                while (data_len > 4096) {
-                        td_fill (ohci, info, data, 4096, urb, cnt);
-                        data += 4096;
-                        data_len -= 4096;
+                for (;;) {
+                        n = min(this_sg_len, 4096);
+
+                        /* maybe avoid ED halt on final TD short read */
+                        if (n >= data_len || (i == 1 && n >= this_sg_len)) {
+                                if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
+                                        info |= TD_R;
+                        }
+                        td_fill(ohci, info, data, n, urb, cnt);
+                        this_sg_len -= n;
+                        data_len -= n;
+                        data += n;
                         cnt++;
+
+                        if (this_sg_len <= 0) {
+                                if (--i <= 0 || data_len <= 0)
+                                        break;
+                                sg = sg_next(sg);
+                                data = sg_dma_address(sg);
+                                this_sg_len = min_t(int, sg_dma_len(sg),
+                                                data_len);
+                        }
                 }
-                /* maybe avoid ED halt on final TD short read */
-                if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
-                        info |= TD_R;
-                td_fill (ohci, info, data, data_len, urb, cnt);
-                cnt++;
                 if ((urb->transfer_flags & URB_ZERO_PACKET)
                                 && cnt < urb_priv->length) {
                         td_fill (ohci, info, 0, 0, urb, cnt);
@@ -747,7 +787,7 @@ static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
                 urb->iso_frame_desc [td->index].status = cc_to_error [cc];
 
                 if (cc != TD_CC_NOERROR)
-                        ohci_vdbg (ohci,
+                        ohci_dbg(ohci,
                                 "urb %p iso td %p (%d) len %d cc %d\n",
                                 urb, td, 1 + td->index, dlen, cc);
@@ -779,7 +819,7 @@ static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
         }
 
         if (cc != TD_CC_NOERROR && cc < 0x0E)
-                ohci_vdbg (ohci,
+                ohci_dbg(ohci,
                         "urb %p td %p (%d) cc %d, len=%d/%d\n",
                         urb, td, 1 + td->index, cc,
                         urb->actual_length,
@@ -839,11 +879,11 @@ static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
         case TD_DATAUNDERRUN:
                 if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
                         break;
-                /* fallthrough */
+                fallthrough;
         case TD_CC_STALL:
                 if (usb_pipecontrol (urb->pipe))
                         break;
-                /* fallthrough */
+                fallthrough;
         default:
                 ohci_dbg (ohci,
                         "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
@@ -855,13 +895,46 @@ static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
         }
 }
 
-/* replies to the request have to be on a FIFO basis so
- * we unreverse the hc-reversed done-list
- */
-static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
+/* Add a TD to the done list */
+static void add_to_done_list(struct ohci_hcd *ohci, struct td *td)
+{
+        struct td *td2, *td_prev;
+        struct ed *ed;
+
+        if (td->next_dl_td)
+                return;         /* Already on the list */
+
+        /* Add all the TDs going back until we reach one that's on the list */
+        ed = td->ed;
+        td2 = td_prev = td;
+        list_for_each_entry_continue_reverse(td2, &ed->td_list, td_list) {
+                if (td2->next_dl_td)
+                        break;
+                td2->next_dl_td = td_prev;
+                td_prev = td2;
+        }
+
+        if (ohci->dl_end)
+                ohci->dl_end->next_dl_td = td_prev;
+        else
+                ohci->dl_start = td_prev;
+
+        /*
+         * Make td->next_dl_td point to td itself, to mark the fact
+         * that td is on the done list.
+         */
+        ohci->dl_end = td->next_dl_td = td;
+
+        /* Did we just add the latest pending TD? */
+        td2 = ed->pending_td;
+        if (td2 && td2->next_dl_td)
+                ed->pending_td = NULL;
+}
+
+/* Get the entries on the hardware done queue and put them on our list */
+static void update_done_list(struct ohci_hcd *ohci)
 {
         u32 td_dma;
-        struct td *td_rev = NULL;
         struct td *td = NULL;
 
         td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
@@ -869,7 +942,7 @@ static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
         wmb();
 
         /* get TD from hc's singly linked list, and
-         * prepend to ours.  ed->td_list changes later.
+         * add to ours.  ed->td_list changes later.
          */
         while (td_dma) {
                 int cc;
@@ -891,19 +964,17 @@
                                 && (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
                         ed_halted(ohci, td, cc);
 
-                td->next_dl_td = td_rev;
-                td_rev = td;
                 td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
+                add_to_done_list(ohci, td);
         }
-        return td_rev;
 }
 
 /*-------------------------------------------------------------------------*/
 
 /* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
-static void
-finish_unlinks (struct ohci_hcd *ohci, u16 tick)
+static void finish_unlinks(struct ohci_hcd *ohci)
 {
+        unsigned tick = ohci_frame_no(ohci);
         struct ed *ed, **last;
 
 rescan_all:
@@ -915,32 +986,35 @@
                 /* only take off EDs that the HC isn't using, accounting for
                  * frame counter wraps and EDs with partially retired TDs
                  */
-                if (likely(ohci->rh_state == OHCI_RH_RUNNING)) {
-                        if (tick_before (tick, ed->tick)) {
-skip_ed:
-                                last = &ed->ed_next;
-                                continue;
-                        }
+                if (likely(ohci->rh_state == OHCI_RH_RUNNING) &&
+                                tick_before(tick, ed->tick)) {
+skip_ed:
+                        last = &ed->ed_next;
+                        continue;
+                }
+                if (!list_empty(&ed->td_list)) {
+                        struct td *td;
+                        u32 head;
 
-                        if (!list_empty (&ed->td_list)) {
-                                struct td *td;
-                                u32 head;
-
-                                td = list_entry (ed->td_list.next, struct td,
-                                                td_list);
-                                head = hc32_to_cpu (ohci, ed->hwHeadP) &
-                                                TD_MASK;
-
-                                /* INTR_WDH may need to clean up first */
-                                if (td->td_dma != head) {
-                                        if (ed == ohci->ed_to_check)
-                                                ohci->ed_to_check = NULL;
-                                        else
-                                                goto skip_ed;
-                                }
-                        }
+                        td = list_first_entry(&ed->td_list, struct td, td_list);
+
+                        /* INTR_WDH may need to clean up first */
+                        head = hc32_to_cpu(ohci, ed->hwHeadP) & TD_MASK;
+                        if (td->td_dma != head &&
+                                        ohci->rh_state == OHCI_RH_RUNNING)
+                                goto skip_ed;
+
+                        /* Don't mess up anything already on the done list */
+                        if (td->next_dl_td)
+                                goto skip_ed;
                 }
 
+                /* ED's now officially unlinked, hc doesn't see */
+                ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
+                ed->hwNextED = 0;
+                wmb();
+                ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
+
                 /* reentrancy:  if we drop the schedule lock, someone might
                  * have modified this list.  normally it's just prepending
                  * entries (which we'd ignore), but paranoia won't hurt.
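The add_to_done_list()/update_done_list() pair above replaces dl_reverse_done_list(): instead of re-reversing the controller's LIFO done queue on every interrupt, the driver now keeps its own FIFO through dl_start/dl_end and records that a TD is queued by making its next_dl_td pointer non-NULL, with the tail pointing at itself so even the last entry carries a non-NULL marker. Below is a standalone sketch of that bookkeeping using an invented struct node; pop_done_list() plays the role that process_done_list() plays further down in this diff.

/*
 * Standalone sketch of the dl_start/dl_end bookkeeping above.
 * Membership is encoded in the next pointer itself: NULL means
 * "not queued", and the tail points to itself.
 */
#include <stdio.h>
#include <stddef.h>

struct node {
        int id;
        struct node *next;      /* NULL = not on the done list */
};

static struct node *dl_start, *dl_end;

static void add_to_done_list(struct node *n)
{
        if (n->next)
                return;                 /* already queued */

        if (dl_end)
                dl_end->next = n;
        else
                dl_start = n;
        dl_end = n;
        n->next = n;                    /* self-pointer marks membership */
}

static struct node *pop_done_list(void)
{
        struct node *n = dl_start;

        if (!n)
                return NULL;
        if (n == dl_end)                /* last element: list becomes empty */
                dl_start = dl_end = NULL;
        else
                dl_start = n->next;
        n->next = NULL;                 /* no longer queued */
        return n;
}

int main(void)
{
        struct node a = { 1, NULL }, b = { 2, NULL }, *n;

        add_to_done_list(&a);
        add_to_done_list(&b);
        add_to_done_list(&a);           /* duplicate add is ignored */
        while ((n = pop_done_list()) != NULL)
                printf("node %d\n", n->id);
        return 0;
}

The duplicate add is a no-op because the self-pointer already marks the node as queued, which is the property the next_dl_td test in add_to_done_list() relies on.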
@@ -996,7 +1070,7 @@ rescan_this:
                         urb_priv->td_cnt++;
 
                         /* if URB is done, clean up */
-                        if (urb_priv->td_cnt == urb_priv->length) {
+                        if (urb_priv->td_cnt >= urb_priv->length) {
                                 modified = completed = 1;
                                 finish_urb(ohci, urb, 0);
                         }
@@ -1004,19 +1078,23 @@
                 if (completed && !list_empty (&ed->td_list))
                         goto rescan_this;
 
-                /* ED's now officially unlinked, hc doesn't see */
-                ed->state = ED_IDLE;
-                if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
-                        ohci->eds_scheduled--;
-                ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
-                ed->hwNextED = 0;
-                wmb ();
-                ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);
-
-                /* but if there's work queued, reschedule */
-                if (!list_empty (&ed->td_list)) {
-                        if (ohci->rh_state == OHCI_RH_RUNNING)
-                                ed_schedule (ohci, ed);
+                /*
+                 * If no TDs are queued, ED is now idle.
+                 * Otherwise, if the HC is running, reschedule.
+                 * If the HC isn't running, add ED back to the
+                 * start of the list for later processing.
+                 */
+                if (list_empty(&ed->td_list)) {
+                        ed->state = ED_IDLE;
+                        list_del(&ed->in_use_list);
+                } else if (ohci->rh_state == OHCI_RH_RUNNING) {
+                        ed_schedule(ohci, ed);
+                } else {
+                        ed->ed_next = ohci->ed_rm_list;
+                        ohci->ed_rm_list = ed;
+                        /* Don't loop on the same ED */
+                        if (last == &ohci->ed_rm_list)
+                                last = &ed->ed_next;
                 }
 
                 if (modified)
@@ -1068,12 +1146,7 @@
 
 /*-------------------------------------------------------------------------*/
 
-/*
- * Used to take back a TD from the host controller. This would normally be
- * called from within dl_done_list, however it may be called directly if the
- * HC no longer sees the TD and it has not appeared on the donelist (after
- * two frames). This bug has been observed on ZF Micro systems.
- */
+/* Take back a TD from the host controller */
 static void takeback_td(struct ohci_hcd *ohci, struct td *td)
 {
         struct urb *urb = td->urb;
@@ -1086,7 +1159,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
         urb_priv->td_cnt++;
 
         /* If all this urb's TDs are done, call complete() */
-        if (urb_priv->td_cnt == urb_priv->length)
+        if (urb_priv->td_cnt >= urb_priv->length)
                 finish_urb(ohci, urb, status);
 
         /* clean schedule: unlink EDs that are no longer busy */
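In the reworked finish_unlinks() above, a reclaimed ED now has three possible outcomes: with no TDs queued it becomes ED_IDLE and drops off the in-use list, with the controller running it is rescheduled, and with the controller stopped it is pushed back onto ed_rm_list, with last advanced so the scan does not spin on the entry it just re-added. (The td_cnt comparisons also relax from == to >= because an iso URB submitted too late can have td_cnt greater than length.) The sketch below shows the same pointer-to-pointer walk over a singly linked removal list; struct ed_stub, rm_list and keep are invented names, not the driver's types.

/*
 * Standalone sketch of the list walk used above: scan a singly linked
 * removal list through a pointer-to-pointer cursor, either retiring an
 * entry or pushing it back onto the head of the list for a later pass.
 */
#include <stdio.h>
#include <stddef.h>

struct ed_stub {
        int id;
        int keep;               /* still has work queued */
        struct ed_stub *next;
};

static struct ed_stub *rm_list;

static void scan_rm_list(void)
{
        struct ed_stub **last, *ed;

        for (last = &rm_list, ed = *last; ed != NULL; ed = *last) {
                if (!ed->keep) {
                        *last = ed->next;       /* retire: unlink this entry */
                        printf("ed %d idle\n", ed->id);
                        continue;
                }

                /* push the entry back onto the head for later processing */
                *last = ed->next;
                ed->next = rm_list;
                rm_list = ed;
                /* don't loop on the entry we just re-added */
                if (last == &rm_list)
                        last = &ed->next;
                printf("ed %d deferred\n", ed->id);
        }
}

int main(void)
{
        struct ed_stub c = { 3, 0, NULL };
        struct ed_stub b = { 2, 1, &c };
        struct ed_stub a = { 1, 0, &b };

        rm_list = &a;
        scan_rm_list();         /* retires 1 and 3, defers 2 */
        return 0;
}

Here entries 1 and 3 are retired while entry 2 is pushed back to the head for a later pass, and the cursor adjustment keeps the loop from revisiting it in the same pass.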
@@ -1120,37 +1193,43 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
  *
  * This is the main path for handing urbs back to drivers.  The only other
  * normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
- * instead of scanning the (re-reversed) donelist as this does.  There's
- * an abnormal path too, handling a quirk in some Compaq silicon:  URBs
- * with TDs that appear to be orphaned are directly reclaimed.
+ * instead of scanning the (re-reversed) donelist as this does.
  */
-static void
-dl_done_list (struct ohci_hcd *ohci)
+static void process_done_list(struct ohci_hcd *ohci)
 {
-        struct td *td = dl_reverse_done_list (ohci);
+        struct td *td;
 
-        while (td) {
-                struct td *td_next = td->next_dl_td;
-                struct ed *ed = td->ed;
+        while (ohci->dl_start) {
+                td = ohci->dl_start;
+                if (td == ohci->dl_end)
+                        ohci->dl_start = ohci->dl_end = NULL;
+                else
+                        ohci->dl_start = td->next_dl_td;
 
-                /*
-                 * Some OHCI controllers (NVIDIA for sure, maybe others)
-                 * occasionally forget to add TDs to the done queue.  Since
-                 * TDs for a given endpoint are always processed in order,
-                 * if we find a TD on the donelist then all of its
-                 * predecessors must be finished as well.
-                 */
-                for (;;) {
-                        struct td *td2;
+                takeback_td(ohci, td);
+        }
 }
 
-                        td2 = list_first_entry(&ed->td_list, struct td,
-                                        td_list);
-                        if (td2 == td)
-                                break;
-                        takeback_td(ohci, td2);
-                }
+/*
+ * TD takeback and URB giveback must be single-threaded.
+ * This routine takes care of it all.
+ */
+static void ohci_work(struct ohci_hcd *ohci)
+{
+        if (ohci->working) {
+                ohci->restart_work = 1;
+                return;
+        }
+        ohci->working = 1;
 
-                takeback_td(ohci, td);
-                td = td_next;
+ restart:
+        process_done_list(ohci);
+        if (ohci->ed_rm_list)
+                finish_unlinks(ohci);
+
+        if (ohci->restart_work) {
+                ohci->restart_work = 0;
+                goto restart;
         }
+        ohci->working = 0;
 }
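The new ohci_work() above makes TD takeback and URB giveback single-threaded: finish_urb() drops ohci->lock around usb_hcd_giveback_urb(), so a completion handler can re-enter the driver; a nested caller therefore only sets restart_work and returns, and the outermost invocation keeps looping until no restart was requested while it ran. A standalone sketch of that guard follows, with invented names (run_work, handle_one_item, pending); here the re-entry is plain recursion standing in for the URB completion path.

/*
 * Standalone sketch of the working/restart_work guard above.  A
 * re-entrant call records a restart request and returns; the outer
 * call keeps looping until no restart was requested while it ran.
 */
#include <stdio.h>

static int working;
static int restart_work;
static int pending = 3;         /* pretend there are three items queued */

static void run_work(void);

static void handle_one_item(void)
{
        printf("handling item, %d left\n", pending);
        if (pending == 2)
                run_work();     /* re-entrant call, e.g. from a completion */
}

static void run_work(void)
{
        if (working) {          /* already running in an outer frame */
                restart_work = 1;
                return;
        }
        working = 1;

restart:
        while (pending > 0) {
                pending--;
                handle_one_item();
        }
        if (restart_work) {     /* something was requested while we ran */
                restart_work = 0;
                goto restart;
        }
        working = 0;
}

int main(void)
{
        run_work();
        return 0;
}

The outer call drains all three items; the re-entrant call made while handling the first one only schedules one extra pass instead of running a second copy of the loop.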
