Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r--  drivers/usb/host/xhci-mem.c  1379
1 file changed, 668 insertions, 711 deletions
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 8067f178fa84..c708bdd69f16 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -9,6 +9,7 @@
*/
#include <linux/usb.h>
+#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
@@ -26,13 +27,12 @@
* "All components of all Command and Transfer TRBs shall be initialized to '0'"
*/
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
- unsigned int cycle_state,
unsigned int max_packet,
+ unsigned int num,
gfp_t flags)
{
struct xhci_segment *seg;
dma_addr_t dma;
- int i;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
@@ -54,11 +54,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
return NULL;
}
}
- /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
- if (cycle_state == 0) {
- for (i = 0; i < TRBS_PER_SEGMENT; i++)
- seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
- }
+ seg->num = num;
seg->dma = dma;
seg->next = NULL;
@@ -75,79 +71,105 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
kfree(seg);
}
-static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
- struct xhci_segment *first)
+static void xhci_ring_segments_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
- struct xhci_segment *seg;
+ struct xhci_segment *seg, *next;
+
+ ring->last_seg->next = NULL;
+ seg = ring->first_seg;
- seg = first->next;
- while (seg != first) {
- struct xhci_segment *next = seg->next;
+ while (seg) {
+ next = seg->next;
xhci_segment_free(xhci, seg);
seg = next;
}
- xhci_segment_free(xhci, first);
}
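
The rewritten free path above first breaks the ring's circular link at last_seg, turning the ring into a NULL-terminated list that a plain walk can free exactly once. A minimal standalone sketch of the same pattern, using a toy seg type in place of the real struct xhci_segment:

#include <stdlib.h>

struct seg { struct seg *next; int num; };

/* Break the circular link first, then a plain NULL-terminated walk
 * frees every segment exactly once -- the same pattern the new
 * xhci_ring_segments_free() uses. */
static void ring_free(struct seg *first, struct seg *last)
{
	struct seg *next;

	last->next = NULL;		/* turn the ring into a list */
	while (first) {
		next = first->next;
		free(first);
		first = next;
	}
}

int main(void)
{
	struct seg *first = NULL, *prev = NULL;

	for (int i = 0; i < 3; i++) {
		struct seg *s = calloc(1, sizeof(*s));
		s->num = i;
		if (prev)
			prev->next = s;
		else
			first = s;
		prev = s;
	}
	prev->next = first;	/* close the ring; xHCI rings are circular */
	ring_free(first, prev);
	return 0;
}
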
/*
- * Make the prev segment point to the next segment.
+ * Only for transfer and command rings, where the driver is the producer, not for
+ * event rings.
*
- * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * Change the last TRB in the segment to be a Link TRB which points to the
* DMA address of the next segment. The caller needs to set any Link TRB
* related flags, such as End TRB, Toggle Cycle, and no snoop.
*/
-static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
- struct xhci_segment *next, enum xhci_ring_type type)
+static void xhci_set_link_trb(struct xhci_segment *seg, bool chain_links)
{
+ union xhci_trb *trb;
u32 val;
- if (!prev || !next)
+ if (!seg || !seg->next)
return;
- prev->next = next;
- if (type != TYPE_EVENT) {
- prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
- cpu_to_le64(next->dma);
- /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
- val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
- val &= ~TRB_TYPE_BITMASK;
- val |= TRB_TYPE(TRB_LINK);
- /* Always set the chain bit with 0.95 hardware */
- /* Set chain bit for isoc rings on AMD 0.96 host */
- if (xhci_link_trb_quirk(xhci) ||
- (type == TYPE_ISOC &&
- (xhci->quirks & XHCI_AMD_0x96_HOST)))
- val |= TRB_CHAIN;
- prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
- }
+ trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
+
+ /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
+ val = le32_to_cpu(trb->link.control);
+ val &= ~TRB_TYPE_BITMASK;
+ val |= TRB_TYPE(TRB_LINK);
+ if (chain_links)
+ val |= TRB_CHAIN;
+ trb->link.control = cpu_to_le32(val);
+ trb->link.segment_ptr = cpu_to_le64(seg->next->dma);
+}
+
+static void xhci_initialize_ring_segments(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ struct xhci_segment *seg;
+ bool chain_links;
+
+ if (ring->type == TYPE_EVENT)
+ return;
+
+ chain_links = xhci_link_chain_quirk(xhci, ring->type);
+ xhci_for_each_ring_seg(ring->first_seg, seg)
+ xhci_set_link_trb(seg, chain_links);
+
+ /* See section 4.9.2.1 and 6.4.4.1 */
+ ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= cpu_to_le32(LINK_TOGGLE);
}
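
xhci_set_link_trb() only rewrites the TRB type field and, on hosts with the link-chain quirk, the chain bit; the toggle-cycle bit is applied once, to the last segment only. A standalone sketch of the bit manipulation, with field positions taken from the xHCI spec's Link TRB layout (macro values here are illustrative, not copied from xhci.h):

#include <stdint.h>
#include <stdio.h>

/* Field positions from the xHCI spec (Link TRB, dword 3). */
#define TRB_CYCLE        (1u << 0)	/* C  */
#define LINK_TOGGLE      (1u << 1)	/* TC */
#define TRB_CHAIN        (1u << 4)	/* CH */
#define TRB_TYPE(t)      ((uint32_t)(t) << 10)
#define TRB_TYPE_BITMASK (0x3fu << 10)
#define TRB_LINK         6

/* Mirror of xhci_set_link_trb(): rewrite the type field in place,
 * optionally setting the chain bit for quirky hosts. */
static uint32_t make_link_control(uint32_t old, int chain_links)
{
	uint32_t val = old & ~TRB_TYPE_BITMASK;

	val |= TRB_TYPE(TRB_LINK);
	if (chain_links)
		val |= TRB_CHAIN;
	return val;
}

int main(void)
{
	uint32_t ctrl = make_link_control(0, 1);

	/* Only the last segment's link TRB carries the toggle-cycle bit
	 * (xHCI sections 4.9.2.1 and 6.4.4.1). */
	ctrl |= LINK_TOGGLE;
	printf("link TRB control = 0x%08x\n", ctrl);
	return 0;
}
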
/*
- * Link the ring to the new segments.
+ * Link the src ring segments to the dst ring.
* Set Toggle Cycle for the new ring if needed.
*/
-static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
- struct xhci_segment *first, struct xhci_segment *last,
- unsigned int num_segs)
+static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *src, struct xhci_ring *dst)
{
- struct xhci_segment *next;
+ struct xhci_segment *seg;
+ bool chain_links;
- if (!ring || !first || !last)
+ if (!src || !dst)
return;
- next = ring->enq_seg->next;
- xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
- xhci_link_segments(xhci, last, next, ring->type);
- ring->num_segs += num_segs;
- ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
+ /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
+ if (dst->cycle_state == 0) {
+ xhci_for_each_ring_seg(src->first_seg, seg) {
+ for (int i = 0; i < TRBS_PER_SEGMENT; i++)
+ seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+ }
+ }
- if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
- ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
- &= ~cpu_to_le32(LINK_TOGGLE);
- last->trbs[TRBS_PER_SEGMENT-1].link.control
- |= cpu_to_le32(LINK_TOGGLE);
- ring->last_seg = last;
+ src->last_seg->next = dst->enq_seg->next;
+ dst->enq_seg->next = src->first_seg;
+ if (dst->type != TYPE_EVENT) {
+ chain_links = xhci_link_chain_quirk(xhci, dst->type);
+ xhci_set_link_trb(dst->enq_seg, chain_links);
+ xhci_set_link_trb(src->last_seg, chain_links);
}
+ dst->num_segs += src->num_segs;
+
+ if (dst->enq_seg == dst->last_seg) {
+ if (dst->type != TYPE_EVENT)
+ dst->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
+ &= ~cpu_to_le32(LINK_TOGGLE);
+
+ dst->last_seg = src->last_seg;
+ } else if (dst->type != TYPE_EVENT) {
+ src->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control &= ~cpu_to_le32(LINK_TOGGLE);
+ }
+
+ for (seg = dst->enq_seg; seg != dst->last_seg; seg = seg->next)
+ seg->next->num = seg->num + 1;
}
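
The splice in xhci_link_rings() inserts the src segments right after dst's enqueue segment and then renumbers every segment downstream of the splice point. A toy, NULL-terminated sketch of that tail logic (the real rings are circular, so the kernel loop stops at last_seg instead):

#include <stdio.h>

struct seg { struct seg *next; unsigned int num; };

/* Splice src's segments in after dst's enqueue segment, then renumber
 * everything downstream, as the tail of xhci_link_rings() does. */
static void splice_after(struct seg *enq, struct seg *src_first,
			 struct seg *src_last)
{
	struct seg *seg;

	src_last->next = enq->next;
	enq->next = src_first;
	for (seg = enq; seg->next; seg = seg->next)
		seg->next->num = seg->num + 1;
}

int main(void)
{
	struct seg a = { 0 }, b = { 0 }, x = { 0 }, y = { 0 };

	a.next = &b;		/* dst: a -> b */
	x.next = &y;		/* src: x -> y */
	splice_after(&a, &x, &y);
	for (struct seg *s = &a; s; s = s->next)
		printf("seg %u\n", s->num);	/* prints 0 1 2 3 */
	return 0;
}
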
/*
@@ -217,7 +239,6 @@ static int xhci_update_stream_segment_mapping(
struct radix_tree_root *trb_address_map,
struct xhci_ring *ring,
struct xhci_segment *first_seg,
- struct xhci_segment *last_seg,
gfp_t mem_flags)
{
struct xhci_segment *seg;
@@ -227,28 +248,22 @@ static int xhci_update_stream_segment_mapping(
if (WARN_ON_ONCE(trb_address_map == NULL))
return 0;
- seg = first_seg;
- do {
+ xhci_for_each_ring_seg(first_seg, seg) {
ret = xhci_insert_segment_mapping(trb_address_map,
ring, seg, mem_flags);
if (ret)
goto remove_streams;
- if (seg == last_seg)
- return 0;
- seg = seg->next;
- } while (seg != first_seg);
+ }
return 0;
remove_streams:
failed_seg = seg;
- seg = first_seg;
- do {
+ xhci_for_each_ring_seg(first_seg, seg) {
xhci_remove_segment_mapping(trb_address_map, seg);
if (seg == failed_seg)
return ret;
- seg = seg->next;
- } while (seg != first_seg);
+ }
return ret;
}
@@ -260,17 +275,14 @@ static void xhci_remove_stream_mapping(struct xhci_ring *ring)
if (WARN_ON_ONCE(ring->trb_address_map == NULL))
return;
- seg = ring->first_seg;
- do {
+ xhci_for_each_ring_seg(ring->first_seg, seg)
xhci_remove_segment_mapping(ring->trb_address_map, seg);
- seg = seg->next;
- } while (seg != ring->first_seg);
}
static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
- ring->first_seg, ring->last_seg, mem_flags);
+ ring->first_seg, mem_flags);
}
/* XXX: Do we need the hcd structure in all these functions? */
@@ -284,14 +296,13 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (ring->first_seg) {
if (ring->type == TYPE_STREAM)
xhci_remove_stream_mapping(ring);
- xhci_free_segments_for_ring(xhci, ring->first_seg);
+ xhci_ring_segments_free(xhci, ring);
}
kfree(ring);
}
-static void xhci_initialize_ring_info(struct xhci_ring *ring,
- unsigned int cycle_state)
+void xhci_initialize_ring_info(struct xhci_ring *ring)
{
/* The ring is empty, so the enqueue pointer == dequeue pointer */
ring->enqueue = ring->first_seg->trbs;
@@ -305,7 +316,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring,
* New rings are initialized with cycle state equal to 1; if we are
* handling ring expansion, set the cycle state equal to the old ring.
*/
- ring->cycle_state = cycle_state;
+ ring->cycle_state = 1;
/*
* Each segment has a link TRB, and leave an extra TRB for SW
@@ -313,55 +324,51 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring,
*/
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
+EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);
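
The num_trbs_free formula reserves one TRB per segment for the link TRB, plus one extra ring-wide so the enqueue pointer can never catch up to dequeue. A quick arithmetic check, assuming the kernel's default TRBS_PER_SEGMENT of 256:

#include <stdio.h>

#define TRBS_PER_SEGMENT 256	/* the kernel's default segment size */

int main(void)
{
	/* Each segment donates one TRB to the link TRB, and one more TRB
	 * ring-wide is held back so enqueue can never catch dequeue. */
	for (unsigned int segs = 1; segs <= 4; segs++)
		printf("%u segs -> %u free TRBs\n", segs,
		       segs * (TRBS_PER_SEGMENT - 1) - 1);
	return 0;
}
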
/* Allocate segments and link them for a ring */
-static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
- struct xhci_segment **first, struct xhci_segment **last,
- unsigned int num_segs, unsigned int cycle_state,
- enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, gfp_t flags)
{
struct xhci_segment *prev;
+ unsigned int num = 0;
- prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
+ prev = xhci_segment_alloc(xhci, ring->bounce_buf_len, num, flags);
if (!prev)
return -ENOMEM;
- num_segs--;
+ num++;
- *first = prev;
- while (num_segs > 0) {
+ ring->first_seg = prev;
+ while (num < ring->num_segs) {
struct xhci_segment *next;
- next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
- if (!next) {
- prev = *first;
- while (prev) {
- next = prev->next;
- xhci_segment_free(xhci, prev);
- prev = next;
- }
- return -ENOMEM;
- }
- xhci_link_segments(xhci, prev, next, type);
+ next = xhci_segment_alloc(xhci, ring->bounce_buf_len, num, flags);
+ if (!next)
+ goto free_segments;
+ prev->next = next;
prev = next;
- num_segs--;
+ num++;
}
- xhci_link_segments(xhci, prev, *first, type);
- *last = prev;
+ ring->last_seg = prev;
+ ring->last_seg->next = ring->first_seg;
return 0;
+
+free_segments:
+ ring->last_seg = prev;
+ xhci_ring_segments_free(xhci, ring);
+ return -ENOMEM;
}
-/**
+/*
* Create a new ring with zero or more segments.
*
* Link each segment together into a ring.
* Set the end flag and the cycle toggle bit on the last segment.
* See section 4.9.1 and figures 15 and 16.
*/
-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
- unsigned int num_segs, unsigned int cycle_state,
- enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
+ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
struct xhci_ring *ring;
int ret;
@@ -378,19 +385,12 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
if (num_segs == 0)
return ring;
- ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
- &ring->last_seg, num_segs, cycle_state, type,
- max_packet, flags);
+ ret = xhci_alloc_segments_for_ring(xhci, ring, flags);
if (ret)
goto fail;
- /* Only event ring does not use link TRB */
- if (type != TYPE_EVENT) {
- /* See section 4.9.2.1 and 6.4.4.1 */
- ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
- cpu_to_le32(LINK_TOGGLE);
- }
- xhci_initialize_ring_info(ring, cycle_state);
+ xhci_initialize_ring_segments(xhci, ring);
+ xhci_initialize_ring_info(ring);
trace_xhci_ring_alloc(ring);
return ring;
@@ -412,49 +412,41 @@ void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
* Allocate a new ring which has same segment numbers and link the two rings.
*/
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
- unsigned int num_trbs, gfp_t flags)
+ unsigned int num_new_segs, gfp_t flags)
{
- struct xhci_segment *first;
- struct xhci_segment *last;
- unsigned int num_segs;
- unsigned int num_segs_needed;
- int ret;
-
- num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
- (TRBS_PER_SEGMENT - 1);
+ struct xhci_ring new_ring;
+ int ret;
- /* Allocate number of segments we needed, or double the ring size */
- num_segs = ring->num_segs > num_segs_needed ?
- ring->num_segs : num_segs_needed;
+ if (num_new_segs == 0)
+ return 0;
- ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
- num_segs, ring->cycle_state, ring->type,
- ring->bounce_buf_len, flags);
+ new_ring.num_segs = num_new_segs;
+ new_ring.bounce_buf_len = ring->bounce_buf_len;
+ new_ring.type = ring->type;
+ ret = xhci_alloc_segments_for_ring(xhci, &new_ring, flags);
if (ret)
return -ENOMEM;
- if (ring->type == TYPE_STREAM)
- ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
- ring, first, last, flags);
- if (ret) {
- struct xhci_segment *next;
- do {
- next = first->next;
- xhci_segment_free(xhci, first);
- if (first == last)
- break;
- first = next;
- } while (true);
- return ret;
+ xhci_initialize_ring_segments(xhci, &new_ring);
+
+ if (ring->type == TYPE_STREAM) {
+ ret = xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
+ new_ring.first_seg, flags);
+ if (ret)
+ goto free_segments;
}
- xhci_link_rings(xhci, ring, first, last, num_segs);
+ xhci_link_rings(xhci, &new_ring, ring);
trace_xhci_ring_expansion(ring);
xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
"ring expansion succeed, now has %d segments",
ring->num_segs);
return 0;
+
+free_segments:
+ xhci_ring_segments_free(xhci, &new_ring);
+ return ret;
}
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -471,7 +463,7 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
return NULL;
ctx->type = type;
- ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
+ ctx->size = xhci->hcc_params & HCC_64BYTE_CONTEXT ? 2048 : 1024;
if (type == XHCI_CTX_TYPE_INPUT)
ctx->size += CTX_SIZE(xhci->hcc_params);
@@ -492,6 +484,35 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci,
kfree(ctx);
}
+struct xhci_container_ctx *xhci_alloc_port_bw_ctx(struct xhci_hcd *xhci,
+ gfp_t flags)
+{
+ struct xhci_container_ctx *ctx;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+ ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
+ if (!ctx)
+ return NULL;
+
+ ctx->size = GET_PORT_BW_ARRAY_SIZE;
+
+ ctx->bytes = dma_pool_zalloc(xhci->port_bw_pool, flags, &ctx->dma);
+ if (!ctx->bytes) {
+ kfree(ctx);
+ return NULL;
+ }
+ return ctx;
+}
+
+void xhci_free_port_bw_ctx(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx)
+{
+ if (!ctx)
+ return;
+ dma_pool_free(xhci->port_bw_pool, ctx->bytes, ctx->dma);
+ kfree(ctx);
+}
+
struct xhci_input_control_ctx *xhci_get_input_control_ctx(
struct xhci_container_ctx *ctx)
{
@@ -523,7 +544,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
return (struct xhci_ep_ctx *)
(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
-
+EXPORT_SYMBOL_GPL(xhci_get_ep_ctx);
/***************** Streams structures manipulation *************************/
@@ -532,17 +553,14 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
- size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
+ size_t size = array_size(sizeof(struct xhci_stream_ctx), num_stream_ctxs);
if (size > MEDIUM_STREAM_ARRAY_SIZE)
- dma_free_coherent(dev, size,
- stream_ctx, dma);
- else if (size <= SMALL_STREAM_ARRAY_SIZE)
- return dma_pool_free(xhci->small_streams_pool,
- stream_ctx, dma);
+ dma_free_coherent(dev, size, stream_ctx, dma);
+ else if (size > SMALL_STREAM_ARRAY_SIZE)
+ dma_pool_free(xhci->medium_streams_pool, stream_ctx, dma);
else
- return dma_pool_free(xhci->medium_streams_pool,
- stream_ctx, dma);
+ dma_pool_free(xhci->small_streams_pool, stream_ctx, dma);
}
/*
@@ -560,17 +578,14 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
gfp_t mem_flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
- size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
+ size_t size = array_size(sizeof(struct xhci_stream_ctx), num_stream_ctxs);
if (size > MEDIUM_STREAM_ARRAY_SIZE)
- return dma_alloc_coherent(dev, size,
- dma, mem_flags);
- else if (size <= SMALL_STREAM_ARRAY_SIZE)
- return dma_pool_alloc(xhci->small_streams_pool,
- mem_flags, dma);
+ return dma_alloc_coherent(dev, size, dma, mem_flags);
+ if (size > SMALL_STREAM_ARRAY_SIZE)
+ return dma_pool_zalloc(xhci->medium_streams_pool, mem_flags, dma);
else
- return dma_pool_alloc(xhci->medium_streams_pool,
- mem_flags, dma);
+ return dma_pool_zalloc(xhci->small_streams_pool, mem_flags, dma);
}
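
Both the alloc and free paths now pick the backing store from the same three size tiers, so a stream context array can never be returned to the wrong pool. A sketch of the tier selection, assuming the xhci.h thresholds (256 and 1024 bytes) and the 16-byte stream context from the xHCI spec:

#include <stdio.h>

/* Tier thresholds as defined in xhci.h. */
#define SMALL_STREAM_ARRAY_SIZE   256
#define MEDIUM_STREAM_ARRAY_SIZE 1024
#define CTX_BYTES                  16	/* one stream context, per spec */

static const char *pick_pool(size_t num_ctxs)
{
	size_t size = CTX_BYTES * num_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		return "dma_alloc_coherent";
	if (size > SMALL_STREAM_ARRAY_SIZE)
		return "medium_streams_pool";
	return "small_streams_pool";
}

int main(void)
{
	size_t n[] = { 4, 32, 256 };

	for (int i = 0; i < 3; i++)
		printf("%zu ctxs -> %s\n", n[i], pick_pool(n[i]));
	return 0;
}
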
struct xhci_ring *xhci_dma_to_transfer_ring(
@@ -583,23 +598,6 @@ struct xhci_ring *xhci_dma_to_transfer_ring(
return ep->ring;
}
-struct xhci_ring *xhci_stream_id_to_ring(
- struct xhci_virt_device *dev,
- unsigned int ep_index,
- unsigned int stream_id)
-{
- struct xhci_virt_ep *ep = &dev->eps[ep_index];
-
- if (stream_id == 0)
- return ep->ring;
- if (!ep->stream_info)
- return NULL;
-
- if (stream_id >= ep->stream_info->num_streams)
- return NULL;
- return ep->stream_info->stream_rings[stream_id];
-}
-
/*
* Change an endpoint's internal structure so it supports stream IDs. The
* number of requested streams includes stream 0, which cannot be used by device
@@ -621,8 +619,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
int ret;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
- xhci_dbg(xhci, "Allocating %u streams and %u "
- "stream context array entries.\n",
+ xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
num_streams, num_stream_ctxs);
if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
@@ -650,9 +647,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
num_stream_ctxs, &stream_info->ctx_array_dma,
mem_flags);
if (!stream_info->stream_ctx_array)
- goto cleanup_ctx;
- memset(stream_info->stream_ctx_array, 0,
- sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
+ goto cleanup_ring_array;
/* Allocate everything needed to free the stream rings later */
stream_info->free_streams_command =
@@ -669,8 +664,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
stream_info->stream_rings[cur_stream] =
- xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
- mem_flags);
+ xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags);
cur_ring = stream_info->stream_rings[cur_stream];
if (!cur_ring)
goto cleanup_rings;
@@ -682,10 +676,11 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
cur_ring->cycle_state;
stream_info->stream_ctx_array[cur_stream].stream_ring =
cpu_to_le64(addr);
- xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
- cur_stream, (unsigned long long) addr);
+ xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", cur_stream, addr);
ret = xhci_update_stream_mapping(cur_ring, mem_flags);
+
+ trace_xhci_alloc_stream_info_ctx(stream_info, cur_stream);
if (ret) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
@@ -711,6 +706,11 @@ cleanup_rings:
}
xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
+ xhci_free_stream_ctx(xhci,
+ stream_info->num_stream_ctxs,
+ stream_info->stream_ctx_array,
+ stream_info->ctx_array_dma);
+cleanup_ring_array:
kfree(stream_info->stream_rings);
cleanup_info:
kfree(stream_info);
@@ -791,14 +791,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
/***************** Device context manipulation *************************/
-static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
- struct xhci_virt_ep *ep)
-{
- timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
- 0);
- ep->xhci = xhci;
-}
-
static void xhci_free_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int slot_id)
@@ -808,15 +800,14 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
bool slot_found = false;
/* If the device never made it past the Set Address stage,
- * it may not have the real_port set correctly.
+ * it may not have the root hub port pointer set correctly.
*/
- if (virt_dev->real_port == 0 ||
- virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
- xhci_dbg(xhci, "Bad real port.\n");
+ if (!virt_dev->rhub_port) {
+ xhci_dbg(xhci, "Bad rhub port.\n");
return;
}
- tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
+ tt_list_head = &(xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* Multi-TT hubs will have more than one entry */
if (tt_info->slot_id == slot_id) {
@@ -853,7 +844,7 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
goto free_tts;
INIT_LIST_HEAD(&tt_info->tt_list);
list_add(&tt_info->tt_list,
- &xhci->rh_bw[virt_dev->real_port - 1].tts);
+ &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts);
tt_info->slot_id = virt_dev->udev->slot_id;
if (tt->multi)
tt_info->ttport = i+1;
@@ -874,21 +865,20 @@ free_tts:
* will be manipulated by the configure endpoint, allocate device, or update
* hub functions while this function is removing the TT entries from the list.
*/
-void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+void xhci_free_virt_device(struct xhci_hcd *xhci, struct xhci_virt_device *dev,
+ int slot_id)
{
- struct xhci_virt_device *dev;
int i;
int old_active_eps = 0;
/* Slot ID 0 is reserved */
- if (slot_id == 0 || !xhci->devs[slot_id])
+ if (slot_id == 0 || !dev)
return;
- dev = xhci->devs[slot_id];
-
- xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
- if (!dev)
- return;
+ /* If device ctx array still points to _this_ device, clear it */
+ if (dev->out_ctx &&
+ xhci->dcbaa->dev_context_ptrs[slot_id] == cpu_to_le64(dev->out_ctx->dma))
+ xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
trace_xhci_free_virt_device(dev);
@@ -901,15 +891,19 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (dev->eps[i].stream_info)
xhci_free_stream_info(xhci,
dev->eps[i].stream_info);
- /* Endpoints on the TT/root port lists should have been removed
- * when usb_disable_device() was called for the device.
- * We can't drop them anyway, because the udev might have gone
- * away by this point, and we can't tell what speed it was.
+ /*
+ * Endpoints are normally deleted from the bandwidth list when
+ * endpoints are dropped, before device is freed.
+ * If host is dying or being removed then endpoints aren't
+ * dropped cleanly, so delete the endpoint from list here.
+ * Only applicable for hosts with software bandwidth checking.
*/
- if (!list_empty(&dev->eps[i].bw_endpoint_list))
- xhci_warn(xhci, "Slot %u endpoint %u "
- "not removed from BW list!\n",
- slot_id, i);
+
+ if (!list_empty(&dev->eps[i].bw_endpoint_list)) {
+ list_del_init(&dev->eps[i].bw_endpoint_list);
+ xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n",
+ slot_id, i);
+ }
}
/* If this is a hub, free the TT(s) from the TT list */
xhci_free_tt_info(xhci, dev, slot_id);
@@ -923,8 +917,11 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (dev->udev && dev->udev->slot_id)
dev->udev->slot_id = 0;
- kfree(xhci->devs[slot_id]);
- xhci->devs[slot_id] = NULL;
+ if (dev->rhub_port && dev->rhub_port->slot_id == slot_id)
+ dev->rhub_port->slot_id = 0;
+ if (xhci->devs[slot_id] == dev)
+ xhci->devs[slot_id] = NULL;
+ kfree(dev);
}
/*
@@ -933,7 +930,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
* that tt_info, then free the child first. Recursive.
* We can't rely on udev at this point to find child-parent relationships.
*/
-void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *vdev;
struct list_head *tt_list_head;
@@ -944,18 +941,17 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
if (!vdev)
return;
- if (vdev->real_port == 0 ||
- vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
- xhci_dbg(xhci, "Bad vdev->real_port.\n");
+ if (!vdev->rhub_port) {
+ xhci_dbg(xhci, "Bad rhub port.\n");
goto out;
}
- tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+ tt_list_head = &(xhci->rh_bw[vdev->rhub_port->hw_portnum].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* is this a hub device that added a tt_info to the tts list */
if (tt_info->slot_id == slot_id) {
/* are any devices using this tt_info? */
- for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+ for (i = 1; i < xhci->max_slots; i++) {
vdev = xhci->devs[i];
if (vdev && (vdev->tt_info == tt_info))
xhci_free_virt_devices_depth_first(
@@ -966,7 +962,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
out:
/* we are now at a leaf device */
xhci_debugfs_remove_slot(xhci, slot_id);
- xhci_free_virt_device(xhci, slot_id);
+ xhci_free_virt_device(xhci, xhci->devs[slot_id], slot_id);
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
@@ -985,31 +981,33 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
if (!dev)
return 0;
+ dev->slot_id = slot_id;
+
/* Allocate the (output) device context that will be used in the HC. */
dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
if (!dev->out_ctx)
goto fail;
- xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
- (unsigned long long)dev->out_ctx->dma);
+ xhci_dbg(xhci, "Slot %d output ctx = 0x%pad (dma)\n", slot_id, &dev->out_ctx->dma);
/* Allocate the (input) device context for address device command */
dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
if (!dev->in_ctx)
goto fail;
- xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
- (unsigned long long)dev->in_ctx->dma);
+ xhci_dbg(xhci, "Slot %d input ctx = 0x%pad (dma)\n", slot_id, &dev->in_ctx->dma);
- /* Initialize the cancellation list and watchdog timers for each ep */
+ /* Initialize the cancellation and bandwidth list for each ep */
for (i = 0; i < 31; i++) {
- xhci_init_endpoint_timer(xhci, &dev->eps[i]);
+ dev->eps[i].ep_index = i;
+ dev->eps[i].vdev = dev;
+ dev->eps[i].xhci = xhci;
INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
}
/* Allocate endpoint 0 ring */
- dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
+ dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags);
if (!dev->eps[0].ring)
goto fail;
@@ -1064,20 +1062,20 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
* The xHCI roothub may have ports of differing speeds in any order in the port
* status registers.
*
- * The xHCI hardware wants to know the roothub port number that the USB device
+ * The xHCI hardware wants to know the roothub port that the USB device
* is attached to (or the roothub port its ancestor hub is attached to). All we
* know is the index of that port under either the USB 2.0 or the USB 3.0
* roothub, but that doesn't give us the real index into the HW port status
- * registers. Call xhci_find_raw_port_number() to get real index.
+ * registers.
*/
-static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
- struct usb_device *udev)
+static struct xhci_port *xhci_find_rhub_port(struct xhci_hcd *xhci, struct usb_device *udev)
{
struct usb_device *top_dev;
+ struct xhci_hub *rhub;
struct usb_hcd *hcd;
if (udev->speed >= USB_SPEED_SUPER)
- hcd = xhci->shared_hcd;
+ hcd = xhci_get_usb3_hcd(xhci);
else
hcd = xhci->main_hcd;
@@ -1085,7 +1083,8 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
top_dev = top_dev->parent)
/* Found device below root hub */;
- return xhci_find_raw_port_number(hcd, top_dev->portnum);
+ rhub = xhci_get_rhub(hcd);
+ return rhub->ports[top_dev->portnum - 1];
}
/* Setup an xHCI virtual device for a Set Address command */
@@ -1094,9 +1093,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep0_ctx;
struct xhci_slot_ctx *slot_ctx;
- u32 port_num;
u32 max_packets;
- struct usb_device *top_dev;
dev = xhci->devs[udev->slot_id];
/* Slot ID 0 is reserved */
@@ -1132,27 +1129,20 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
max_packets = MAX_PACKET(8);
break;
- case USB_SPEED_WIRELESS:
- xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
- return -EINVAL;
- break;
default:
/* Speed was set earlier, this shouldn't happen. */
return -EINVAL;
}
/* Find the root hub port this device is under */
- port_num = xhci_find_real_port_number(xhci, udev);
- if (!port_num)
+ dev->rhub_port = xhci_find_rhub_port(xhci, udev);
+ if (!dev->rhub_port)
return -EINVAL;
- slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
- /* Set the port number in the virtual_device to the faked port number */
- for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
- top_dev = top_dev->parent)
- /* Found device below root hub */;
- dev->fake_port = top_dev->portnum;
- dev->real_port = port_num;
- xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
- xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
+ /* Slot ID is set to the device directly below the root hub */
+ if (!udev->parent->parent)
+ dev->rhub_port->slot_id = udev->slot_id;
+ slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(dev->rhub_port->hw_portnum + 1));
+ xhci_dbg(xhci, "Slot ID %d: HW portnum %d, hcd portnum %d\n",
+ udev->slot_id, dev->rhub_port->hw_portnum, dev->rhub_port->hcd_portnum);
/* Find the right bandwidth table that this device will be a part of.
* If this is a full speed device attached directly to a root port (or a
@@ -1161,12 +1151,12 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
* will never be created for the HS root hub.
*/
if (!udev->tt || !udev->tt->hub->parent) {
- dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
+ dev->bw_table = &xhci->rh_bw[dev->rhub_port->hw_portnum].bw_table;
} else {
struct xhci_root_port_bw_info *rh_bw;
struct xhci_tt_bw_info *tt_bw;
- rh_bw = &xhci->rh_bw[port_num - 1];
+ rh_bw = &xhci->rh_bw[dev->rhub_port->hw_portnum];
/* Find the right TT. */
list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
if (tt_bw->slot_id != udev->tt->hub->slot_id)
@@ -1205,6 +1195,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
dev->eps[0].ring->cycle_state);
+ ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(8));
+
trace_xhci_setup_addressable_virt_device(dev);
/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
@@ -1302,7 +1294,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
interval = xhci_parse_microframe_interval(udev, ep);
break;
}
- /* Fall through - SS and HS isoc/int have same decoding */
+ fallthrough; /* SS and HS isoc/int have same decoding */
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
@@ -1322,7 +1314,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
* since it uses the same rules as low speed interrupt
* endpoints.
*/
- /* fall through */
+ fallthrough;
case USB_SPEED_LOW:
if (usb_endpoint_xfer_int(&ep->desc) ||
@@ -1338,18 +1330,33 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
return interval;
}
-/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
+/*
+ * xHCs without LEC use the "Mult" field in the endpoint context for SuperSpeed
+ * isoc eps, and High speed isoc eps that support bandwidth doubling. Standard
* High speed endpoint descriptors can define "the number of additional
* transaction opportunities per microframe", but that goes in the Max Burst
* endpoint context field.
*/
-static u32 xhci_get_endpoint_mult(struct usb_device *udev,
- struct usb_host_endpoint *ep)
+static u32 xhci_get_endpoint_mult(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct usb_host_endpoint *ep)
{
- if (udev->speed < USB_SPEED_SUPER ||
- !usb_endpoint_xfer_isoc(&ep->desc))
- return 0;
- return ep->ss_ep_comp.bmAttributes;
+ bool lec;
+
+ /* xHCI 1.1 with LEC set does not use mult field, except intel eUSB2 */
+ lec = xhci->hci_version > 0x100 && (xhci->hcc_params2 & HCC2_LEC);
+
+ /* eUSB2 double isoc bw devices are the only USB2 devices using mult */
+ if (usb_endpoint_is_hs_isoc_double(udev, ep) &&
+ (!lec || xhci->quirks & XHCI_INTEL_HOST))
+ return 1;
+
+ /* SuperSpeed isoc transfers on hosts without LEC uses mult field */
+ if (udev->speed >= USB_SPEED_SUPER &&
+ usb_endpoint_xfer_isoc(&ep->desc) && !lec)
+ return ep->ss_ep_comp.bmAttributes;
+
+ return 0;
}
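
A condensed decision table for the mult logic above; the boolean inputs are simplified stand-ins for the speed checks, the HCC2_LEC capability bit, and the XHCI_INTEL_HOST quirk:

#include <stdbool.h>
#include <stdio.h>

/* Decision table mirrored from the reworked xhci_get_endpoint_mult(). */
static unsigned int ep_mult(bool ss_isoc, bool hs_isoc_double, bool lec,
			    bool intel_host, unsigned int bmAttributes)
{
	if (hs_isoc_double && (!lec || intel_host))
		return 1;		/* eUSB2 double isoc bandwidth */
	if (ss_isoc && !lec)
		return bmAttributes;	/* SS isoc: mult from companion desc */
	return 0;			/* LEC hosts leave the field RsvdZ */
}

int main(void)
{
	printf("SS isoc, no LEC: %u\n", ep_mult(true, false, false, false, 2));
	printf("SS isoc, LEC:    %u\n", ep_mult(true, false, true, false, 2));
	return 0;
}
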
static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
@@ -1361,8 +1368,16 @@ static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
if (udev->speed == USB_SPEED_HIGH &&
(usb_endpoint_xfer_isoc(&ep->desc) ||
- usb_endpoint_xfer_int(&ep->desc)))
+ usb_endpoint_xfer_int(&ep->desc))) {
+ /*
+ * USB 2 Isochronous Double IN Bandwidth ECN uses fixed burst
+ * size and max packets bits 12:11 are invalid.
+ */
+ if (usb_endpoint_is_hs_isoc_double(udev, ep))
+ return 2;
+
return usb_endpoint_maxp_mult(&ep->desc) - 1;
+ }
return 0;
}
@@ -1386,35 +1401,6 @@ static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
return 0;
}
-/* Return the maximum endpoint service interval time (ESIT) payload.
- * Basically, this is the maxpacket size, multiplied by the burst size
- * and mult size.
- */
-static u32 xhci_get_max_esit_payload(struct usb_device *udev,
- struct usb_host_endpoint *ep)
-{
- int max_burst;
- int max_packet;
-
- /* Only applies for interrupt or isochronous endpoints */
- if (usb_endpoint_xfer_control(&ep->desc) ||
- usb_endpoint_xfer_bulk(&ep->desc))
- return 0;
-
- /* SuperSpeedPlus Isoc ep sending over 48k per esit */
- if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
- USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
- return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
- /* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
- else if (udev->speed >= USB_SPEED_SUPER)
- return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
-
- max_packet = usb_endpoint_maxp(&ep->desc);
- max_burst = usb_endpoint_maxp_mult(&ep->desc);
- /* A 0 in max burst means 1 transfer per ESIT */
- return max_packet * max_burst;
-}
-
/* Set up an endpoint with one ring segment. Do not allocate stream rings.
* Drivers will have to call usb_alloc_streams() to do that.
*/
@@ -1446,18 +1432,28 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ring_type = usb_endpoint_type(&ep->desc);
+ /* Ensure host supports double isoc bandwidth for eUSB2 devices */
+ if (usb_endpoint_is_hs_isoc_double(udev, ep) && !(xhci->hcc_params2 & HCC2_EUSB2_DIC)) {
+ dev_dbg(&udev->dev, "Double Isoc Bandwidth not supported by xhci\n");
+ return -EINVAL;
+ }
+
/*
* Get values to fill the endpoint context, mostly from ep descriptor.
* The average TRB buffer length for bulk endpoints is unclear as we
* have no clue on scatter gather list entry size. For Isoc and Int,
* set it to max available. See xHCI 1.1 spec 4.14.1.1 for details.
*/
- max_esit_payload = xhci_get_max_esit_payload(udev, ep);
+ max_esit_payload = usb_endpoint_max_periodic_payload(udev, ep);
interval = xhci_get_endpoint_interval(udev, ep);
/* Periodic endpoint bInterval limit quirk */
if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) {
+ if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_9) &&
+ interval >= 9) {
+ interval = 8;
+ }
if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
udev->speed >= USB_SPEED_HIGH &&
interval >= 7) {
@@ -1465,8 +1461,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
}
}
- mult = xhci_get_endpoint_mult(udev, ep);
- max_packet = usb_endpoint_maxp(&ep->desc);
+ mult = xhci_get_endpoint_mult(xhci, udev, ep);
+ max_packet = xhci_usb_endpoint_maxp(udev, ep);
max_burst = xhci_get_endpoint_max_burst(udev, ep);
avg_trb_len = max_esit_payload;
@@ -1475,19 +1471,22 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
/* Allow 3 retries for everything but isoc, set CErr = 3 */
if (!usb_endpoint_xfer_isoc(&ep->desc))
err_count = 3;
- /* Some devices get this wrong */
- if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
- max_packet = 512;
+ /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
+ if (usb_endpoint_xfer_bulk(&ep->desc)) {
+ if (udev->speed == USB_SPEED_HIGH)
+ max_packet = 512;
+ if (udev->speed == USB_SPEED_FULL) {
+ max_packet = rounddown_pow_of_two(max_packet);
+ max_packet = clamp_val(max_packet, 8, 64);
+ }
+ }
/* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
avg_trb_len = 8;
- /* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
- if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
- mult = 0;
/* Set up the endpoint ring */
virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+ xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags);
if (!virt_dev->eps[ep_index].new_ring)
return -ENOMEM;
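
The full-speed bulk clamp above normalizes a malformed wMaxPacketSize to the nearest legal value. A standalone equivalent of rounddown_pow_of_two() plus clamp_val(8, 64), with the helper name invented for the sketch:

#include <stdio.h>

/* Round a bogus full-speed bulk wMaxPacketSize down to the nearest
 * legal value (8, 16, 32 or 64). */
static unsigned int fs_bulk_maxp(unsigned int maxp)
{
	unsigned int p = 8;

	while (p * 2 <= maxp && p < 64)	/* rounddown_pow_of_two + clamp */
		p *= 2;
	return p;
}

int main(void)
{
	unsigned int bogus[] = { 9, 63, 64, 512 };

	for (int i = 0; i < 4; i++)
		printf("%u -> %u\n", bogus[i], fs_bulk_maxp(bogus[i]));
	return 0;
}
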
@@ -1659,7 +1658,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
goto fail_sp;
xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
- num_sp * sizeof(u64),
+ array_size(sizeof(u64), num_sp),
&xhci->scratchpad->sp_dma, flags);
if (!xhci->scratchpad->sp_array)
goto fail_sp2;
@@ -1684,16 +1683,15 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
return 0;
fail_sp4:
- for (i = i - 1; i >= 0; i--) {
+ while (i--)
dma_free_coherent(dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_array[i]);
- }
kfree(xhci->scratchpad->sp_buffers);
fail_sp3:
- dma_free_coherent(dev, num_sp * sizeof(u64),
+ dma_free_coherent(dev, array_size(sizeof(u64), num_sp),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
@@ -1722,7 +1720,7 @@ static void scratchpad_free(struct xhci_hcd *xhci)
xhci->scratchpad->sp_array[i]);
}
kfree(xhci->scratchpad->sp_buffers);
- dma_free_coherent(dev, num_sp * sizeof(u64),
+ dma_free_coherent(dev, array_size(sizeof(u64), num_sp),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
kfree(xhci->scratchpad);
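
Several allocations in this file now go through array_size() from the newly included <linux/overflow.h>, which saturates to SIZE_MAX on multiplication overflow so a bogus count can never wrap into an undersized buffer. An approximation of its behavior (the real helper is built on check_mul_overflow()):

#include <stdint.h>
#include <stdio.h>

/* Approximation of the kernel's array_size(): multiply two sizes and
 * saturate to SIZE_MAX on overflow. */
static size_t array_size_sketch(size_t a, size_t b)
{
	size_t prod;

	if (__builtin_mul_overflow(a, b, &prod))
		return SIZE_MAX;
	return prod;
}

int main(void)
{
	printf("%zu\n", array_size_sketch(sizeof(uint64_t), 32));
	printf("%zu\n", array_size_sketch(SIZE_MAX / 2, 4));	/* saturates */
	return 0;
}
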
@@ -1751,6 +1749,8 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
}
command->status = 0;
+ /* set default timeout to 5000 ms */
+ command->timeout_ms = XHCI_CMD_DEFAULT_TIMEOUT;
INIT_LIST_HEAD(&command->cmd_list);
return command;
}
@@ -1788,7 +1788,7 @@ void xhci_free_command(struct xhci_hcd *xhci,
kfree(command);
}
-int xhci_alloc_erst(struct xhci_hcd *xhci,
+static int xhci_alloc_erst(struct xhci_hcd *xhci,
struct xhci_ring *evt_ring,
struct xhci_erst *erst,
gfp_t flags)
@@ -1798,7 +1798,7 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
struct xhci_segment *seg;
struct xhci_erst_entry *entry;
- size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
+ size = array_size(sizeof(struct xhci_erst_entry), evt_ring->num_segs);
erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
size, &erst->erst_dma_addr, flags);
if (!erst->entries)
@@ -1818,44 +1818,106 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
return 0;
}
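
array_size() here covers the ERST entry table: one 16-byte entry per event ring segment (xHCI section 6.5). A standalone illustration of the sizing, using a hypothetical erst_entry mirroring that layout:

#include <stdint.h>
#include <stdio.h>

/* Each ERST entry is 16 bytes: a 64-bit segment base address, a 16-bit
 * segment size, and reserved space. One entry per ring segment. */
struct erst_entry {
	uint64_t seg_addr;
	uint32_t seg_size;
	uint32_t rsvd;
};

int main(void)
{
	for (unsigned int segs = 1; segs <= 4; segs++)
		printf("%u event ring segs -> %zu byte ERST\n",
		       segs, segs * sizeof(struct erst_entry));
	return 0;
}
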
-void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+static void
+xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+{
+ u32 tmp;
+
+ if (!ir)
+ return;
+
+ /*
+ * Clean out interrupter registers except ERSTBA. Clearing either the
+ * low or high 32 bits of ERSTBA immediately causes the controller to
+ * dereference the partially cleared 64 bit address, causing IOMMU error.
+ */
+ if (ir->ir_set) {
+ tmp = readl(&ir->ir_set->erst_size);
+ tmp &= ~ERST_SIZE_MASK;
+ writel(tmp, &ir->ir_set->erst_size);
+
+ xhci_update_erst_dequeue(xhci, ir, true);
+ }
+}
+
+static void
+xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
- size_t size;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ size_t erst_size;
+
+ if (!ir)
+ return;
+
+ erst_size = array_size(sizeof(struct xhci_erst_entry), ir->erst.num_entries);
+ if (ir->erst.entries)
+ dma_free_coherent(dev, erst_size,
+ ir->erst.entries,
+ ir->erst.erst_dma_addr);
+ ir->erst.entries = NULL;
- size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
- if (erst->entries)
- dma_free_coherent(dev, size,
- erst->entries,
- erst->erst_dma_addr);
- erst->entries = NULL;
+ /* free interrupter event ring */
+ if (ir->event_ring)
+ xhci_ring_free(xhci, ir->event_ring);
+
+ ir->event_ring = NULL;
+
+ kfree(ir);
}
+void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ unsigned int intr_num;
+
+ spin_lock_irq(&xhci->lock);
+
+ /* interrupter 0 is primary interrupter, don't touch it */
+ if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters) {
+ xhci_dbg(xhci, "Invalid secondary interrupter, can't remove\n");
+ spin_unlock_irq(&xhci->lock);
+ return;
+ }
+
+ /*
+ * Clean up the secondary interrupter to ensure there are no pending events.
+ * This also moves the event ring dequeue pointer back to the start.
+ */
+ xhci_skip_sec_intr_events(xhci, ir->event_ring, ir);
+ intr_num = ir->intr_num;
+
+ xhci_remove_interrupter(xhci, ir);
+ xhci->interrupters[intr_num] = NULL;
+
+ spin_unlock_irq(&xhci->lock);
+
+ xhci_free_interrupter(xhci, ir);
+}
+EXPORT_SYMBOL_GPL(xhci_remove_secondary_interrupter);
+
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
- int i, j, num_ports;
+ int i, j;
cancel_delayed_work_sync(&xhci->cmd_timer);
- xhci_free_erst(xhci, &xhci->erst);
-
- if (xhci->event_ring)
- xhci_ring_free(xhci, xhci->event_ring);
- xhci->event_ring = NULL;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
+ for (i = 0; xhci->interrupters && i < xhci->max_interrupters; i++) {
+ if (xhci->interrupters[i]) {
+ xhci_remove_interrupter(xhci, xhci->interrupters[i]);
+ xhci_free_interrupter(xhci, xhci->interrupters[i]);
+ xhci->interrupters[i] = NULL;
+ }
+ }
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed interrupters");
- if (xhci->lpm_command)
- xhci_free_command(xhci, xhci->lpm_command);
- xhci->lpm_command = NULL;
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
xhci_cleanup_command_queue(xhci);
- num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
- for (i = 0; i < num_ports && xhci->rh_bw; i++) {
+ for (i = 0; i < xhci->max_ports && xhci->rh_bw; i++) {
struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
struct list_head *ep = &bwt->interval_bw[j].endpoints;
@@ -1864,7 +1926,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
}
- for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
+ for (i = xhci->max_slots; i > 0; i--)
xhci_free_virt_devices_depth_first(xhci, i);
dma_pool_destroy(xhci->segment_pool);
@@ -1880,6 +1942,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed small stream array pool");
+ dma_pool_destroy(xhci->port_bw_pool);
+ xhci->port_bw_pool = NULL;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Freed xhci port bw array pool");
+
dma_pool_destroy(xhci->medium_streams_pool);
xhci->medium_streams_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -1895,7 +1962,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
if (!xhci->rh_bw)
goto no_bw;
- for (i = 0; i < num_ports; i++) {
+ for (i = 0; i < xhci->max_ports; i++) {
struct xhci_tt_bw_info *tt, *n;
list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
list_del(&tt->tt_list);
@@ -1912,200 +1979,39 @@ no_bw:
kfree(xhci->usb3_rhub.ports);
kfree(xhci->hw_ports);
kfree(xhci->rh_bw);
- kfree(xhci->ext_caps);
+ for (i = 0; i < xhci->num_port_caps; i++)
+ kfree(xhci->port_caps[i].psi);
+ kfree(xhci->port_caps);
+ kfree(xhci->interrupters);
+ xhci->num_port_caps = 0;
xhci->usb2_rhub.ports = NULL;
xhci->usb3_rhub.ports = NULL;
xhci->hw_ports = NULL;
xhci->rh_bw = NULL;
- xhci->ext_caps = NULL;
+ xhci->port_caps = NULL;
+ xhci->interrupters = NULL;
xhci->page_size = 0;
- xhci->page_shift = 0;
xhci->usb2_rhub.bus_state.bus_suspended = 0;
xhci->usb3_rhub.bus_state.bus_suspended = 0;
}
-static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
- struct xhci_segment *input_seg,
- union xhci_trb *start_trb,
- union xhci_trb *end_trb,
- dma_addr_t input_dma,
- struct xhci_segment *result_seg,
- char *test_name, int test_number)
+static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
- unsigned long long start_dma;
- unsigned long long end_dma;
- struct xhci_segment *seg;
-
- start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
- end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
-
- seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
- if (seg != result_seg) {
- xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
- test_name, test_number);
- xhci_warn(xhci, "Tested TRB math w/ seg %p and "
- "input DMA 0x%llx\n",
- input_seg,
- (unsigned long long) input_dma);
- xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
- "ending TRB %p (0x%llx DMA)\n",
- start_trb, start_dma,
- end_trb, end_dma);
- xhci_warn(xhci, "Expected seg %p, got seg %p\n",
- result_seg, seg);
- trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
- true);
- return -1;
- }
- return 0;
-}
-
-/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
-static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
-{
- struct {
- dma_addr_t input_dma;
- struct xhci_segment *result_seg;
- } simple_test_vector [] = {
- /* A zeroed DMA field should fail */
- { 0, NULL },
- /* One TRB before the ring start should fail */
- { xhci->event_ring->first_seg->dma - 16, NULL },
- /* One byte before the ring start should fail */
- { xhci->event_ring->first_seg->dma - 1, NULL },
- /* Starting TRB should succeed */
- { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
- /* Ending TRB should succeed */
- { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
- xhci->event_ring->first_seg },
- /* One byte after the ring end should fail */
- { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
- /* One TRB after the ring end should fail */
- { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
- /* An address of all ones should fail */
- { (dma_addr_t) (~0), NULL },
- };
- struct {
- struct xhci_segment *input_seg;
- union xhci_trb *start_trb;
- union xhci_trb *end_trb;
- dma_addr_t input_dma;
- struct xhci_segment *result_seg;
- } complex_test_vector [] = {
- /* Test feeding a valid DMA address from a different ring */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = xhci->event_ring->first_seg->trbs,
- .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
- .input_dma = xhci->cmd_ring->first_seg->dma,
- .result_seg = NULL,
- },
- /* Test feeding a valid end TRB from a different ring */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = xhci->event_ring->first_seg->trbs,
- .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
- .input_dma = xhci->cmd_ring->first_seg->dma,
- .result_seg = NULL,
- },
- /* Test feeding a valid start and end TRB from a different ring */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = xhci->cmd_ring->first_seg->trbs,
- .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
- .input_dma = xhci->cmd_ring->first_seg->dma,
- .result_seg = NULL,
- },
- /* TRB in this ring, but after this TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[0],
- .end_trb = &xhci->event_ring->first_seg->trbs[3],
- .input_dma = xhci->event_ring->first_seg->dma + 4*16,
- .result_seg = NULL,
- },
- /* TRB in this ring, but before this TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[3],
- .end_trb = &xhci->event_ring->first_seg->trbs[6],
- .input_dma = xhci->event_ring->first_seg->dma + 2*16,
- .result_seg = NULL,
- },
- /* TRB in this ring, but after this wrapped TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
- .end_trb = &xhci->event_ring->first_seg->trbs[1],
- .input_dma = xhci->event_ring->first_seg->dma + 2*16,
- .result_seg = NULL,
- },
- /* TRB in this ring, but before this wrapped TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
- .end_trb = &xhci->event_ring->first_seg->trbs[1],
- .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
- .result_seg = NULL,
- },
- /* TRB not in this ring, and we have a wrapped TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
- .end_trb = &xhci->event_ring->first_seg->trbs[1],
- .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
- .result_seg = NULL,
- },
- };
-
- unsigned int num_tests;
- int i, ret;
-
- num_tests = ARRAY_SIZE(simple_test_vector);
- for (i = 0; i < num_tests; i++) {
- ret = xhci_test_trb_in_td(xhci,
- xhci->event_ring->first_seg,
- xhci->event_ring->first_seg->trbs,
- &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
- simple_test_vector[i].input_dma,
- simple_test_vector[i].result_seg,
- "Simple", i);
- if (ret < 0)
- return ret;
- }
-
- num_tests = ARRAY_SIZE(complex_test_vector);
- for (i = 0; i < num_tests; i++) {
- ret = xhci_test_trb_in_td(xhci,
- complex_test_vector[i].input_seg,
- complex_test_vector[i].start_trb,
- complex_test_vector[i].end_trb,
- complex_test_vector[i].input_dma,
- complex_test_vector[i].result_seg,
- "Complex", i);
- if (ret < 0)
- return ret;
- }
- xhci_dbg(xhci, "TRB math tests passed.\n");
- return 0;
-}
-
-static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
-{
- u64 temp;
dma_addr_t deq;
- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue);
- if (deq == 0 && !in_interrupt())
- xhci_warn(xhci, "WARN something wrong with SW event ring "
- "dequeue ptr.\n");
+ deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
+ ir->event_ring->dequeue);
+ if (!deq)
+ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
- temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- temp &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
* there might be more events to service.
*/
- temp &= ~ERST_EHB;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Write event ring dequeue pointer, "
- "preserving EHB bit");
- xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
- &xhci->ir_set->erst_dequeue);
+ "// Write event ring dequeue pointer, preserving EHB bit");
+ xhci_write_64(xhci, deq & ERST_PTR_MASK, &ir->ir_set->erst_dequeue);
}
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
@@ -2113,9 +2019,10 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
{
u32 temp, port_offset, port_count;
int i;
- u8 major_revision, minor_revision;
+ u8 major_revision, minor_revision, tmp_minor_revision;
struct xhci_hub *rhub;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ struct xhci_port_cap *port_cap;
temp = readl(addr);
major_revision = XHCI_EXT_PORT_MAJOR(temp);
@@ -2123,63 +2030,92 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
if (major_revision == 0x03) {
rhub = &xhci->usb3_rhub;
+ /*
+ * Some hosts incorrectly use sub-minor version for minor
+ * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
+ * for bcdUSB 0x310). Since there is no USB release with sub
+ * minor version 0x301 to 0x309, we can assume that they are
+ * incorrect and fix it here.
+ */
+ if (minor_revision > 0x00 && minor_revision < 0x10)
+ minor_revision <<= 4;
+ /*
+ * Some Zhaoxin xHCI controllers follow the USB 3.1 spec
+ * but only support Gen1 speeds.
+ */
+ if (xhci->quirks & XHCI_ZHAOXIN_HOST) {
+ tmp_minor_revision = minor_revision;
+ minor_revision = 0;
+ }
+
} else if (major_revision <= 0x02) {
rhub = &xhci->usb2_rhub;
} else {
- xhci_warn(xhci, "Ignoring unknown port speed, "
- "Ext Cap %p, revision = 0x%x\n",
+ xhci_warn(xhci, "Ignoring unknown port speed, Ext Cap %p, revision = 0x%x\n",
addr, major_revision);
/* Ignoring port protocol we can't understand. FIXME */
return;
}
- rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
-
- if (rhub->min_rev < minor_revision)
- rhub->min_rev = minor_revision;
/* Port offset and count in the third dword, see section 7.2 */
temp = readl(addr + 2);
port_offset = XHCI_EXT_PORT_OFF(temp);
port_count = XHCI_EXT_PORT_COUNT(temp);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Ext Cap %p, port offset = %u, "
- "count = %u, revision = 0x%x",
- addr, port_offset, port_count, major_revision);
+ "Ext Cap %p, port offset = %u, count = %u, revision = 0x%x",
+ addr, port_offset, port_count, major_revision);
/* Port count includes the current port offset */
if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
/* WTF? "Valid values are ‘1’ to MaxPorts" */
return;
- rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
- if (rhub->psi_count) {
- rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
- GFP_KERNEL, dev_to_node(dev));
- if (!rhub->psi)
- rhub->psi_count = 0;
+ port_cap = &xhci->port_caps[xhci->num_port_caps++];
+ if (xhci->num_port_caps > max_caps)
+ return;
- rhub->psi_uid_count++;
- for (i = 0; i < rhub->psi_count; i++) {
- rhub->psi[i] = readl(addr + 4 + i);
+ port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
+
+ if (port_cap->psi_count) {
+ port_cap->psi = kcalloc_node(port_cap->psi_count,
+ sizeof(*port_cap->psi),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!port_cap->psi)
+ port_cap->psi_count = 0;
+
+ port_cap->psi_uid_count++;
+ for (i = 0; i < port_cap->psi_count; i++) {
+ port_cap->psi[i] = readl(addr + 4 + i);
/* count unique ID values, two consecutive entries can
* have the same ID if the link is asymmetric
*/
- if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
- XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
- rhub->psi_uid_count++;
+ if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
+ XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
+ port_cap->psi_uid_count++;
+
+ if (xhci->quirks & XHCI_ZHAOXIN_HOST &&
+ major_revision == 0x03 &&
+ XHCI_EXT_PORT_PSIV(port_cap->psi[i]) >= 5)
+ minor_revision = tmp_minor_revision;
xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
- XHCI_EXT_PORT_PSIV(rhub->psi[i]),
- XHCI_EXT_PORT_PSIE(rhub->psi[i]),
- XHCI_EXT_PORT_PLT(rhub->psi[i]),
- XHCI_EXT_PORT_PFD(rhub->psi[i]),
- XHCI_EXT_PORT_LP(rhub->psi[i]),
- XHCI_EXT_PORT_PSIM(rhub->psi[i]));
+ XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
+ XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
+ XHCI_EXT_PORT_PLT(port_cap->psi[i]),
+ XHCI_EXT_PORT_PFD(port_cap->psi[i]),
+ XHCI_EXT_PORT_LP(port_cap->psi[i]),
+ XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
}
}
- /* cache usb2 port capabilities */
- if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
- xhci->ext_caps[xhci->num_ext_caps++] = temp;
+
+ rhub->maj_rev = major_revision;
+
+ if (rhub->min_rev < minor_revision)
+ rhub->min_rev = minor_revision;
+
+ port_cap->maj_rev = major_revision;
+ port_cap->min_rev = minor_revision;
+ port_cap->protocol_caps = temp;
if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
(temp & XHCI_HLC)) {
@@ -2193,10 +2129,8 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
struct xhci_port *hw_port = &xhci->hw_ports[i];
/* Duplicate entry. Ignore the port if the revisions differ. */
if (hw_port->rhub) {
- xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
- " port %u\n", addr, i);
- xhci_warn(xhci, "Port was marked as USB %u, "
- "duplicated as USB %u\n",
+ xhci_warn(xhci, "Duplicate port entry, Ext Cap %p, port %u\n", addr, i);
+ xhci_warn(xhci, "Port was marked as USB %u, duplicated as USB %u\n",
hw_port->rhub->maj_rev, major_revision);
/* Only adjust the roothub port counts if we haven't
* found a similar duplicate.
@@ -2209,6 +2143,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
continue;
}
hw_port->rhub = rhub;
+ hw_port->port_cap = port_cap;
rhub->num_ports++;
}
/* FIXME: Should we disable ports not in the Extended Capabilities? */
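
The PSI dwords read in this hunk pack a speed ID, exponent, link type, duplex flag, and mantissa into one register (xHCI section 7.2.2.1.3.2). A sketch decoding a hypothetical 5 Gb/s SuperSpeed PSI value; the shift/mask values mirror the XHCI_EXT_PORT_* macros:

#include <stdint.h>
#include <stdio.h>

/* PSI dword layout per xHCI spec section 7.2.2.1.3.2. */
#define PSIV(x)  ((x) & 0xf)		/* protocol speed ID value    */
#define PSIE(x)  (((x) >> 4) & 0x3)	/* exponent: b/s, Kb/s, ...   */
#define PLT(x)   (((x) >> 6) & 0x3)	/* link type (sym/asym)       */
#define PFD(x)   (((x) >> 8) & 0x1)	/* full duplex                */
#define PSIM(x)  (((x) >> 16) & 0xffff)	/* speed mantissa             */

int main(void)
{
	/* 5 Gb/s SuperSpeed: PSIV 4, PSIE 3 (Gb/s), full duplex, mantissa 5. */
	uint32_t psi = (5u << 16) | (1u << 8) | (3u << 4) | 4u;

	printf("PSIV:%u PSIE:%u PLT:%u PFD:%u PSIM:%u\n",
	       PSIV(psi), PSIE(psi), PLT(psi), PFD(psi), PSIM(psi));
	return 0;
}
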
@@ -2223,9 +2158,12 @@ static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
if (!rhub->num_ports)
return;
- rhub->ports = kcalloc_node(rhub->num_ports, sizeof(rhub->ports), flags,
- dev_to_node(dev));
- for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
+ rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
+ flags, dev_to_node(dev));
+ if (!rhub->ports)
+ return;
+
+ for (i = 0; i < xhci->max_ports; i++) {
if (xhci->hw_ports[i].rhub != rhub ||
xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
continue;
@@ -2248,29 +2186,28 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
void __iomem *base;
u32 offset;
- unsigned int num_ports;
int i, j;
int cap_count = 0;
u32 cap_start;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
- num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
- xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
- flags, dev_to_node(dev));
+ xhci->hw_ports = kcalloc_node(xhci->max_ports, sizeof(*xhci->hw_ports),
+ flags, dev_to_node(dev));
if (!xhci->hw_ports)
return -ENOMEM;
- for (i = 0; i < num_ports; i++) {
- xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
- NUM_PORT_REGS * i;
+ for (i = 0; i < xhci->max_ports; i++) {
+ xhci->hw_ports[i].port_reg = &xhci->op_regs->port_regs[i];
xhci->hw_ports[i].hw_portnum = i;
+
+ init_completion(&xhci->hw_ports[i].rexit_done);
+ init_completion(&xhci->hw_ports[i].u3exit_done);
}
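/*
 * Sketch, not part of the patch: the register block that
 * op_regs->port_regs[i] is assumed to index. Per xHCI spec 5.4.8-5.4.11
 * every port owns four consecutive 32-bit registers (NUM_PORT_REGS == 4),
 * which is the stride the removed "port_status_base + NUM_PORT_REGS * i"
 * arithmetic computed by hand. The struct name here is illustrative.
 */
struct xhci_port_regs {
	__le32	portsc;		/* PORTSC: port status and control */
	__le32	portpmsc;	/* PORTPMSC: power management status/control */
	__le32	portli;		/* PORTLI: port link info */
	__le32	porthlpmc;	/* PORTHLPMC: USB2 hardware LPM control */
};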
- xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
- dev_to_node(dev));
+ xhci->rh_bw = kcalloc_node(xhci->max_ports, sizeof(*xhci->rh_bw), flags, dev_to_node(dev));
if (!xhci->rh_bw)
return -ENOMEM;
- for (i = 0; i < num_ports; i++) {
+ for (i = 0; i < xhci->max_ports; i++) {
struct xhci_interval_bw_table *bw_table;
INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
@@ -2294,17 +2231,16 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
XHCI_EXT_CAPS_PROTOCOL);
}
- xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
+ xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
flags, dev_to_node(dev));
- if (!xhci->ext_caps)
+ if (!xhci->port_caps)
return -ENOMEM;
offset = cap_start;
while (offset) {
- xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
- if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
- num_ports)
+ xhci_add_in_port(xhci, xhci->max_ports, base + offset, cap_count);
+ if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports == xhci->max_ports)
break;
offset = xhci_find_next_ext_cap(base, offset,
XHCI_EXT_CAPS_PROTOCOL);
@@ -2333,10 +2269,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
}
- /*
- * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
- * Not sure how the USB core will handle a hub with no ports...
- */
+ if (!xhci->usb2_rhub.num_ports)
+ xhci_info(xhci, "USB2 root hub has no ports\n");
+
+ if (!xhci->usb3_rhub.num_ports)
+ xhci_info(xhci, "USB3 root hub has no ports\n");
xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
@@ -2344,67 +2281,140 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
return 0;
}
-int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+static struct xhci_interrupter *
+xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags)
{
- dma_addr_t dma;
- struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
- unsigned int val, val2;
- u64 val_64;
- u32 page_size, temp;
- int i, ret;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ struct xhci_interrupter *ir;
+ unsigned int max_segs;
+ int ret;
- INIT_LIST_HEAD(&xhci->cmd_list);
+ if (!segs)
+ segs = ERST_DEFAULT_SEGS;
- /* init command timeout work */
- INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
- init_completion(&xhci->cmd_ring_stop_completion);
+ max_segs = BIT(HCS_ERST_MAX(xhci->hcs_params2));
+ segs = min(segs, max_segs);
- page_size = readl(&xhci->op_regs->page_size);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Supported page size register = 0x%x", page_size);
- for (i = 0; i < 16; i++) {
- if ((0x1 & page_size) != 0)
- break;
- page_size = page_size >> 1;
+ ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
+ if (!ir)
+ return NULL;
+
+ ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags);
+ if (!ir->event_ring) {
+ xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
+ kfree(ir);
+ return NULL;
}
- if (i < 16)
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Supported page size of %iK", (1 << (i+12)) / 1024);
+
+ ret = xhci_alloc_erst(xhci, ir->event_ring, &ir->erst, flags);
+ if (ret) {
+ xhci_warn(xhci, "Failed to allocate interrupter erst\n");
+ xhci_ring_free(xhci, ir->event_ring);
+ kfree(ir);
+ return NULL;
+ }
+
+ return ir;
+}
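/*
 * Worked example, not part of the patch: the segs clamp at the top of
 * xhci_alloc_interrupter() in spec terms. HCSPARAMS2.ERST Max is a 4-bit
 * exponent (xHCI spec 5.3.4); the register value below is hypothetical
 * and advertises event rings of up to 2^4 segments.
 */
u32 hcs_params2 = 0x00000040;				/* ERST Max field = 4 */
unsigned int max_segs = BIT(HCS_ERST_MAX(hcs_params2));	/* BIT(4) = 16 */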
+
+void xhci_add_interrupter(struct xhci_hcd *xhci, unsigned int intr_num)
+{
+ struct xhci_interrupter *ir;
+ u64 erst_base;
+ u32 erst_size;
+
+ ir = xhci->interrupters[intr_num];
+ ir->intr_num = intr_num;
+ ir->ir_set = &xhci->run_regs->ir_set[intr_num];
+
+ /* set ERST count with the number of entries in the segment table */
+ erst_size = readl(&ir->ir_set->erst_size);
+ erst_size &= ~ERST_SIZE_MASK;
+ erst_size |= ir->event_ring->num_segs;
+ writel(erst_size, &ir->ir_set->erst_size);
+
+ erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
+ erst_base &= ~ERST_BASE_ADDRESS_MASK;
+ erst_base |= ir->erst.erst_dma_addr & ERST_BASE_ADDRESS_MASK;
+ if (xhci->quirks & XHCI_WRITE_64_HI_LO)
+ hi_lo_writeq(erst_base, &ir->ir_set->erst_base);
else
- xhci_warn(xhci, "WARN: no supported page size\n");
- /* Use 4K pages, since that's common and the minimum the HC supports */
- xhci->page_shift = 12;
- xhci->page_size = 1 << xhci->page_shift;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "HCD page size set to %iK", xhci->page_size / 1024);
+ xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
- /*
- * Program the Number of Device Slots Enabled field in the CONFIG
- * register with the max value of slots the HC can handle.
- */
- val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// xHC can handle at most %d device slots.", val);
- val2 = readl(&xhci->op_regs->config_reg);
- val |= (val2 & ~HCS_SLOTS_MASK);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Setting Max device slots reg = 0x%x.", val);
- writel(val, &xhci->op_regs->config_reg);
+ /* Set the event ring dequeue address of this interrupter */
+ xhci_set_hc_event_deq(xhci, ir);
+}
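/*
 * Sketch, not part of the patch: what xhci_alloc_erst() is expected to
 * have written before xhci_add_interrupter() runs (xHCI spec 6.5): one
 * ERST entry per event ring segment, with seg_size counted in TRBs, not
 * bytes.
 */
struct xhci_segment *seg = ir->event_ring->first_seg;
unsigned int i;

for (i = 0; i < ir->event_ring->num_segs; i++, seg = seg->next) {
	struct xhci_erst_entry *entry = &ir->erst.entries[i];

	entry->seg_addr = cpu_to_le64(seg->dma);
	entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	entry->rsvd = 0;
}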
+
+struct xhci_interrupter *
+xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
+ u32 imod_interval, unsigned int intr_num)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_interrupter *ir;
+ unsigned int i;
+ int err = -ENOSPC;
+
+ if (!xhci->interrupters || xhci->max_interrupters <= 1 ||
+ intr_num >= xhci->max_interrupters)
+ return NULL;
+
+ ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL);
+ if (!ir)
+ return NULL;
+
+ spin_lock_irq(&xhci->lock);
+ if (!intr_num) {
+ /* Find an available secondary interrupter; interrupter 0 is reserved for the primary */
+ for (i = 1; i < xhci->max_interrupters; i++) {
+ if (!xhci->interrupters[i]) {
+ xhci->interrupters[i] = ir;
+ xhci_add_interrupter(xhci, i);
+ err = 0;
+ break;
+ }
+ }
+ } else {
+ if (!xhci->interrupters[intr_num]) {
+ xhci->interrupters[intr_num] = ir;
+ xhci_add_interrupter(xhci, intr_num);
+ err = 0;
+ }
+ }
+ spin_unlock_irq(&xhci->lock);
+
+ if (err) {
+ xhci_warn(xhci, "Failed to add secondary interrupter, max interrupters %d\n",
+ xhci->max_interrupters);
+ xhci_free_interrupter(xhci, ir);
+ return NULL;
+ }
+
+ xhci_set_interrupter_moderation(ir, imod_interval);
+
+ xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n",
+ ir->intr_num, xhci->max_interrupters);
+
+ return ir;
+}
+EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter);
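/*
 * Usage sketch, not part of the patch: a hypothetical offload driver
 * claiming a secondary interrupter. Per the logic above, segs == 0
 * requests the default ERST size and intr_num == 0 lets the core pick
 * any free slot; "hcd", the imod_interval value, and the error handling
 * are assumed caller context.
 */
struct xhci_interrupter *ir;

ir = xhci_create_secondary_interrupter(hcd, 0, 0, 0);
if (!ir)
	return -ENOMEM;
/* hand ir->event_ring segment DMA addresses to the offload engine */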
+
+int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+{
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ dma_addr_t dma;
/*
- * xHCI section 5.4.6 - doorbell array must be
+ * xHCI section 5.4.6 - Device Context array must be
* "physically contiguous and 64-byte (cache line) aligned".
*/
- xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
- flags);
+ xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, flags);
if (!xhci->dcbaa)
goto fail;
- memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
+
xhci->dcbaa->dma = dma;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Device context base array address = 0x%llx (DMA), %p (virt)",
- (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
- xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
+ "Device context base array address = 0x%pad (DMA), %p (virt)",
+ &xhci->dcbaa->dma, xhci->dcbaa);
/*
* Initialize the ring segment pool. The ring must be a contiguous
@@ -2413,142 +2423,89 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* and our use of dma addresses in the trb_address_map radix tree needs
* TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
- xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
- TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
+ if (xhci->quirks & XHCI_TRB_OVERFETCH)
+ /* Buggy HC prefetches beyond segment bounds - allocate dummy space at the end */
+ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+ TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
+ else
+ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+ TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
+ if (!xhci->segment_pool)
+ goto fail;
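/*
 * Note, not part of the patch: in dma_pool_create(name, dev, size, align,
 * boundary), "align" fixes each element's alignment and "boundary"
 * guarantees no element crosses that power-of-two address boundary.
 * Doubling all three in the quirk branch leaves a TRB_SEGMENT_SIZE dummy
 * tail after every segment, so a one-segment over-fetch past the last
 * valid TRB still lands in memory owned by the same allocation.
 */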
/* See Table 46 and Note on Figure 55 */
- xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
- 2112, 64, xhci->page_size);
- if (!xhci->segment_pool || !xhci->device_pool)
+ xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 2112, 64,
+ xhci->page_size);
+ if (!xhci->device_pool)
goto fail;
- /* Linear stream context arrays don't have any boundary restrictions,
+ /*
+ * Linear stream context arrays don't have any boundary restrictions,
* and only need to be 16-byte aligned.
*/
- xhci->small_streams_pool =
- dma_pool_create("xHCI 256 byte stream ctx arrays",
- dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
- xhci->medium_streams_pool =
- dma_pool_create("xHCI 1KB stream ctx arrays",
- dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
- /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
- * will be allocated with dma_alloc_coherent()
+ xhci->small_streams_pool = dma_pool_create("xHCI 256 byte stream ctx arrays",
+ dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
+ if (!xhci->small_streams_pool)
+ goto fail;
+
+ /*
+ * Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE will be
+ * allocated with dma_alloc_coherent().
*/
- if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
+ xhci->medium_streams_pool = dma_pool_create("xHCI 1KB stream ctx arrays",
+ dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
+ if (!xhci->medium_streams_pool)
+ goto fail;
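/*
 * Sketch, not part of the patch: how a stream context array allocator is
 * expected to choose between the two pools created above and
 * dma_alloc_coherent(); "size", "dma", "mem_flags" and "dev" are assumed
 * caller context.
 */
if (size > MEDIUM_STREAM_ARRAY_SIZE)
	stream_ctx = dma_alloc_coherent(dev, size, &dma, mem_flags);
else if (size > SMALL_STREAM_ARRAY_SIZE)
	stream_ctx = dma_pool_zalloc(xhci->medium_streams_pool, mem_flags, &dma);
else
	stream_ctx = dma_pool_zalloc(xhci->small_streams_pool, mem_flags, &dma);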
+
+ /*
+ * Refer to xHCI rev1.2 spec, section 5.3.3: the max port count is 255.
+ * Refer to section 6.4.3.14: the port bandwidth context buffer needs to
+ * be 16-byte aligned.
+ */
+ xhci->port_bw_pool = dma_pool_create("xHCI 256 port bw ctx arrays",
+ dev, GET_PORT_BW_ARRAY_SIZE, 16, 0);
+ if (!xhci->port_bw_pool)
goto fail;
/* Set up the command ring to have one segment for now. */
- xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags);
if (!xhci->cmd_ring)
goto fail;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Allocated command ring at %p", xhci->cmd_ring);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
- (unsigned long long)xhci->cmd_ring->first_seg->dma);
-
- /* Set the address in the Command Ring Control register */
- val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
- (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
- xhci->cmd_ring->cycle_state;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Setting command ring address to 0x%016llx", val_64);
- xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
- xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags);
- if (!xhci->lpm_command)
- goto fail;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocated command ring at %p", xhci->cmd_ring);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad",
+ &xhci->cmd_ring->first_seg->dma);
- /* Reserve one command ring TRB for disabling LPM.
+ /*
+ * Reserve one command ring TRB for disabling LPM.
* Since the USB core grabs the shared usb_bus bandwidth mutex before
* disabling LPM, we only need to reserve one TRB for all devices.
*/
xhci->cmd_ring_reserved_trbs++;
- val = readl(&xhci->cap_regs->db_off);
- val &= DBOFF_MASK;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Doorbell array is located at offset 0x%x"
- " from cap regs base addr", val);
- xhci->dba = (void __iomem *) xhci->cap_regs + val;
- /* Set ir_set to interrupt register set 0 */
- xhci->ir_set = &xhci->run_regs->ir_set[0];
-
- /*
- * Event ring setup: Allocate a normal ring, but also setup
- * the event ring segment table (ERST). Section 4.9.3.
- */
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
- xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
- 0, flags);
- if (!xhci->event_ring)
- goto fail;
- if (xhci_check_trb_in_td_math(xhci) < 0)
+ /* Allocate and set up primary interrupter 0 with an event ring. */
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocating primary event ring");
+ xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters),
+ flags, dev_to_node(dev));
+ if (!xhci->interrupters)
goto fail;
- ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
- if (ret)
+ xhci->interrupters[0] = xhci_alloc_interrupter(xhci, 0, flags);
+ if (!xhci->interrupters[0])
goto fail;
- /* set ERST count with the number of entries in the segment table */
- val = readl(&xhci->ir_set->erst_size);
- val &= ERST_SIZE_MASK;
- val |= ERST_NUM_SEGS;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Write ERST size = %i to ir_set 0 (some bits preserved)",
- val);
- writel(val, &xhci->ir_set->erst_size);
-
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Set ERST entries to point to event ring.");
- /* set the segment table base address */
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Set ERST base address for ir_set 0 = 0x%llx",
- (unsigned long long)xhci->erst.erst_dma_addr);
- val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
- val_64 &= ERST_PTR_MASK;
- val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
- xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
-
- /* Set the event ring dequeue address */
- xhci_set_hc_event_deq(xhci);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Wrote ERST address to ir_set 0.");
-
- /*
- * XXX: Might need to set the Interrupter Moderation Register to
- * something other than the default (~1ms minimum between interrupts).
- * See section 5.5.1.2.
- */
- for (i = 0; i < MAX_HC_SLOTS; i++)
- xhci->devs[i] = NULL;
- for (i = 0; i < USB_MAXCHILDREN; i++) {
- xhci->usb2_rhub.bus_state.resume_done[i] = 0;
- xhci->usb3_rhub.bus_state.resume_done[i] = 0;
- /* Only the USB 2.0 completions will ever be used. */
- init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
- }
-
if (scratchpad_alloc(xhci, flags))
goto fail;
+
if (xhci_setup_port_arrays(xhci, flags))
goto fail;
- /* Enable USB 3.0 device notifications for function remote wake, which
- * is necessary for allowing USB 3.0 devices to do remote wakeup from
- * U3 (device suspend).
- */
- temp = readl(&xhci->op_regs->dev_notification);
- temp &= ~DEV_NOTE_MASK;
- temp |= DEV_NOTE_FWAKE;
- writel(temp, &xhci->op_regs->dev_notification);
-
return 0;
fail:
xhci_halt(xhci);
- xhci_reset(xhci);
+ xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
xhci_mem_cleanup(xhci);
return -ENOMEM;
}