Diffstat (limited to 'drivers/usb/dwc3/gadget.c')
-rw-r--r--  drivers/usb/dwc3/gadget.c  152
1 file changed, 90 insertions(+), 62 deletions(-)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4959c26d3b71..83dc7304d701 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -197,7 +197,6 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
list_del(&req->list);
req->remaining = 0;
- req->needs_extra_trb = false;
req->num_trbs = 0;
if (req->request.status == -EINPROGRESS)
@@ -688,6 +687,44 @@ static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
}
/**
+ * dwc3_gadget_calc_ram_depth - calculates the RAM depth for TxFIFO allocation
+ * @dwc: pointer to the DWC3 context
+ */
+static int dwc3_gadget_calc_ram_depth(struct dwc3 *dwc)
+{
+ int ram_depth;
+ int fifo_0_start;
+ bool is_single_port_ram;
+
+ /* Check which type of RAM the HW supports */
+ is_single_port_ram = DWC3_SPRAM_TYPE(dwc->hwparams.hwparams1);
+
+ /*
+ * If a single port RAM is utilized, then allocate TxFIFOs from
+ * RAM0; otherwise, allocate them from RAM1.
+ */
+ ram_depth = is_single_port_ram ? DWC3_RAM0_DEPTH(dwc->hwparams.hwparams6) :
+ DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
+
+ /*
+ * In a single port RAM configuration, the available RAM is shared
+ * between the RX and TX FIFOs. This means that the txfifo can begin
+ * at a non-zero address.
+ */
+ if (is_single_port_ram) {
+ u32 reg;
+
+ /* Check if TXFIFOs start at non-zero addr */
+ reg = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
+ fifo_0_start = DWC3_GTXFIFOSIZ_TXFSTADDR(reg);
+
+ ram_depth -= (fifo_0_start >> 16);
+ }
+
+ return ram_depth;
+}
+
+/**
* dwc3_gadget_clear_tx_fifos - Clears txfifo allocation
* @dwc: pointer to the DWC3 context
*
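
For reference, here is a minimal user-space sketch of the RAM-depth computation introduced above. The GHWPARAMS/GTXFIFOSIZ field macros live in drivers/usb/dwc3/core.h and are not part of this hunk, so the decoded values are passed in as plain parameters; the 0xffff0000 mask is an assumption inferred from the shift-by-16 in the patch, and calc_ram_depth() is a hypothetical stand-in rather than the driver function itself.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical model of dwc3_gadget_calc_ram_depth(): the already-decoded
 * hardware parameters are handed in directly instead of being read from
 * the GHWPARAMS registers.
 */
static int calc_ram_depth(bool is_single_port_ram,
			  int ram0_depth, int ram1_depth,
			  unsigned int gtxfifosiz0)
{
	/* Single port RAM: TxFIFOs come out of RAM0; otherwise RAM1. */
	int ram_depth = is_single_port_ram ? ram0_depth : ram1_depth;

	if (is_single_port_ram) {
		/*
		 * With a shared RAM the TxFIFO region may start at a
		 * non-zero offset; the start address is assumed to sit in
		 * the upper half-word of GTXFIFOSIZ(0), hence the mask
		 * and the shift by 16.
		 */
		unsigned int fifo_0_start = gtxfifosiz0 & 0xffff0000;

		ram_depth -= fifo_0_start >> 16;
	}

	return ram_depth;
}

int main(void)
{
	/* Example: a shared RAM of 8192 words with TxFIFOs starting at
	 * word 2048 leaves 6144 words for TxFIFO allocation. */
	printf("%d\n", calc_ram_depth(true, 8192, 0, 2048u << 16));
	return 0;
}
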
@@ -753,7 +790,7 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
int fifo_0_start;
- int ram1_depth;
+ int ram_depth;
int fifo_size;
int min_depth;
int num_in_ep;
@@ -773,17 +810,32 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
if (dep->flags & DWC3_EP_TXFIFO_RESIZED)
return 0;
- ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
+ ram_depth = dwc3_gadget_calc_ram_depth(dwc);
- if ((dep->endpoint.maxburst > 1 &&
- usb_endpoint_xfer_bulk(dep->endpoint.desc)) ||
- usb_endpoint_xfer_isoc(dep->endpoint.desc))
- num_fifos = 3;
-
- if (dep->endpoint.maxburst > 6 &&
- (usb_endpoint_xfer_bulk(dep->endpoint.desc) ||
- usb_endpoint_xfer_isoc(dep->endpoint.desc)) && DWC3_IP_IS(DWC31))
- num_fifos = dwc->tx_fifo_resize_max_num;
+ switch (dwc->gadget->speed) {
+ case USB_SPEED_SUPER_PLUS:
+ case USB_SPEED_SUPER:
+ if (usb_endpoint_xfer_bulk(dep->endpoint.desc) ||
+ usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ num_fifos = min_t(unsigned int,
+ dep->endpoint.maxburst,
+ dwc->tx_fifo_resize_max_num);
+ break;
+ case USB_SPEED_HIGH:
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+ num_fifos = min_t(unsigned int,
+ usb_endpoint_maxp_mult(dep->endpoint.desc) + 1,
+ dwc->tx_fifo_resize_max_num);
+ break;
+ }
+ fallthrough;
+ case USB_SPEED_FULL:
+ if (usb_endpoint_xfer_bulk(dep->endpoint.desc))
+ num_fifos = 2;
+ break;
+ default:
+ break;
+ }
/* FIFO size for a single buffer */
fifo = dwc3_gadget_calc_tx_fifo_size(dwc, 1);
@@ -794,7 +846,7 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
/* Reserve at least one FIFO for the number of IN EPs */
min_depth = num_in_ep * (fifo + 1);
- remaining = ram1_depth - min_depth - dwc->last_fifo_depth;
+ remaining = ram_depth - min_depth - dwc->last_fifo_depth;
remaining = max_t(int, 0, remaining);
/*
* We've already reserved 1 FIFO per EP, so check what we can fit in
@@ -820,9 +872,9 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
dwc->last_fifo_depth += DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
/* Check fifo size allocation doesn't exceed available RAM size. */
- if (dwc->last_fifo_depth >= ram1_depth) {
+ if (dwc->last_fifo_depth >= ram_depth) {
dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
- dwc->last_fifo_depth, ram1_depth,
+ dwc->last_fifo_depth, ram_depth,
dep->endpoint.name, fifo_size);
if (DWC3_IP_IS(DWC3))
fifo_size = DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
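
The speed-dependent choice of num_fifos above can be modelled in isolation. The sketch below mirrors the new switch statement; the enums, pick_num_fifos() and resize_max are simplified placeholders for the usb_endpoint_* helpers and the dwc->tx_fifo_resize_max_num field (num_fifos starts out as 1 in the full function), not the driver's API.

#include <stdio.h>

enum model_speed { FULL, HIGH, SUPER, SUPER_PLUS };
enum model_xfer  { CTRL, ISOC, BULK, INTR };

/* Hypothetical model of the num_fifos selection in
 * dwc3_gadget_resize_tx_fifos(). */
static unsigned int pick_num_fifos(enum model_speed speed,
				   enum model_xfer type,
				   unsigned int maxburst,
				   unsigned int maxp_mult,
				   unsigned int resize_max)
{
	unsigned int num_fifos = 1;

	switch (speed) {
	case SUPER_PLUS:
	case SUPER:
		/* SS/SSP bulk and isoc scale with the burst size, capped
		 * by the resize limit. */
		if (type == BULK || type == ISOC)
			num_fifos = maxburst < resize_max ?
					maxburst : resize_max;
		break;
	case HIGH:
		/* HS isoc scales with the high-bandwidth multiplier. */
		if (type == ISOC) {
			num_fifos = (maxp_mult + 1) < resize_max ?
					(maxp_mult + 1) : resize_max;
			break;
		}
		/* fall through */
	case FULL:
		/* HS/FS bulk gets double buffering. */
		if (type == BULK)
			num_fifos = 2;
		break;
	default:
		break;
	}

	return num_fifos;
}

int main(void)
{
	/* SuperSpeed bulk endpoint, maxburst 4, limit 6 -> 4 FIFOs. */
	printf("%u\n", pick_num_fifos(SUPER, BULK, 4, 0, 6));
	/* High-speed isoc endpoint, maxp mult 2, limit 6 -> 3 FIFOs. */
	printf("%u\n", pick_num_fifos(HIGH, ISOC, 0, 2, 6));
	return 0;
}
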
@@ -1177,11 +1229,14 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
* pending to be processed by the driver.
*/
if (dep->trb_enqueue == dep->trb_dequeue) {
+ struct dwc3_request *req;
+
/*
- * If there is any request remained in the started_list at
- * this point, that means there is no TRB available.
+ * If there is any request remaining in the started_list with
+ * active TRBs at this point, then there is no TRB available.
*/
- if (!list_empty(&dep->started_list))
+ req = next_request(&dep->started_list);
+ if (req && req->num_trbs)
return 0;
return DWC3_TRB_NUM - 1;
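
The corner case this hunk adjusts can be sketched on its own, assuming DWC3_TRB_NUM is the per-endpoint ring size and that one slot is reserved for the link TRB; trbs_left_when_pointers_match() is a hypothetical helper for illustration, not the driver function.

#include <stdbool.h>
#include <stdio.h>

#define TRB_NUM 256	/* stand-in for DWC3_TRB_NUM */

/*
 * When the enqueue and dequeue pointers are equal the ring is either
 * completely empty or completely full; the tie is now broken by whether
 * the oldest started request still owns TRBs, not by the started_list
 * merely being non-empty.
 */
static unsigned int trbs_left_when_pointers_match(bool have_started_req,
						  unsigned int started_num_trbs)
{
	if (have_started_req && started_num_trbs)
		return 0;		/* ring is full, no TRB available */

	return TRB_NUM - 1;		/* ring is empty (one slot for the link TRB) */
}

int main(void)
{
	/* A started request whose TRBs were already reclaimed no longer
	 * blocks new TRB preparation. */
	printf("%u\n", trbs_left_when_pointers_match(true, 0));
	return 0;
}
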
@@ -1384,6 +1439,7 @@ static int dwc3_prepare_last_sg(struct dwc3_ep *dep,
unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
unsigned int rem = req->request.length % maxp;
unsigned int num_trbs = 1;
+ bool needs_extra_trb;
if (dwc3_needs_extra_trb(dep, req))
num_trbs++;
@@ -1391,15 +1447,15 @@ static int dwc3_prepare_last_sg(struct dwc3_ep *dep,
if (dwc3_calc_trbs_left(dep) < num_trbs)
return 0;
- req->needs_extra_trb = num_trbs > 1;
+ needs_extra_trb = num_trbs > 1;
/* Prepare a normal TRB */
if (req->direction || req->request.length)
dwc3_prepare_one_trb(dep, req, entry_length,
- req->needs_extra_trb, node, false, false);
+ needs_extra_trb, node, false, false);
/* Prepare extra TRBs for ZLP and MPS OUT transfer alignment */
- if ((!req->direction && !req->request.length) || req->needs_extra_trb)
+ if ((!req->direction && !req->request.length) || needs_extra_trb)
dwc3_prepare_one_trb(dep, req,
req->direction ? 0 : maxp - rem,
false, 1, true, false);
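
The extra-TRB sizing used in this hunk reduces to a small calculation, assuming dwc3_needs_extra_trb() has already determined that an extra TRB is required; extra_trb_len() is a hypothetical name used only for this sketch.

#include <stdbool.h>
#include <stdio.h>

/*
 * IN endpoints get a zero-length TRB (ZLP); OUT endpoints get a pad TRB
 * that rounds the buffer up to a multiple of the endpoint's maximum
 * packet size, matching req->direction ? 0 : maxp - rem above.
 */
static unsigned int extra_trb_len(bool is_in, unsigned int length,
				  unsigned int maxp)
{
	unsigned int rem = length % maxp;

	return is_in ? 0 : maxp - rem;
}

int main(void)
{
	/* A 1000-byte OUT request on a 512-byte bulk endpoint needs a
	 * 24-byte pad TRB (1000 % 512 = 488, 512 - 488 = 24). */
	printf("%u\n", extra_trb_len(false, 1000, 512));
	return 0;
}
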
@@ -1414,8 +1470,8 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
struct scatterlist *s;
int i;
unsigned int length = req->request.length;
- unsigned int remaining = req->request.num_mapped_sgs
- - req->num_queued_sgs;
+ unsigned int remaining = req->num_pending_sgs;
+ unsigned int num_queued_sgs = req->request.num_mapped_sgs - remaining;
unsigned int num_trbs = req->num_trbs;
bool needs_extra_trb = dwc3_needs_extra_trb(dep, req);
@@ -1423,7 +1479,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
* If we resume preparing the request, then get the remaining length of
* the request and resume where we left off.
*/
- for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
+ for_each_sg(req->request.sg, s, num_queued_sgs, i)
length -= sg_dma_len(s);
for_each_sg(sg, s, remaining, i) {
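
The resume-offset computation above can be illustrated with plain arrays standing in for scatterlists; remaining_length() is a hypothetical model of the num_queued_sgs derivation and the subtraction loop, not driver code.

#include <stdio.h>

/*
 * The number of already-queued entries is derived from the mapped and
 * pending counts, and their lengths are subtracted from the total
 * request length to find where TRB preparation should resume.
 */
static unsigned int remaining_length(const unsigned int *sg_len,
				     unsigned int num_mapped,
				     unsigned int num_pending,
				     unsigned int total_len)
{
	unsigned int num_queued = num_mapped - num_pending;
	unsigned int i;

	for (i = 0; i < num_queued; i++)
		total_len -= sg_len[i];

	return total_len;
}

int main(void)
{
	/* Three mapped segments, the first already queued: 8704 - 4096. */
	unsigned int sg_len[] = { 4096, 4096, 512 };

	printf("%u\n", remaining_length(sg_len, 3, 2, 8704));
	return 0;
}
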
@@ -1488,7 +1544,6 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
if (!last_sg)
req->start_sg = sg_next(s);
- req->num_queued_sgs++;
req->num_pending_sgs--;
/*
@@ -1569,9 +1624,7 @@ static int dwc3_prepare_trbs(struct dwc3_ep *dep)
if (ret)
return ret;
- req->sg = req->request.sg;
- req->start_sg = req->sg;
- req->num_queued_sgs = 0;
+ req->start_sg = req->request.sg;
req->num_pending_sgs = req->request.num_mapped_sgs;
if (req->num_pending_sgs > 0) {
@@ -3075,7 +3128,7 @@ static int dwc3_gadget_check_config(struct usb_gadget *g)
struct dwc3 *dwc = gadget_to_dwc(g);
struct usb_ep *ep;
int fifo_size = 0;
- int ram1_depth;
+ int ram_depth;
int ep_num = 0;
if (!dwc->do_fifo_resize)
@@ -3098,8 +3151,8 @@ static int dwc3_gadget_check_config(struct usb_gadget *g)
fifo_size += dwc->max_cfg_eps;
/* Check if we can fit a single fifo per endpoint */
- ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
- if (fifo_size > ram1_depth)
+ ram_depth = dwc3_gadget_calc_ram_depth(dwc);
+ if (fifo_size > ram_depth)
return -ENOMEM;
return 0;
@@ -3416,20 +3469,16 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
int status)
{
struct dwc3_trb *trb;
- struct scatterlist *sg = req->sg;
- struct scatterlist *s;
- unsigned int num_queued = req->num_queued_sgs;
+ unsigned int num_completed_trbs = req->num_trbs;
unsigned int i;
int ret = 0;
- for_each_sg(sg, s, num_queued, i) {
+ for (i = 0; i < num_completed_trbs; i++) {
trb = &dep->trb_pool[dep->trb_dequeue];
- req->sg = sg_next(s);
- req->num_queued_sgs--;
-
ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
- trb, event, status, true);
+ trb, event, status,
+ !!(trb->ctrl & DWC3_TRB_CTRL_CHN));
if (ret)
break;
}
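
A user-space model of the reworked reclaim loop: walk exactly req->num_trbs ring entries and take the chain flag from each TRB's control word instead of mirroring the scatterlist. The model_trb structure, the CHN bit value and reclaim_trbs() are simplified stand-ins for the dwc3 types, shown here only to illustrate the control flow.

#include <stdbool.h>
#include <stdio.h>

#define TRB_CTRL_CHN	(1u << 2)	/* stand-in for DWC3_TRB_CTRL_CHN */

struct model_trb {
	unsigned int ctrl;
};

static int reclaim_trbs(struct model_trb *ring, unsigned int *dequeue,
			unsigned int ring_size, unsigned int num_trbs)
{
	unsigned int i;

	for (i = 0; i < num_trbs; i++) {
		struct model_trb *trb = &ring[*dequeue];
		bool chained = trb->ctrl & TRB_CTRL_CHN;

		/* Per-TRB completion handling would run here; the chain
		 * flag now comes from the TRB itself. */
		printf("TRB %u chained=%d\n", *dequeue, chained);

		*dequeue = (*dequeue + 1) % ring_size;
	}

	return 0;
}

int main(void)
{
	struct model_trb ring[4] = {
		{ .ctrl = TRB_CTRL_CHN }, { .ctrl = 0 }, { 0 }, { 0 },
	};
	unsigned int dequeue = 0;

	return reclaim_trbs(ring, &dequeue, 4, 2);
}
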
@@ -3437,19 +3486,9 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
return ret;
}
-static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
- struct dwc3_request *req, const struct dwc3_event_depevt *event,
- int status)
-{
- struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
-
- return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb,
- event, status, false);
-}
-
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
- return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
+ return req->num_pending_sgs == 0 && req->num_trbs == 0;
}
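
The new completion test is now a two-field predicate; request_completed() below is a trivial, self-contained model of it (hypothetical name, detached from the driver types).

#include <stdbool.h>
#include <stdio.h>

/*
 * A request is finished once no scatterlist entries remain to be queued
 * and no TRBs remain to be reclaimed, which is what lets the separate
 * needs_extra_trb and num_queued_sgs bookkeeping go away.
 */
static bool request_completed(unsigned int num_pending_sgs,
			      unsigned int num_trbs)
{
	return num_pending_sgs == 0 && num_trbs == 0;
}

int main(void)
{
	/* One TRB (e.g. a ZLP TRB) still outstanding: not complete yet. */
	printf("%d\n", request_completed(0, 1));
	return 0;
}
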
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
@@ -3459,24 +3498,13 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
int request_status;
int ret;
- if (req->request.num_mapped_sgs)
- ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
- status);
- else
- ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
- status);
+ ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event, status);
req->request.actual = req->request.length - req->remaining;
if (!dwc3_gadget_ep_request_completed(req))
goto out;
- if (req->needs_extra_trb) {
- ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
- status);
- req->needs_extra_trb = false;
- }
-
/*
* The event status only reflects the status of the TRB with IOC set.
* For the requests that don't set interrupt on completion, the driver