path: root/drivers/net/ethernet/ti/davinci_cpdma.c
author	Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>	2019-06-15 14:01:32 +0300
committer	David S. Miller <davem@davemloft.net>	2019-06-16 14:03:25 -0700
commit	871e846585919adf727f21398f433fd424b6f0e1 (patch)
tree	19da3891f6595a817ddb9f1c8895a1f71aa83ec7	/drivers/net/ethernet/ti/davinci_cpdma.c
parent	4e18a8a149d5577a171ddc5fe80cc715041b193d (diff)
net: ethernet: ti: davinci_cpdma: use idled submit
While the data path is suspended, reuse of RX descriptors can be disabled using the channel state and lock from the cpdma layer. To that end, make submission to a channel fail under the channel lock whenever the channel state is not "active", which is what this patch does.

The same submit path is used to fill the RX channel during ndo_open, while the channel is still idle, so add an idle-submit routine that allows descriptors to be prepared for an idle channel. All this simplifies the code, helps to avoid using dormant mode, and ensures packets are sent only to active channels, avoiding a potential race in later changes. Also add the missing synchronization barrier after stopping the TX queues, as is already done in other places.

Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
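Below is a minimal caller-side sketch (not part of the patch) of how the two entry points divide the work after this change. The fill helper, its arguments, and the buffer arrays are hypothetical illustration; cpdma_chan_idle_submit(), cpdma_chan_submit(), and cpdma_chan_start() are the cpdma API as shown in the diff:

/* Hypothetical RX fill on the ndo_open path: queue buffers while the
 * channel is still idle, then start it. */
static int example_rx_fill_and_start(struct cpdma_chan *rx_chan,
				     void **tokens, void **bufs,
				     int len, int n)
{
	int i, ret;

	/* cpdma_chan_idle_submit() rejects only CPDMA_STATE_TEARDOWN,
	 * so descriptors can be prepared before the channel is active. */
	for (i = 0; i < n; i++) {
		ret = cpdma_chan_idle_submit(rx_chan, tokens[i], bufs[i],
					     len, 0);
		if (ret < 0)
			return ret;
	}

	/* Once the channel is started, only cpdma_chan_submit() may be
	 * used; it now returns -EINVAL unless the channel is active. */
	return cpdma_chan_start(rx_chan);
}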
Diffstat (limited to 'drivers/net/ethernet/ti/davinci_cpdma.c')
-rw-r--r--	drivers/net/ethernet/ti/davinci_cpdma.c	85
1 file changed, 64 insertions(+), 21 deletions(-)
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 35bf14d8e7af..5cf1758d425b 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -134,6 +134,14 @@ struct cpdma_control_info {
#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
};
+struct submit_info {
+ struct cpdma_chan *chan;
+ int directed;
+ void *token;
+ void *data;
+ int len;
+};
+
static struct cpdma_control_info controls[] = {
[CPDMA_TX_RLIM] = {CPDMA_DMACONTROL, 8, 0xffff, ACCESS_RW},
[CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
@@ -1002,34 +1010,25 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan,
}
}
-int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
- int len, int directed)
+static int cpdma_chan_submit_si(struct submit_info *si)
{
+ struct cpdma_chan *chan = si->chan;
struct cpdma_ctlr *ctlr = chan->ctlr;
+ int len = si->len;
struct cpdma_desc __iomem *desc;
dma_addr_t buffer;
- unsigned long flags;
u32 mode;
- int ret = 0;
-
- spin_lock_irqsave(&chan->lock, flags);
-
- if (chan->state == CPDMA_STATE_TEARDOWN) {
- ret = -EINVAL;
- goto unlock_ret;
- }
+ int ret;
if (chan->count >= chan->desc_num) {
chan->stats.desc_alloc_fail++;
- ret = -ENOMEM;
- goto unlock_ret;
+ return -ENOMEM;
}
desc = cpdma_desc_alloc(ctlr->pool);
if (!desc) {
chan->stats.desc_alloc_fail++;
- ret = -ENOMEM;
- goto unlock_ret;
+ return -ENOMEM;
}
if (len < ctlr->params.min_packet_size) {
@@ -1037,16 +1036,15 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
chan->stats.runt_transmit_buff++;
}
- buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
+ buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
ret = dma_mapping_error(ctlr->dev, buffer);
if (ret) {
cpdma_desc_free(ctlr->pool, desc, 1);
- ret = -EINVAL;
- goto unlock_ret;
+ return -EINVAL;
}
mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
- cpdma_desc_to_port(chan, mode, directed);
+ cpdma_desc_to_port(chan, mode, si->directed);
/* Relaxed IO accessors can be used here as there is read barrier
* at the end of write sequence.
@@ -1055,7 +1053,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
writel_relaxed(buffer, &desc->hw_buffer);
writel_relaxed(len, &desc->hw_len);
writel_relaxed(mode | len, &desc->hw_mode);
- writel_relaxed((uintptr_t)token, &desc->sw_token);
+ writel_relaxed((uintptr_t)si->token, &desc->sw_token);
writel_relaxed(buffer, &desc->sw_buffer);
writel_relaxed(len, &desc->sw_len);
desc_read(desc, sw_len);
@@ -1066,8 +1064,53 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
chan_write(chan, rxfree, 1);
chan->count++;
+ return 0;
+}
-unlock_ret:
+int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, int directed)
+{
+ struct submit_info si;
+ unsigned long flags;
+ int ret;
+
+ si.chan = chan;
+ si.token = token;
+ si.data = data;
+ si.len = len;
+ si.directed = directed;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit_si(&si);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
+
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, int directed)
+{
+ struct submit_info si;
+ unsigned long flags;
+ int ret;
+
+ si.chan = chan;
+ si.token = token;
+ si.data = data;
+ si.len = len;
+ si.directed = directed;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit_si(&si);
spin_unlock_irqrestore(&chan->lock, flags);
return ret;
}