author		Peter Ujfalusi <peter.ujfalusi@ti.com>	2019-07-16 11:24:59 +0300
committer	Vinod Koul <vkoul@kernel.org>	2019-07-29 12:05:01 +0530
commit		4689d35c765c696bdf0535486a990038b242a26b (patch)
tree		d86f80227e9dd6e67ca7cf0a59a3bb92970fb674 /drivers/dma/ti
parent		aac8670369dc017609b8e8870975641ad3143448 (diff)
dmaengine: ti: omap-dma: Improved memcpy polling support
When a DMA client driver does not set DMA_PREP_INTERRUPT, either because it does not want to use interrupts for DMA completion or because it cannot rely on DMA interrupts (for example when the memcpy is executed with interrupts disabled), it will poll the status of the transfer.

If interrupts are enabled, the cookie is marked completed in the interrupt handler, so only check for completion in hardware when polling is really needed.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Link: https://lore.kernel.org/r/20190716082459.1222-3-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
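For context, a minimal client-side sketch of the polled pattern this patch improves: a memcpy descriptor prepared without DMA_PREP_INTERRUPT whose completion is detected by polling dmaengine_tx_status(). This is illustrative only, not code from the patch; 'chan', 'dst', 'src' and 'len' are assumed to come from the caller's own setup (channel request, DMA mapping, etc.).

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/processor.h>

/*
 * Illustrative sketch only: 'chan', 'dst', 'src' and 'len' are assumed to
 * be set up by the caller (dma_request_chan(), dma_map_single(), ...).
 */
static int example_polled_memcpy(struct dma_chan *chan, dma_addr_t dst,
				 dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	enum dma_status status;
	dma_cookie_t cookie;

	/* No DMA_PREP_INTERRUPT: completion will be detected by polling */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -EIO;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);

	/*
	 * Poll the transfer status; with this patch omap-dma reads CCR in
	 * tx_status() and completes the cookie of a polled descriptor once
	 * CCR_ENABLE has cleared.
	 */
	do {
		status = dmaengine_tx_status(chan, cookie, NULL);
		if (status == DMA_ERROR)
			return -EIO;
		cpu_relax();
	} while (status != DMA_COMPLETE);

	return 0;
}

The dmaengine core's dma_sync_wait() implements a comparable poll loop with a timeout and can be used instead of open-coding one.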
Diffstat (limited to 'drivers/dma/ti')
-rw-r--r--	drivers/dma/ti/omap-dma.c	44
1 file changed, 28 insertions(+), 16 deletions(-)
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 80fd2667b2c8..a4a63425dc0b 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -91,6 +91,7 @@ struct omap_desc {
 	bool using_ll;
 	enum dma_transfer_direction dir;
 	dma_addr_t dev_addr;
+	bool polled;
 
 	int32_t fi;		/* for OMAP_DMA_SYNC_PACKET / double indexing */
 	int16_t ei;		/* for double indexing */
@@ -816,26 +817,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
 	struct virt_dma_desc *vd;
 	enum dma_status ret;
 	unsigned long flags;
+	struct omap_desc *d = NULL;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-
-	if (!c->paused && c->running) {
-		uint32_t ccr = omap_dma_chan_read(c, CCR);
-		/*
-		 * The channel is no longer active, set the return value
-		 * accordingly
-		 */
-		if (!(ccr & CCR_ENABLE))
-			ret = DMA_COMPLETE;
-	}
-
-	if (ret == DMA_COMPLETE || !txstate)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	spin_lock_irqsave(&c->vc.lock, flags);
+	if (c->desc && c->desc->vd.tx.cookie == cookie)
+		d = c->desc;
+
+	if (!txstate)
+		goto out;
 
-	if (c->desc && c->desc->vd.tx.cookie == cookie) {
-		struct omap_desc *d = c->desc;
+	if (d) {
 		dma_addr_t pos;
 
 		if (d->dir == DMA_MEM_TO_DEV)
@@ -852,8 +847,22 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
 		txstate->residue = 0;
 	}
 
-	if (ret == DMA_IN_PROGRESS && c->paused)
+out:
+	if (ret == DMA_IN_PROGRESS && c->paused) {
 		ret = DMA_PAUSED;
+	} else if (d && d->polled && c->running) {
+		uint32_t ccr = omap_dma_chan_read(c, CCR);
+		/*
+		 * The channel is no longer active, set the return value
+		 * accordingly and mark it as completed
+		 */
+		if (!(ccr & CCR_ENABLE)) {
+			struct omap_desc *d = c->desc;
+			ret = DMA_COMPLETE;
+			omap_dma_start_desc(c);
+			vchan_cookie_complete(&d->vd);
+		}
+	}
 
 	spin_unlock_irqrestore(&c->vc.lock, flags);
@@ -1181,7 +1190,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
 	d->ccr = c->ccr;
 	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
 
-	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
+	if (tx_flags & DMA_PREP_INTERRUPT)
+		d->cicr |= CICR_FRAME_IE;
+	else
+		d->polled = true;
 
 	d->csdp = data_type;
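For contrast with the polled sketch above, a sketch of the interrupt-driven variant that the last hunk selects when DMA_PREP_INTERRUPT is set: CICR_FRAME_IE is enabled, the driver's interrupt handler completes the cookie, and the client can sleep on a completion instead of polling. Again illustrative only, under the same assumptions ('chan', 'dst', 'src' and 'len' provided by the caller), not code from the patch.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

static void example_memcpy_callback(void *param)
{
	complete(param);
}

/* Illustrative sketch only: interrupt-driven completion of a dmaengine memcpy */
static int example_irq_memcpy(struct dma_chan *chan, dma_addr_t dst,
			      dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	DECLARE_COMPLETION_ONSTACK(done);
	dma_cookie_t cookie;

	/* DMA_PREP_INTERRUPT: the interrupt handler completes the cookie */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -EIO;

	tx->callback = example_memcpy_callback;
	tx->callback_param = &done;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	wait_for_completion(&done);

	return dmaengine_tx_status(chan, cookie, NULL) == DMA_COMPLETE ? 0 : -EIO;
}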