author	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-17 19:04:40 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-17 19:04:40 -0700
commit	04cbfba6208592999d7bfe6609ec01dc3fde73f5 (patch)
tree	78182160d45c82792a3a8607d3f95b87703dbfa9 /drivers/dma/ti
parent	4feaab05dc1eda3dbb57b097377766002e7a7cb9 (diff)
parent	c5c6faaee6e0c374c7dfaf053aa23750f5a68e20 (diff)
Merge tag 'dmaengine-5.4-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:

 - Move Dmaengine DT bindings to YAML and convert Allwinner to schema

 - FSL dma device_synchronize implementation

 - DW: split ACPI and OF helpers, updates to the driver, and support for
   Elkhart Lake

 - Make the filter functions private to the omap-dma and edma drivers,
   plus improvements to these drivers

 - Mark expected switch fall-throughs in a couple of drivers

 - Renames of the shdma and nbpfaxi binding documents

 - Minor updates to a bunch of drivers

* tag 'dmaengine-5.4-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (55 commits)
  dmaengine: ti: edma: Use bitmap_set() instead of open coded edma_set_bits()
  dmaengine: ti: edma: Only reset region0 access registers
  dmaengine: ti: edma: Do not reset reserved paRAM slots
  dmaengine: iop-adma.c: fix printk format warning
  dmaengine: stm32-dma: Use struct_size() helper
  dt-bindings: dmaengine: dma-common: Fix the dma-channel-mask property
  dmanegine: ioat/dca: Use struct_size() helper
  dmaengine: iop-adma: remove set but not used variable 'slots_per_op'
  dmaengine: dmatest: Add support for completion polling
  dmaengine: ti: omap-dma: Remove variable override in omap_dma_tx_status()
  dmaengine: ti: omap-dma: Remove 'Assignment in if condition'
  dmaengine: ti: edma: Remove 'Assignment in if condition'
  dmaengine: dw: platform: Split OF helpers to separate module
  dmaengine: dw: platform: Split ACPI helpers to separate module
  dmaengine: dw: platform: Move handle check to dw_dma_acpi_controller_register()
  dmaengine: dw: platform: Switch to acpi_dma_controller_register()
  dmaengine: dw: platform: Use devm_platform_ioremap_resource()
  dmaengine: dw: platform: Enable iDMA 32-bit on Intel Elkhart Lake
  dmaengine: dw: platform: Use struct dw_dma_chip_pdata
  dmaengine: dw: Export struct dw_dma_chip_pdata for wider use
  ...
Diffstat (limited to 'drivers/dma/ti')
-rw-r--r--	drivers/dma/ti/edma.c	228
-rw-r--r--	drivers/dma/ti/omap-dma.c	62
2 files changed, 183 insertions, 107 deletions
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index ceabdea40ae0..ba7c4f07fcd6 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -15,7 +15,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
-#include <linux/edma.h>
+#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -133,6 +133,17 @@
#define EDMA_CONT_PARAMS_FIXED_EXACT 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
+/*
+ * 64-bit array registers are split into two 32-bit registers:
+ * reg0: channel/event 0-31
+ * reg1: channel/event 32-63
+ *
+ * bit 5 of the channel number selects the array index (0/1),
+ * bits 0-4 (0x1f) give the bit offset within the register
+ */
+#define EDMA_REG_ARRAY_INDEX(channel) ((channel) >> 5)
+#define EDMA_CHANNEL_BIT(channel) (BIT((channel) & 0x1f))
+
/* PaRAM slots are laid out like this */
struct edmacc_param {
u32 opt;
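
For illustration (not part of the patch), with channel 42 the two macros above resolve to the second 32-bit register and bit 10:

    int idx    = EDMA_REG_ARRAY_INDEX(42);  /* 42 >> 5 == 1 (second register) */
    u32 ch_bit = EDMA_CHANNEL_BIT(42);      /* BIT(42 & 0x1f) == BIT(10)      */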
@@ -169,6 +180,7 @@ struct edma_desc {
struct list_head node;
enum dma_transfer_direction direction;
int cyclic;
+ bool polled;
int absync;
int pset_nr;
struct edma_chan *echan;
@@ -412,12 +424,6 @@ static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}
-static inline void edma_set_bits(int offset, int len, unsigned long *p)
-{
- for (; len > 0; len--)
- set_bit(offset + (len - 1), p);
-}
-
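
The removed helper was an open-coded version of the kernel's generic bitmap_set(); a minimal sketch of the equivalence:

    /* bitmap_set(p, offset, len) sets 'len' bits starting at 'offset',
     * so this does exactly what edma_set_bits(8, 4, p) used to do */
    static void example_set_slots(unsigned long *p)
    {
        bitmap_set(p, 8, 4);  /* sets bits 8..11 */
    }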
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
int priority)
{
@@ -441,15 +447,14 @@ static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
if (enable) {
- edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
- BIT(channel & 0x1f));
- edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
- BIT(channel & 0x1f));
+ edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
+ edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
} else {
- edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
- BIT(channel & 0x1f));
+ edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
}
}
@@ -587,26 +592,26 @@ static void edma_start(struct edma_chan *echan)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- int j = (channel >> 5);
- unsigned int mask = BIT(channel & 0x1f);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
if (!echan->hw_triggered) {
/* EDMA channels without event association */
- dev_dbg(ecc->dev, "ESR%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_ESR, j));
- edma_shadow0_write_array(ecc, SH_ESR, j, mask);
+ dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_ESR, idx));
+ edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
} else {
/* EDMA channel with event association */
- dev_dbg(ecc->dev, "ER%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_ER, j));
+ dev_dbg(ecc->dev, "ER%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_ER, idx));
/* Clear any pending event or error */
- edma_write_array(ecc, EDMA_ECR, j, mask);
- edma_write_array(ecc, EDMA_EMCR, j, mask);
+ edma_write_array(ecc, EDMA_ECR, idx, ch_bit);
+ edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
/* Clear any SER */
- edma_shadow0_write_array(ecc, SH_SECR, j, mask);
- edma_shadow0_write_array(ecc, SH_EESR, j, mask);
- dev_dbg(ecc->dev, "EER%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_EER, j));
+ edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
+ edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit);
+ dev_dbg(ecc->dev, "EER%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_EER, idx));
}
}
@@ -614,19 +619,19 @@ static void edma_stop(struct edma_chan *echan)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- int j = (channel >> 5);
- unsigned int mask = BIT(channel & 0x1f);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
- edma_shadow0_write_array(ecc, SH_EECR, j, mask);
- edma_shadow0_write_array(ecc, SH_ECR, j, mask);
- edma_shadow0_write_array(ecc, SH_SECR, j, mask);
- edma_write_array(ecc, EDMA_EMCR, j, mask);
+ edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
+ edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
+ edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
+ edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
/* clear possibly pending completion interrupt */
- edma_shadow0_write_array(ecc, SH_ICR, j, mask);
+ edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
- dev_dbg(ecc->dev, "EER%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_EER, j));
+ dev_dbg(ecc->dev, "EER%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_EER, idx));
/* REVISIT: consider guarding against inappropriate event
* chaining by overwriting with dummy_paramset.
@@ -640,45 +645,49 @@ static void edma_stop(struct edma_chan *echan)
static void edma_pause(struct edma_chan *echan)
{
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- unsigned int mask = BIT(channel & 0x1f);
- edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
+ edma_shadow0_write_array(echan->ecc, SH_EECR,
+ EDMA_REG_ARRAY_INDEX(channel),
+ EDMA_CHANNEL_BIT(channel));
}
/* Re-enable EDMA hardware events on the specified channel. */
static void edma_resume(struct edma_chan *echan)
{
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- unsigned int mask = BIT(channel & 0x1f);
- edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
+ edma_shadow0_write_array(echan->ecc, SH_EESR,
+ EDMA_REG_ARRAY_INDEX(channel),
+ EDMA_CHANNEL_BIT(channel));
}
static void edma_trigger_channel(struct edma_chan *echan)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- unsigned int mask = BIT(channel & 0x1f);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
- edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
+ edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
- dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
- edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
+ dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_ESR, idx));
}
static void edma_clean_channel(struct edma_chan *echan)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- int j = (channel >> 5);
- unsigned int mask = BIT(channel & 0x1f);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
- dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
- edma_shadow0_write_array(ecc, SH_ECR, j, mask);
+ dev_dbg(ecc->dev, "EMR%d %08x\n", idx,
+ edma_read_array(ecc, EDMA_EMR, idx));
+ edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
/* Clear the corresponding EMR bits */
- edma_write_array(ecc, EDMA_EMCR, j, mask);
+ edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
/* Clear any SER */
- edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+ edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
}
@@ -708,7 +717,8 @@ static int edma_alloc_channel(struct edma_chan *echan,
int channel = EDMA_CHAN_SLOT(echan->ch_num);
/* ensure access through shadow region 0 */
- edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
+ edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
+ EDMA_CHANNEL_BIT(channel));
/* ensure no events are pending */
edma_stop(echan);
@@ -1011,6 +1021,7 @@ static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
src_cidx = cidx;
dst_bidx = acnt;
dst_cidx = cidx;
+ epset->addr = src_addr;
} else {
dev_err(dev, "%s: direction not implemented yet\n", __func__);
return -EINVAL;
@@ -1211,8 +1222,9 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
edesc->pset[0].param.opt |= ITCCHEN;
if (nslots == 1) {
- /* Enable transfer complete interrupt */
- edesc->pset[0].param.opt |= TCINTEN;
+ /* Enable transfer complete interrupt if requested */
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ edesc->pset[0].param.opt |= TCINTEN;
} else {
/* Enable transfer complete chaining for the first slot */
edesc->pset[0].param.opt |= TCCHEN;
@@ -1239,9 +1251,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
}
edesc->pset[1].param.opt |= ITCCHEN;
- edesc->pset[1].param.opt |= TCINTEN;
+ /* Enable transfer complete interrupt if requested */
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ edesc->pset[1].param.opt |= TCINTEN;
}
+ if (!(tx_flags & DMA_PREP_INTERRUPT))
+ edesc->polled = true;
+
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
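
With this change, a memcpy descriptor prepared without DMA_PREP_INTERRUPT raises no completion interrupt and must be polled by the client. A sketch of the polled mode (error handling elided; chan, dst, src and len are assumed to be set up elsewhere):

    struct dma_async_tx_descriptor *tx;
    dma_cookie_t cookie;

    tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);  /* no DMA_PREP_INTERRUPT */
    cookie = dmaengine_submit(tx);
    dma_async_issue_pending(chan);

    /* completion is detected in edma_tx_status() rather than in an IRQ */
    while (dmaengine_tx_status(chan, cookie, NULL) != DMA_COMPLETE)
        cpu_relax();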
@@ -1721,7 +1738,11 @@ static u32 edma_residue(struct edma_desc *edesc)
int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
struct edma_chan *echan = edesc->echan;
struct edma_pset *pset = edesc->pset;
- dma_addr_t done, pos;
+ dma_addr_t done, pos, pos_old;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
+ int event_reg;
int i;
/*
@@ -1734,16 +1755,20 @@ static u32 edma_residue(struct edma_desc *edesc)
* "pos" may represent a transfer request that is still being
* processed by the EDMACC or EDMATC. We will busy wait until
* any one of the situations occurs:
- * 1. the DMA hardware is idle
- * 2. a new transfer request is setup
+ * 1. there is no event pending for the channel
+ * 2. the position has been updated
* 3. we hit the loop limit
*/
- while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
- /* check if a new transfer request is setup */
- if (edma_get_position(echan->ecc,
- echan->slot[0], dst) != pos) {
+ if (is_slave_direction(edesc->direction))
+ event_reg = SH_ER;
+ else
+ event_reg = SH_ESR;
+
+ pos_old = pos;
+ while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) {
+ pos = edma_get_position(echan->ecc, echan->slot[0], dst);
+ if (pos != pos_old)
break;
- }
if (!--loop_count) {
dev_dbg_ratelimited(echan->vchan.chan.device->dev,
@@ -1769,6 +1794,12 @@ static u32 edma_residue(struct edma_desc *edesc)
}
/*
+ * If the position is 0, then EDMA loaded the closing dummy slot and the
+ * transfer is complete
+ */
+ if (!pos)
+ return 0;
+ /*
* For SG operation we catch up with the last processed
* status.
*/
@@ -1796,19 +1827,46 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct edma_chan *echan = to_edma_chan(chan);
- struct virt_dma_desc *vdesc;
+ struct dma_tx_state txstate_tmp;
enum dma_status ret;
unsigned long flags;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_COMPLETE || !txstate)
+
+ if (ret == DMA_COMPLETE)
return ret;
+ /* Provide a dummy dma_tx_state for completion checking */
+ if (!txstate)
+ txstate = &txstate_tmp;
+
spin_lock_irqsave(&echan->vchan.lock, flags);
- if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
+ if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
txstate->residue = edma_residue(echan->edesc);
- else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
- txstate->residue = to_edma_desc(&vdesc->tx)->residue;
+ } else {
+ struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan,
+ cookie);
+
+ if (vdesc)
+ txstate->residue = to_edma_desc(&vdesc->tx)->residue;
+ else
+ txstate->residue = 0;
+ }
+
+ /*
+ * Mark the cookie completed if the residue is 0 for non-cyclic
+ * transfers
+ */
+ if (ret != DMA_COMPLETE && !txstate->residue &&
+ echan->edesc && echan->edesc->polled &&
+ echan->edesc->vdesc.tx.cookie == cookie) {
+ edma_stop(echan);
+ vchan_cookie_complete(&echan->edesc->vdesc);
+ echan->edesc = NULL;
+ edma_execute(echan);
+ ret = DMA_COMPLETE;
+ }
+
spin_unlock_irqrestore(&echan->vchan.lock, flags);
return ret;
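
Because a polled descriptor is only reaped inside this status callback, a client that prefers not to open-code the busy loop can use dmaengine's generic helper, which re-polls tx_status until completion or an internal timeout (sketch):

    enum dma_status status = dma_sync_wait(chan, cookie);

    if (status != DMA_COMPLETE)
        dev_err(chan->device->dev, "polled DMA transfer did not complete\n");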
@@ -2185,11 +2243,13 @@ static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
}
#endif
+static bool edma_filter_fn(struct dma_chan *chan, void *param);
+
static int edma_probe(struct platform_device *pdev)
{
struct edma_soc_info *info = pdev->dev.platform_data;
s8 (*queue_priority_mapping)[2];
- int i, off, ln;
+ int i, off;
const s16 (*rsv_slots)[2];
const s16 (*xbar_chans)[2];
int irq;
@@ -2273,21 +2333,22 @@ static int edma_probe(struct platform_device *pdev)
ecc->default_queue = info->default_queue;
- for (i = 0; i < ecc->num_slots; i++)
- edma_write_slot(ecc, i, &dummy_paramset);
-
if (info->rsv) {
/* Set the reserved slots in inuse list */
rsv_slots = info->rsv->rsv_slots;
if (rsv_slots) {
- for (i = 0; rsv_slots[i][0] != -1; i++) {
- off = rsv_slots[i][0];
- ln = rsv_slots[i][1];
- edma_set_bits(off, ln, ecc->slot_inuse);
- }
+ for (i = 0; rsv_slots[i][0] != -1; i++)
+ bitmap_set(ecc->slot_inuse, rsv_slots[i][0],
+ rsv_slots[i][1]);
}
}
+ for (i = 0; i < ecc->num_slots; i++) {
+ /* Reset only unused - not reserved - paRAM slots */
+ if (!test_bit(i, ecc->slot_inuse))
+ edma_write_slot(ecc, i, &dummy_paramset);
+ }
+
/* Clear the xbar mapped channels in unused list */
xbar_chans = info->xbar_chans;
if (xbar_chans) {
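
The reserved-slot list consumed by bitmap_set() above is a table of {offset, length} pairs terminated by -1; a hypothetical platform table for illustration:

    /* hypothetical example: reserve paRAM slots 0-3 and 10-11, e.g. for a DSP */
    static const s16 rsv_slots[][2] = {
        {  0, 4 },
        { 10, 2 },
        { -1, -1 },  /* terminator checked by the probe loop */
    };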
@@ -2366,11 +2427,10 @@ static int edma_probe(struct platform_device *pdev)
edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
queue_priority_mapping[i][1]);
- for (i = 0; i < ecc->num_region; i++) {
- edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
- edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
- edma_write_array(ecc, EDMA_QRAE, i, 0x0);
- }
+ edma_write_array2(ecc, EDMA_DRAE, 0, 0, 0x0);
+ edma_write_array2(ecc, EDMA_DRAE, 0, 1, 0x0);
+ edma_write_array(ecc, EDMA_QRAE, 0, 0x0);
+
ecc->info = info;
/* Init the dma device and channels */
@@ -2482,8 +2542,9 @@ static int edma_pm_resume(struct device *dev)
for (i = 0; i < ecc->num_channels; i++) {
if (echan[i].alloced) {
/* ensure access through shadow region 0 */
- edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
- BIT(i & 0x1f));
+ edma_or_array2(ecc, EDMA_DRAE, 0,
+ EDMA_REG_ARRAY_INDEX(i),
+ EDMA_CHANNEL_BIT(i));
edma_setup_interrupt(&echan[i], true);
@@ -2524,7 +2585,7 @@ static struct platform_driver edma_tptc_driver = {
},
};
-bool edma_filter_fn(struct dma_chan *chan, void *param)
+static bool edma_filter_fn(struct dma_chan *chan, void *param)
{
bool match = false;
@@ -2539,7 +2600,6 @@ bool edma_filter_fn(struct dma_chan *chan, void *param)
}
return match;
}
-EXPORT_SYMBOL(edma_filter_fn);
static int edma_init(void)
{
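
With EXPORT_SYMBOL(edma_filter_fn) removed, code outside the driver can no longer pass the filter to dma_request_channel(); channels are expected to come from the DT/platform lookup instead. A minimal consumer sketch (the channel name "tx" is illustrative):

    struct dma_chan *chan;

    /* resolved via device tree or platform data; no filter function needed */
    chan = dma_request_chan(dev, "tx");
    if (IS_ERR(chan))
        return PTR_ERR(chan);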
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index d07c0d5de7a2..6b6ba238b81a 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -91,6 +91,7 @@ struct omap_desc {
bool using_ll;
enum dma_transfer_direction dir;
dma_addr_t dev_addr;
+ bool polled;
int32_t fi; /* for OMAP_DMA_SYNC_PACKET / double indexing */
int16_t ei; /* for double indexing */
@@ -202,6 +203,7 @@ static const unsigned es_bytes[] = {
[CSDP_DATA_TYPE_32] = 4,
};
+static bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info omap_dma_info = {
.filter_fn = omap_dma_filter_fn,
};
@@ -812,31 +814,22 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct omap_chan *c = to_omap_dma_chan(chan);
- struct virt_dma_desc *vd;
enum dma_status ret;
unsigned long flags;
+ struct omap_desc *d = NULL;
ret = dma_cookie_status(chan, cookie, txstate);
-
- if (!c->paused && c->running) {
- uint32_t ccr = omap_dma_chan_read(c, CCR);
- /*
- * The channel is no longer active, set the return value
- * accordingly
- */
- if (!(ccr & CCR_ENABLE))
- ret = DMA_COMPLETE;
- }
-
- if (ret == DMA_COMPLETE || !txstate)
+ if (ret == DMA_COMPLETE)
return ret;
spin_lock_irqsave(&c->vc.lock, flags);
- vd = vchan_find_desc(&c->vc, cookie);
- if (vd) {
- txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
- } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
- struct omap_desc *d = c->desc;
+ if (c->desc && c->desc->vd.tx.cookie == cookie)
+ d = c->desc;
+
+ if (!txstate)
+ goto out;
+
+ if (d) {
dma_addr_t pos;
if (d->dir == DMA_MEM_TO_DEV)
@@ -848,10 +841,31 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
txstate->residue = omap_dma_desc_size_pos(d, pos);
} else {
- txstate->residue = 0;
+ struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie);
+
+ if (vd)
+ txstate->residue = omap_dma_desc_size(
+ to_omap_dma_desc(&vd->tx));
+ else
+ txstate->residue = 0;
}
- if (ret == DMA_IN_PROGRESS && c->paused)
+
+out:
+ if (ret == DMA_IN_PROGRESS && c->paused) {
ret = DMA_PAUSED;
+ } else if (d && d->polled && c->running) {
+ uint32_t ccr = omap_dma_chan_read(c, CCR);
+ /*
+ * If the channel is no longer active, set the return value
+ * accordingly and mark the descriptor as completed
+ */
+ if (!(ccr & CCR_ENABLE)) {
+ ret = DMA_COMPLETE;
+ omap_dma_start_desc(c);
+ vchan_cookie_complete(&d->vd);
+ }
+ }
+
spin_unlock_irqrestore(&c->vc.lock, flags);
return ret;
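
As on the edma side, a polled OMAP descriptor is only completed when its status is queried; omap_dma_start_desc() then kicks off the next queued descriptor. One way a client can poll without providing a dma_tx_state (a sketch, assuming chan and cookie come from a prior polled prep):

    enum dma_status status;

    do {
        /* each call runs omap_dma_tx_status(), which reaps polled descriptors */
        status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
    } while (status == DMA_IN_PROGRESS);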
@@ -1178,7 +1192,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
d->ccr = c->ccr;
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
- d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ d->cicr |= CICR_FRAME_IE;
+ else
+ d->polled = true;
d->csdp = data_type;
@@ -1639,7 +1656,7 @@ static struct platform_driver omap_dma_driver = {
},
};
-bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
+static bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
if (chan->device->dev->driver == &omap_dma_driver.driver) {
struct omap_dmadev *od = to_omap_dma_dev(chan->device);
@@ -1653,7 +1670,6 @@ bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
}
return false;
}
-EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
static int omap_dma_init(void)
{