summaryrefslogtreecommitdiff
path: root/drivers/dma/ppc4xx
diff options
context:
space:
mode:
authorDave Jiang <dave.jiang@intel.com>2016-07-25 10:34:14 -0700
committerVinod Koul <vinod.koul@intel.com>2016-08-08 08:11:43 +0530
commited9f2c5896baf277959ed91f6b77b03c5de2db0f (patch)
treeef7dd1b18320887b0e4fa5c20d3551ca4a16a794 /drivers/dma/ppc4xx
parent8058e25809f53cadc0438ebb8f920415a0d2ec17 (diff)
dmaengine: ppc4xx/adma: move unmap to before callback
Completion callback should happen after dma_descriptor_unmap() has happened. This allow the cache invalidate to happen and ensure that the data accessed by the upper layer is in memory that was from DMA rather than stale data. On some architecture this is done by the hardware, however we should make the code consistent to not cause confusion. Signed-off-by: Dave Jiang <dave.jiang@intel.com> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/ppc4xx')
-rw-r--r--drivers/dma/ppc4xx/adma.c2
1 file changed, 1 insertion, 1 deletion
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 140f3ed429f4..fc71a635edf6 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -1482,11 +1482,11 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
cookie = desc->async_tx.cookie;
desc->async_tx.cookie = 0;
+ dma_descriptor_unmap(&desc->async_tx);
/* call the callback (must not sleep or submit new
* operations to this channel)
*/
dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
- dma_descriptor_unmap(&desc->async_tx);
}
/* run dependent operations */