Diffstat (limited to 'drivers/dma/dw-edma/dw-edma-core.c')
-rw-r--r-- drivers/dma/dw-edma/dw-edma-core.c | 65
1 file changed, 46 insertions(+), 19 deletions(-)
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index ff392c01bad1..ed430ad9b3dd 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -13,8 +13,9 @@
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/dma/edma.h>
-#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
@@ -322,7 +323,7 @@ static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
- enum dma_transfer_direction direction = xfer->direction;
+ enum dma_transfer_direction dir = xfer->direction;
phys_addr_t src_addr, dst_addr;
struct scatterlist *sg = NULL;
struct dw_edma_chunk *chunk;
@@ -331,10 +332,26 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
u32 cnt;
int i;
- if ((direction == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) ||
- (direction == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ))
+ if (!chan->configured)
return NULL;
+ switch (chan->config.direction) {
+ case DMA_DEV_TO_MEM: /* local dma */
+ if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
+ break;
+ return NULL;
+ case DMA_MEM_TO_DEV: /* local dma */
+ if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
+ break;
+ return NULL;
+ default: /* remote dma */
+ if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
+ break;
+ if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
+ break;
+ return NULL;
+ }
+
if (xfer->cyclic) {
if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
return NULL;
@@ -343,9 +360,6 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
return NULL;
}
- if (!chan->configured)
- return NULL;
-
desc = dw_edma_alloc_desc(chan);
if (unlikely(!desc))
goto err_alloc;
@@ -386,7 +400,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
- if (direction == DMA_DEV_TO_MEM) {
+ if (chan->dir == EDMA_DIR_WRITE) {
burst->sar = src_addr;
if (xfer->cyclic) {
burst->dar = xfer->xfer.cyclic.paddr;
@@ -773,6 +787,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
u32 rd_mask = 1;
int i, err = 0;
u32 ch_cnt;
+ int irq;
ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
@@ -781,16 +796,16 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
if (dw->nr_irqs == 1) {
/* Common IRQ shared among all channels */
- err = request_irq(pci_irq_vector(to_pci_dev(dev), 0),
- dw_edma_interrupt_common,
+ irq = dw->ops->irq_vector(dev, 0);
+ err = request_irq(irq, dw_edma_interrupt_common,
IRQF_SHARED, dw->name, &dw->irq[0]);
if (err) {
dw->nr_irqs = 0;
return err;
}
- get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), 0),
- &dw->irq[0].msi);
+ if (irq_get_msi_desc(irq))
+ get_cached_msi_msg(irq, &dw->irq[0].msi);
} else {
/* Distribute IRQs equally among all channels */
int tmp = dw->nr_irqs;
@@ -804,7 +819,8 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
- err = request_irq(pci_irq_vector(to_pci_dev(dev), i),
+ irq = dw->ops->irq_vector(dev, i);
+ err = request_irq(irq,
i < *wr_alloc ?
dw_edma_interrupt_write :
dw_edma_interrupt_read,
@@ -815,8 +831,8 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
return err;
}
- get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), i),
- &dw->irq[i].msi);
+ if (irq_get_msi_desc(irq))
+ get_cached_msi_msg(irq, &dw->irq[i].msi);
}
dw->nr_irqs = i;
@@ -827,12 +843,23 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
int dw_edma_probe(struct dw_edma_chip *chip)
{
- struct device *dev = chip->dev;
- struct dw_edma *dw = chip->dw;
+ struct device *dev;
+ struct dw_edma *dw;
u32 wr_alloc = 0;
u32 rd_alloc = 0;
int i, err;
+ if (!chip)
+ return -EINVAL;
+
+ dev = chip->dev;
+ if (!dev)
+ return -EINVAL;
+
+ dw = chip->dw;
+ if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
+ return -EINVAL;
+
raw_spin_lock_init(&dw->lock);
/* Find out how many write channels are supported by hardware */
@@ -884,7 +911,7 @@ int dw_edma_probe(struct dw_edma_chip *chip)
err_irq_free:
for (i = (dw->nr_irqs - 1); i >= 0; i--)
- free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
+ free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
dw->nr_irqs = 0;
@@ -904,7 +931,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
/* Free irqs */
for (i = (dw->nr_irqs - 1); i >= 0; i--)
- free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
+ free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
/* Power management */
pm_runtime_disable(dev);
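
For context, a minimal sketch of how a bus-specific glue driver (for example the PCIe one) might supply the irq_vector callback that this patch starts using in place of the direct pci_irq_vector() calls. The dw_edma_core_ops structure name and the exact callback signature are assumptions inferred from the dw->ops->irq_vector(dev, i) usage and the probe-time checks visible above, not taken verbatim from this diff:

/*
 * Hedged sketch: wire the generic dw->ops->irq_vector() hook used by
 * dw-edma-core.c back to pci_irq_vector() in the PCI glue driver.
 * The ops struct name and callback signature are assumed from the
 * usage shown in this diff.
 */
#include <linux/pci.h>
#include <linux/dma/edma.h>

static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
{
	/* Translate the abstract IRQ index into a PCI MSI/MSI-X vector. */
	return pci_irq_vector(to_pci_dev(dev), nr);
}

static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
	.irq_vector = dw_edma_pcie_irq_vector,
};

A glue driver's probe routine would then point dw->ops at dw_edma_pcie_core_ops before calling dw_edma_probe(), which after this patch rejects a missing dw->ops or dw->ops->irq_vector with -EINVAL instead of assuming the parent device is a PCI device.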