Diffstat (limited to 'drivers/dma/pl330.c')
-rw-r--r--  drivers/dma/pl330.c | 164
1 file changed, 85 insertions(+), 79 deletions(-)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 88b884cbb7c1..82a9fe88ad54 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -33,7 +33,8 @@
#define PL330_MAX_PERI 32
#define PL330_MAX_BURST 16
-#define PL330_QUIRK_BROKEN_NO_FLUSHP BIT(0)
+#define PL330_QUIRK_BROKEN_NO_FLUSHP BIT(0)
+#define PL330_QUIRK_PERIPH_BURST BIT(1)
enum pl330_cachectrl {
CCTRL0, /* Noncacheable and nonbufferable */
@@ -254,7 +255,7 @@ enum pl330_byteswap {
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...) do { \
printk("%x:", cmd_line); \
- printk(x); \
+ printk(KERN_CONT x); \
cmd_line += off; \
} while (0)
#define PL330_DBGMC_START(addr) (cmd_line = addr)
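Note the KERN_CONT fix above: since the printk rework in v4.9, a printk() without KERN_CONT starts a new message rather than continuing the previous line, which broke this two-step dump. A minimal sketch of the intended line assembly (format string illustrative):

    printk("%x:", cmd_line);                      /* opens the line */
    printk(KERN_CONT " DMAMOV CCR 0x%x\n", ccr);  /* appends and terminates it */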
@@ -284,7 +285,7 @@ struct pl330_config {
u32 irq_ns;
};
-/**
+/*
* Request Configuration.
* The PL330 core does not modify this and uses the last
* working configuration if the request doesn't provide any.
@@ -403,6 +404,12 @@ enum desc_status {
*/
BUSY,
/*
+ * Pause was called while descriptor was BUSY. Due to hardware
+ * limitations, only termination is possible for descriptors
+ * that have been paused.
+ */
+ PAUSED,
+ /*
* Sitting on the channel work_list but xfer done
* by PL330 core
*/
@@ -459,9 +466,6 @@ struct pl330_dmac {
/* DMA-Engine Device */
struct dma_device ddma;
- /* Holds info about sg limitations */
- struct device_dma_parameters dma_parms;
-
/* Pool of descriptors available for the DMAC's channels */
struct list_head desc_pool;
/* To protect desc_pool manipulation */
@@ -509,6 +513,10 @@ static struct pl330_of_quirks {
{
.quirk = "arm,pl330-broken-no-flushp",
.id = PL330_QUIRK_BROKEN_NO_FLUSHP,
+ },
+ {
+ .quirk = "arm,pl330-periph-burst",
+ .id = PL330_QUIRK_PERIPH_BURST,
}
};
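For context, a quirk table like this is typically applied at probe time by testing each string as a boolean property of the controller's device-tree node; a sketch of that loop (locals assumed from the driver) could be:

    for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
            if (of_property_read_bool(np, of_quirks[i].quirk))
                    pl330->quirks |= of_quirks[i].id;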
@@ -885,6 +893,12 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd,
void __iomem *regs = thrd->dmac->base;
u32 val;
+ /* If timed out due to halted state-machine */
+ if (_until_dmac_idle(thrd)) {
+ dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
+ return;
+ }
+
val = (insn[0] << 16) | (insn[1] << 24);
if (!as_manager) {
val |= (1 << 0);
@@ -895,12 +909,6 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd,
val = le32_to_cpu(*((__le32 *)&insn[2]));
writel(val, regs + DBGINST1);
- /* If timed out due to halted state-machine */
- if (_until_dmac_idle(thrd)) {
- dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
- return;
- }
-
/* Get going */
writel(0, regs + DBGCMD);
}
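Hoisting the idle check ahead of the DBGINST0/1 writes ensures the debug registers are only touched once the debug interface reports idle. A sketch of the kind of bounded poll _until_dmac_idle() performs (loop bound illustrative, register and bit names per the PL330 TRM):

    static int dbg_wait_idle(void __iomem *regs)
    {
            unsigned long loops = 10000;            /* illustrative bound */

            do {
                    if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
                            return 0;               /* safe to issue DBGINSN */
                    cpu_relax();
            } while (--loops);

            return -ETIMEDOUT;                      /* halted state-machine */
    }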
@@ -1048,7 +1056,7 @@ static bool _trigger(struct pl330_thread *thrd)
return true;
}
-static bool _start(struct pl330_thread *thrd)
+static bool pl330_start_thread(struct pl330_thread *thrd)
{
switch (_state(thrd)) {
case PL330_STATE_FAULT_COMPLETING:
@@ -1056,16 +1064,16 @@ static bool _start(struct pl330_thread *thrd)
if (_state(thrd) == PL330_STATE_KILLING)
UNTIL(thrd, PL330_STATE_STOPPED)
- /* fall through */
+ fallthrough;
case PL330_STATE_FAULTING:
_stop(thrd);
- /* fall through */
+ fallthrough;
case PL330_STATE_KILLING:
case PL330_STATE_COMPLETING:
UNTIL(thrd, PL330_STATE_STOPPED)
- /* fall through */
+ fallthrough;
case PL330_STATE_STOPPED:
return _trigger(thrd);
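The fallthrough; pseudo-keyword from <linux/compiler_attributes.h> replaces the old comment convention so -Wimplicit-fallthrough can verify intent mechanically. The idiom in isolation:

    switch (_state(thrd)) {
    case PL330_STATE_FAULTING:
            _stop(thrd);
            fallthrough;            /* a stopped thread can be retriggered */
    case PL330_STATE_STOPPED:
            return _trigger(thrd);
    default:
            return false;
    }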
@@ -1116,7 +1124,6 @@ static u32 _emit_load(unsigned int dry_run, u8 buf[],
switch (direction) {
case DMA_MEM_TO_MEM:
- /* fall through */
case DMA_MEM_TO_DEV:
off += _emit_LD(dry_run, &buf[off], cond);
break;
@@ -1150,7 +1157,6 @@ static inline u32 _emit_store(unsigned int dry_run, u8 buf[],
switch (direction) {
case DMA_MEM_TO_MEM:
- /* fall through */
case DMA_DEV_TO_MEM:
off += _emit_ST(dry_run, &buf[off], cond);
break;
@@ -1183,9 +1189,6 @@ static inline int _ldst_peripheral(struct pl330_dmac *pl330,
{
int off = 0;
- if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
- cond = BURST;
-
/*
* do FLUSHP at beginning to clear any stale dma requests before the
* first WFP.
@@ -1209,9 +1212,11 @@ static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
int off = 0;
enum pl330_cond cond = BRST_LEN(pxs->ccr) > 1 ? BURST : SINGLE;
+ if (pl330->quirks & PL330_QUIRK_PERIPH_BURST)
+ cond = BURST;
+
switch (pxs->desc->rqtype) {
case DMA_MEM_TO_DEV:
- /* fall through */
case DMA_DEV_TO_MEM:
off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc,
cond);
@@ -1231,8 +1236,9 @@ static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
}
/*
- * transfer dregs with single transfers to peripheral, or a reduced size burst
- * for mem-to-mem.
+ * Only unaligned burst transfers leave dregs behind, so still
+ * transfer the dregs, with a reduced-size burst, for mem-to-mem,
+ * mem-to-dev or dev-to-mem.
*/
static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[],
const struct _xfer_spec *pxs, int transfer_length)
@@ -1243,22 +1249,30 @@ static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[],
if (transfer_length == 0)
return off;
+ /*
+ * dregs_len = (total bytes - BURST_TO_BYTE(bursts, ccr)) /
+ * BRST_SIZE(ccr)
+ * The dregs length is always smaller than the burst length, so for
+ * higher efficiency we can rewrite CCR to use a reduced-size burst
+ * length for the dregs.
+ */
+ dregs_ccr = pxs->ccr;
+ dregs_ccr &= ~((0xf << CC_SRCBRSTLEN_SHFT) |
+ (0xf << CC_DSTBRSTLEN_SHFT));
+ dregs_ccr |= (((transfer_length - 1) & 0xf) <<
+ CC_SRCBRSTLEN_SHFT);
+ dregs_ccr |= (((transfer_length - 1) & 0xf) <<
+ CC_DSTBRSTLEN_SHFT);
+
switch (pxs->desc->rqtype) {
case DMA_MEM_TO_DEV:
- /* fall through */
case DMA_DEV_TO_MEM:
- off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs,
- transfer_length, SINGLE);
+ off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
+ off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, 1,
+ BURST);
break;
case DMA_MEM_TO_MEM:
- dregs_ccr = pxs->ccr;
- dregs_ccr &= ~((0xf << CC_SRCBRSTLEN_SHFT) |
- (0xf << CC_DSTBRSTLEN_SHFT));
- dregs_ccr |= (((transfer_length - 1) & 0xf) <<
- CC_SRCBRSTLEN_SHFT);
- dregs_ccr |= (((transfer_length - 1) & 0xf) <<
- CC_DSTBRSTLEN_SHFT);
off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
off += _ldst_memtomem(dry_run, &buf[off], pxs, 1);
break;
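As a worked example of the CCR rewrite above (values illustrative): the 4-bit burst-length fields encode length minus one, so a 3-beat dregs tail sets both fields to 2:

    int transfer_length = 3;                    /* leftover dregs beats */
    u32 dregs_ccr = pxs->ccr;

    dregs_ccr &= ~((0xf << CC_SRCBRSTLEN_SHFT) |
                   (0xf << CC_DSTBRSTLEN_SHFT));
    dregs_ccr |= ((transfer_length - 1) & 0xf) << CC_SRCBRSTLEN_SHFT;
    dregs_ccr |= ((transfer_length - 1) & 0xf) << CC_DSTBRSTLEN_SHFT;
    /* both fields now hold 2, i.e. a 3-beat burst for the tail */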
@@ -1519,8 +1533,6 @@ static int pl330_submit_req(struct pl330_thread *thrd,
/* First dry run to check if req is acceptable */
ret = _setup_req(pl330, 1, thrd, idx, &xs);
- if (ret < 0)
- goto xfer_exit;
if (ret > pl330->mcbufsz / 2) {
dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n",
@@ -1565,9 +1577,9 @@ static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
tasklet_schedule(&pch->task);
}
-static void pl330_dotask(unsigned long data)
+static void pl330_dotask(struct tasklet_struct *t)
{
- struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
+ struct pl330_dmac *pl330 = from_tasklet(pl330, t, tasks);
unsigned long flags;
int i;
@@ -1696,7 +1708,7 @@ static int pl330_update(struct pl330_dmac *pl330)
thrd->req_running = -1;
/* Get going again ASAP */
- _start(thrd);
+ pl330_start_thread(thrd);
/* For now, just make a list of callbacks to be done */
list_add_tail(&descdone->rqd, &pl330->req_done);
@@ -1971,7 +1983,7 @@ static int pl330_add(struct pl330_dmac *pl330)
return ret;
}
- tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
+ tasklet_setup(&pl330->tasks, pl330_dotask);
pl330->state = INIT;
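The tasklet_setup()/from_tasklet() conversion removes the unsigned-long cast: the callback receives the tasklet itself and recovers its container by field name (from_tasklet() is container_of() specialised for tasklets). The generic shape, with a hypothetical struct:

    struct foo {
            struct tasklet_struct task;
            int pending;
    };

    static void foo_tasklet(struct tasklet_struct *t)
    {
            struct foo *foo = from_tasklet(foo, t, task);

            foo->pending = 0;
    }

    /* at init time: */
    tasklet_setup(&foo->task, foo_tasklet);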
@@ -2035,7 +2047,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
list_for_each_entry(desc, &pch->work_list, node) {
/* If already submitted */
- if (desc->status == BUSY)
+ if (desc->status == BUSY || desc->status == PAUSED)
continue;
ret = pl330_submit_req(pch->thread, desc);
@@ -2054,9 +2066,9 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
}
}
-static void pl330_tasklet(unsigned long data)
+static void pl330_tasklet(struct tasklet_struct *t)
{
- struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
+ struct dma_pl330_chan *pch = from_tasklet(pch, t, task);
struct dma_pl330_desc *desc, *_dt;
unsigned long flags;
bool power_down = false;
@@ -2083,7 +2095,7 @@ static void pl330_tasklet(unsigned long data)
} else {
/* Make sure the PL330 Channel thread is active */
spin_lock(&pch->thread->dmac->lock);
- _start(pch->thread);
+ pl330_start_thread(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
}
@@ -2101,7 +2113,7 @@ static void pl330_tasklet(unsigned long data)
if (power_down) {
pch->active = true;
spin_lock(&pch->thread->dmac->lock);
- _start(pch->thread);
+ pl330_start_thread(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
power_down = false;
}
@@ -2164,7 +2176,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
return -ENOMEM;
}
- tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
+ tasklet_setup(&pch->task, pl330_tasklet);
spin_unlock_irqrestore(&pl330->lock, flags);
@@ -2221,9 +2233,7 @@ static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch,
static int fixup_burst_len(int max_burst_len, int quirks)
{
- if (quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
- return 1;
- else if (max_burst_len > PL330_MAX_BURST)
+ if (max_burst_len > PL330_MAX_BURST)
return PL330_MAX_BURST;
else if (max_burst_len < 1)
return 1;
@@ -2322,6 +2332,7 @@ static int pl330_pause(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
struct pl330_dmac *pl330 = pch->dmac;
+ struct dma_pl330_desc *desc;
unsigned long flags;
pm_runtime_get_sync(pl330->ddma.dev);
@@ -2331,6 +2342,10 @@ static int pl330_pause(struct dma_chan *chan)
_stop(pch->thread);
spin_unlock(&pl330->lock);
+ list_for_each_entry(desc, &pch->work_list, node) {
+ if (desc->status == BUSY)
+ desc->status = PAUSED;
+ }
spin_unlock_irqrestore(&pch->lock, flags);
pm_runtime_mark_last_busy(pl330->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);
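From a client's point of view the new PAUSED state is visible through dmaengine_tx_status(); since the PL330 cannot resume from the exact pause point, the only legal follow-up is termination. A usage sketch (cookie assumed from an earlier submit):

    enum dma_status status;
    struct dma_tx_state state;

    dmaengine_pause(chan);
    status = dmaengine_tx_status(chan, cookie, &state);
    if (status == DMA_PAUSED)
            dmaengine_terminate_sync(chan);     /* resume is not supported */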
@@ -2421,7 +2436,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
else if (running && desc == running)
transferred =
pl330_get_current_xferred_count(pch, desc);
- else if (desc->status == BUSY)
+ else if (desc->status == BUSY || desc->status == PAUSED)
/*
* Busy but not running means either just enqueued,
* or finished and not yet marked done
@@ -2438,6 +2453,9 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
case DONE:
ret = DMA_COMPLETE;
break;
+ case PAUSED:
+ ret = DMA_PAUSED;
+ break;
case PREP:
case BUSY:
ret = DMA_IN_PROGRESS;
@@ -2478,7 +2496,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
list_splice_tail_init(&pch->submitted_list, &pch->work_list);
spin_unlock_irqrestore(&pch->lock, flags);
- pl330_tasklet((unsigned long)pch);
+ pl330_tasklet(&pch->task);
}
/*
@@ -2567,6 +2585,7 @@ static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
desc->status = PREP;
desc->txd.callback = NULL;
+ desc->txd.callback_result = NULL;
}
spin_unlock_irqrestore(lock, flags);
@@ -2585,7 +2604,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
/* If the DMAC pool is empty, alloc new */
if (!desc) {
- DEFINE_SPINLOCK(lock);
+ static DEFINE_SPINLOCK(lock);
LIST_HEAD(pool);
if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
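The static here is load-bearing: a plain DEFINE_SPINLOCK in function scope lives on the stack, so every caller initialises and locks its own private lock and no exclusion actually happens. A minimal illustration of the corrected pattern:

    static int counted;

    static void bump_counter(void)
    {
            static DEFINE_SPINLOCK(lock);   /* one lock shared by all callers */

            spin_lock(&lock);
            counted++;
            spin_unlock(&lock);
    }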
@@ -2690,13 +2709,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
for (i = 0; i < len / period_len; i++) {
desc = pl330_get_desc(pch);
if (!desc) {
+ unsigned long iflags;
+
dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
__func__, __LINE__);
if (!first)
return NULL;
- spin_lock_irqsave(&pl330->pool_lock, flags);
+ spin_lock_irqsave(&pl330->pool_lock, iflags);
while (!list_empty(&first->node)) {
desc = list_entry(first->node.next,
@@ -2706,7 +2727,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
list_move_tail(&first->node, &pl330->desc_pool);
- spin_unlock_irqrestore(&pl330->pool_lock, flags);
+ spin_unlock_irqrestore(&pl330->pool_lock, iflags);
return NULL;
}
@@ -2746,7 +2767,6 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
return NULL;
pch->cyclic = true;
- desc->txd.flags = flags;
return &desc->txd;
}
@@ -2788,18 +2808,16 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
while (burst != (1 << desc->rqcfg.brst_size))
desc->rqcfg.brst_size++;
+ desc->rqcfg.brst_len = get_burst_len(desc, len);
/*
* If burst size is smaller than bus width then make sure we only
* transfer one at a time to avoid a burst stradling an MFIFO entry.
*/
- if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
+ if (burst * 8 < pl330->pcfg.data_bus_width)
desc->rqcfg.brst_len = 1;
- desc->rqcfg.brst_len = get_burst_len(desc, len);
desc->bytes_requested = len;
- desc->txd.flags = flags;
-
return &desc->txd;
}
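Two fixes land in this hunk. By this point brst_size holds log2(burst), so the old guard compared a log2-encoded size against a bus width in bits; testing burst uses real bytes. And because get_burst_len() now runs before the guard, the brst_len = 1 clamp is no longer silently overwritten. Worked numbers (illustrative):

    unsigned int burst = 8;                 /* bytes per beat = 64 bits */
    unsigned int bus_width = 64;            /* bits */
    unsigned int brst_size = ilog2(burst);  /* 3: CCR's log2 encoding */

    bool old_clamp = brst_size * 8 < bus_width; /* 24 < 64: wrongly clamps */
    bool new_clamp = burst * 8 < bus_width;     /* 64 < 64: correctly doesn't */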
@@ -2883,7 +2901,6 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
/* Return the last desc in the chain */
- desc->txd.flags = flg;
return &desc->txd;
}
@@ -2962,7 +2979,7 @@ static int __maybe_unused pl330_suspend(struct device *dev)
struct amba_device *pcdev = to_amba_device(dev);
pm_runtime_force_suspend(dev);
- amba_pclk_unprepare(pcdev);
+ clk_unprepare(pcdev->pclk);
return 0;
}
@@ -2972,7 +2989,7 @@ static int __maybe_unused pl330_resume(struct device *dev)
struct amba_device *pcdev = to_amba_device(dev);
int ret;
- ret = amba_pclk_prepare(pcdev);
+ ret = clk_prepare(pcdev->pclk);
if (ret)
return ret;
@@ -3025,9 +3042,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma");
if (IS_ERR(pl330->rstc)) {
- if (PTR_ERR(pl330->rstc) != -EPROBE_DEFER)
- dev_err(&adev->dev, "Failed to get reset!\n");
- return PTR_ERR(pl330->rstc);
+ return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc), "Failed to get reset!\n");
} else {
ret = reset_control_deassert(pl330->rstc);
if (ret) {
@@ -3038,9 +3053,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp");
if (IS_ERR(pl330->rstc_ocp)) {
- if (PTR_ERR(pl330->rstc_ocp) != -EPROBE_DEFER)
- dev_err(&adev->dev, "Failed to get OCP reset!\n");
- return PTR_ERR(pl330->rstc_ocp);
+ return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc_ocp),
+ "Failed to get OCP reset!\n");
} else {
ret = reset_control_deassert(pl330->rstc_ocp);
if (ret) {
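dev_err_probe() collapses the old three-line pattern: it logs real errors, stays quiet on -EPROBE_DEFER (the reason is recorded for debugfs' devices_deferred instead), and passes the error through as its return value. Roughly:

    /* approximate expansion of dev_err_probe(dev, err, "msg") */
    if (err != -EPROBE_DEFER)
            dev_err(dev, "error %d: msg\n", err);
    else
            dev_dbg(dev, "deferred probe: msg\n");  /* reason kept for debugfs */
    return err;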
@@ -3128,8 +3142,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
- pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
- 1 : PL330_MAX_BURST);
+ pd->max_burst = PL330_MAX_BURST;
ret = dma_async_device_register(pd);
if (ret) {
@@ -3146,16 +3159,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
}
}
- adev->dev.dma_parms = &pl330->dma_parms;
-
/*
* This is the limit for transfers with a buswidth of 1, larger
* buswidths will have larger limits.
*/
- ret = dma_set_max_seg_size(&adev->dev, 1900800);
- if (ret)
- dev_err(&adev->dev, "unable to set the seg size\n");
-
+ dma_set_max_seg_size(&adev->dev, 1900800);
init_pl330_debugfs(pl330);
dev_info(&adev->dev,
@@ -3197,7 +3205,7 @@ probe_err2:
return ret;
}
-static int pl330_remove(struct amba_device *adev)
+static void pl330_remove(struct amba_device *adev)
{
struct pl330_dmac *pl330 = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
@@ -3237,7 +3245,6 @@ static int pl330_remove(struct amba_device *adev)
if (pl330->rstc)
reset_control_assert(pl330->rstc);
- return 0;
}
static const struct amba_id pl330_ids[] = {
@@ -3252,7 +3259,6 @@ MODULE_DEVICE_TABLE(amba, pl330_ids);
static struct amba_driver pl330_driver = {
.drv = {
- .owner = THIS_MODULE,
.name = "dma-pl330",
.pm = &pl330_pm,
},