Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig          |   2
-rw-r--r--  drivers/dma/at_hdmac_regs.h  |   2
-rw-r--r--  drivers/dma/bcm-sba-raid.c   | 117
-rw-r--r--  drivers/dma/coh901318.c      |   6
-rw-r--r--  drivers/dma/dma-axi-dmac.c   |  75
-rw-r--r--  drivers/dma/dmatest.c        |   1
-rw-r--r--  drivers/dma/img-mdc-dma.c    |  98
-rw-r--r--  drivers/dma/imx-sdma.c       |  14
-rw-r--r--  drivers/dma/nbpfaxi.c        |   5
-rw-r--r--  drivers/dma/pch_dma.c        |  12
-rw-r--r--  drivers/dma/pl330.c          |  39
-rw-r--r--  drivers/dma/qcom/bam_dma.c   | 169
12 files changed, 326 insertions(+), 214 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fadc4d8783bd..48cf8df7255f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -115,7 +115,7 @@ config BCM_SBA_RAID
select DMA_ENGINE_RAID
select ASYNC_TX_DISABLE_XOR_VAL_DMA
select ASYNC_TX_DISABLE_PQ_VAL_DMA
- default ARCH_BCM_IPROC
+ default m if ARCH_BCM_IPROC
help
Enable support for Broadcom SBA RAID Engine. The SBA RAID
engine is available on most of the Broadcom iProc SoCs. It
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 7f58f06157f6..ef3f227ce3e6 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -385,7 +385,7 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
{
dev_crit(chan2dev(&atchan->chan_common),
- " desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n",
+ "desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n",
&lli->saddr, &lli->daddr,
lli->ctrla, lli->ctrlb, &lli->dscr);
}
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 6c2c44724637..3956a018bf5a 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -1,9 +1,14 @@
/*
* Copyright (C) 2017 Broadcom
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
/*
@@ -25,11 +30,8 @@
*
* The Broadcom SBA RAID driver does not require any register programming
* except submitting request to SBA hardware device via mailbox channels.
- * This driver implements a DMA device with one DMA channel using a set
- * of mailbox channels provided by Broadcom SoC specific ring manager
- * driver. To exploit parallelism (as described above), all DMA request
- * coming to SBA RAID DMA channel are broken down to smaller requests
- * and submitted to multiple mailbox channels in round-robin fashion.
+ * This driver implements a DMA device with one DMA channel using a single
+ * mailbox channel provided by Broadcom SoC specific ring manager driver.
* For having more SBA DMA channels, we can create more SBA device nodes
* in Broadcom SoC specific DTS based on number of hardware rings supported
* by Broadcom SoC ring manager.
@@ -85,6 +87,7 @@
#define SBA_CMD_GALOIS 0xe
#define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
+#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8
/* Driver helper macros */
#define to_sba_request(tx) \
@@ -142,9 +145,7 @@ struct sba_device {
u32 max_cmds_pool_size;
/* Maibox client and Mailbox channels */
struct mbox_client client;
- int mchans_count;
- atomic_t mchans_current;
- struct mbox_chan **mchans;
+ struct mbox_chan *mchan;
struct device *mbox_dev;
/* DMA device and DMA channel */
struct dma_device dma_dev;
@@ -200,14 +201,6 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
/* ====== General helper routines ===== */
-static void sba_peek_mchans(struct sba_device *sba)
-{
- int mchan_idx;
-
- for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
- mbox_client_peek_data(sba->mchans[mchan_idx]);
-}
-
static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
bool found = false;
@@ -231,7 +224,7 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba)
* would have completed which will create more
* room for new requests.
*/
- sba_peek_mchans(sba);
+ mbox_client_peek_data(sba->mchan);
return NULL;
}
@@ -369,15 +362,11 @@ static void sba_cleanup_pending_requests(struct sba_device *sba)
static int sba_send_mbox_request(struct sba_device *sba,
struct sba_request *req)
{
- int mchans_idx, ret = 0;
-
- /* Select mailbox channel in round-robin fashion */
- mchans_idx = atomic_inc_return(&sba->mchans_current);
- mchans_idx = mchans_idx % sba->mchans_count;
+ int ret = 0;
/* Send message for the request */
req->msg.error = 0;
- ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
+ ret = mbox_send_message(sba->mchan, &req->msg);
if (ret < 0) {
dev_err(sba->dev, "send message failed with error %d", ret);
return ret;
@@ -390,7 +379,7 @@ static int sba_send_mbox_request(struct sba_device *sba,
}
/* Signal txdone for mailbox channel */
- mbox_client_txdone(sba->mchans[mchans_idx], ret);
+ mbox_client_txdone(sba->mchan, ret);
return ret;
}
@@ -402,13 +391,8 @@ static void _sba_process_pending_requests(struct sba_device *sba)
u32 count;
struct sba_request *req;
- /*
- * Process few pending requests
- *
- * For now, we process (<number_of_mailbox_channels> * 8)
- * number of requests at a time.
- */
- count = sba->mchans_count * 8;
+ /* Process few pending requests */
+ count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
while (!list_empty(&sba->reqs_pending_list) && count) {
/* Get the first pending request */
req = list_first_entry(&sba->reqs_pending_list,
@@ -442,7 +426,9 @@ static void sba_process_received_request(struct sba_device *sba,
WARN_ON(tx->cookie < 0);
if (tx->cookie > 0) {
+ spin_lock_irqsave(&sba->reqs_lock, flags);
dma_cookie_complete(tx);
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
dmaengine_desc_get_callback_invoke(tx, NULL);
dma_descriptor_unmap(tx);
tx->callback = NULL;
@@ -570,7 +556,7 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan,
if (ret == DMA_COMPLETE)
return ret;
- sba_peek_mchans(sba);
+ mbox_client_peek_data(sba->mchan);
return dma_cookie_status(dchan, cookie, txstate);
}
@@ -1637,7 +1623,7 @@ static int sba_async_register(struct sba_device *sba)
static int sba_probe(struct platform_device *pdev)
{
- int i, ret = 0, mchans_count;
+ int ret = 0;
struct sba_device *sba;
struct platform_device *mbox_pdev;
struct of_phandle_args args;
@@ -1650,12 +1636,11 @@ static int sba_probe(struct platform_device *pdev)
sba->dev = &pdev->dev;
platform_set_drvdata(pdev, sba);
- /* Number of channels equals number of mailbox channels */
+ /* Number of mailbox channels should be at least 1 */
ret = of_count_phandle_with_args(pdev->dev.of_node,
"mboxes", "#mbox-cells");
if (ret <= 0)
return -ENODEV;
- mchans_count = ret;
/* Determine SBA version from DT compatible string */
if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
@@ -1688,7 +1673,7 @@ static int sba_probe(struct platform_device *pdev)
default:
return -EINVAL;
}
- sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
+ sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
sba->max_cmd_per_req = sba->max_pq_srcs + 3;
sba->max_xor_srcs = sba->max_cmd_per_req - 1;
sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
@@ -1702,55 +1687,30 @@ static int sba_probe(struct platform_device *pdev)
sba->client.knows_txdone = true;
sba->client.tx_tout = 0;
- /* Allocate mailbox channel array */
- sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
- sizeof(*sba->mchans), GFP_KERNEL);
- if (!sba->mchans)
- return -ENOMEM;
-
- /* Request mailbox channels */
- sba->mchans_count = 0;
- for (i = 0; i < mchans_count; i++) {
- sba->mchans[i] = mbox_request_channel(&sba->client, i);
- if (IS_ERR(sba->mchans[i])) {
- ret = PTR_ERR(sba->mchans[i]);
- goto fail_free_mchans;
- }
- sba->mchans_count++;
+ /* Request mailbox channel */
+ sba->mchan = mbox_request_channel(&sba->client, 0);
+ if (IS_ERR(sba->mchan)) {
+ ret = PTR_ERR(sba->mchan);
+ goto fail_free_mchan;
}
- atomic_set(&sba->mchans_current, 0);
/* Find-out underlying mailbox device */
ret = of_parse_phandle_with_args(pdev->dev.of_node,
"mboxes", "#mbox-cells", 0, &args);
if (ret)
- goto fail_free_mchans;
+ goto fail_free_mchan;
mbox_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
if (!mbox_pdev) {
ret = -ENODEV;
- goto fail_free_mchans;
+ goto fail_free_mchan;
}
sba->mbox_dev = &mbox_pdev->dev;
- /* All mailbox channels should be of same ring manager device */
- for (i = 1; i < mchans_count; i++) {
- ret = of_parse_phandle_with_args(pdev->dev.of_node,
- "mboxes", "#mbox-cells", i, &args);
- if (ret)
- goto fail_free_mchans;
- mbox_pdev = of_find_device_by_node(args.np);
- of_node_put(args.np);
- if (sba->mbox_dev != &mbox_pdev->dev) {
- ret = -EINVAL;
- goto fail_free_mchans;
- }
- }
-
/* Prealloc channel resource */
ret = sba_prealloc_channel_resources(sba);
if (ret)
- goto fail_free_mchans;
+ goto fail_free_mchan;
/* Check availability of debugfs */
if (!debugfs_initialized())
@@ -1777,24 +1737,22 @@ skip_debugfs:
goto fail_free_resources;
/* Print device info */
- dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
+ dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
dma_chan_name(&sba->dma_chan), sba->ver+1,
- sba->mchans_count);
+ dev_name(sba->mbox_dev));
return 0;
fail_free_resources:
debugfs_remove_recursive(sba->root);
sba_freeup_channel_resources(sba);
-fail_free_mchans:
- for (i = 0; i < sba->mchans_count; i++)
- mbox_free_channel(sba->mchans[i]);
+fail_free_mchan:
+ mbox_free_channel(sba->mchan);
return ret;
}
static int sba_remove(struct platform_device *pdev)
{
- int i;
struct sba_device *sba = platform_get_drvdata(pdev);
dma_async_device_unregister(&sba->dma_dev);
@@ -1803,8 +1761,7 @@ static int sba_remove(struct platform_device *pdev)
sba_freeup_channel_resources(sba);
- for (i = 0; i < sba->mchans_count; i++)
- mbox_free_channel(sba->mchans[i]);
+ mbox_free_channel(sba->mchan);
return 0;
}
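
The bcm-sba-raid.c changes above collapse the per-device array of mailbox channels into a single channel, so request submission no longer needs round-robin selection. A minimal sketch of the resulting submit path, assuming the standard Linux mailbox client API and the Broadcom message type from <linux/mailbox/brcm-message.h>; the my_sba_* names are hypothetical:

#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>

/* Hypothetical container mirroring the reduced sba_device layout. */
struct my_sba_dev {
        struct mbox_client client;      /* client.knows_txdone = true */
        struct mbox_chan *mchan;        /* the one channel, index 0 in "mboxes" */
};

/* Send one request on the only mailbox channel; no channel selection. */
static int my_sba_send(struct my_sba_dev *sba, struct brcm_message *msg)
{
        int ret;

        msg->error = 0;
        ret = mbox_send_message(sba->mchan, msg);
        if (ret < 0)
                return ret;

        /* Pick up the controller-reported error, then ack txdone ourselves. */
        ret = msg->error;
        mbox_client_txdone(sba->mchan, ret);
        return ret;
}

Completion polling likewise reduces to a single mbox_client_peek_data(sba->mchan) call, as seen in sba_alloc_request() and sba_tx_status() above.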
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 74794c9859f6..da74fd74636b 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1319,8 +1319,8 @@ static void coh901318_list_print(struct coh901318_chan *cohc,
int i = 0;
while (l) {
- dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%pad"
- ", dst 0x%pad, link 0x%pad virt_link_addr 0x%p\n",
+ dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src %pad"
+ ", dst %pad, link %pad virt_link_addr 0x%p\n",
i, l, l->control, &l->src_addr, &l->dst_addr,
&l->link_addr, l->virt_link_addr);
i++;
@@ -2231,7 +2231,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
spin_lock_irqsave(&cohc->lock, flg);
dev_vdbg(COHC_2_DEV(cohc),
- "[%s] channel %d src 0x%pad dest 0x%pad size %zu\n",
+ "[%s] channel %d src %pad dest %pad size %zu\n",
__func__, cohc->id, &src, &dest, size);
if (flags & DMA_PREP_INTERRUPT)
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 7f0b9aa15867..2419fe524daa 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -72,6 +72,9 @@
#define AXI_DMAC_FLAG_CYCLIC BIT(0)
+/* The maximum ID allocated by the hardware is 31 */
+#define AXI_DMAC_SG_UNUSED 32U
+
struct axi_dmac_sg {
dma_addr_t src_addr;
dma_addr_t dest_addr;
@@ -80,6 +83,7 @@ struct axi_dmac_sg {
unsigned int dest_stride;
unsigned int src_stride;
unsigned int id;
+ bool schedule_when_free;
};
struct axi_dmac_desc {
@@ -200,11 +204,21 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
}
sg = &desc->sg[desc->num_submitted];
+ /* Already queued in cyclic mode. Wait for it to finish */
+ if (sg->id != AXI_DMAC_SG_UNUSED) {
+ sg->schedule_when_free = true;
+ return;
+ }
+
desc->num_submitted++;
- if (desc->num_submitted == desc->num_sgs)
- chan->next_desc = NULL;
- else
+ if (desc->num_submitted == desc->num_sgs) {
+ if (desc->cyclic)
+ desc->num_submitted = 0; /* Start again */
+ else
+ chan->next_desc = NULL;
+ } else {
chan->next_desc = desc;
+ }
sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
@@ -220,9 +234,11 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
/*
* If the hardware supports cyclic transfers and there is no callback to
- * call, enable hw cyclic mode to avoid unnecessary interrupts.
+ * call and only a single segment, enable hw cyclic mode to avoid
+ * unnecessary interrupts.
*/
- if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback)
+ if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
+ desc->num_sgs == 1)
flags |= AXI_DMAC_FLAG_CYCLIC;
axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
@@ -237,37 +253,52 @@ static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
struct axi_dmac_desc, vdesc.node);
}
-static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
+static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
unsigned int completed_transfers)
{
struct axi_dmac_desc *active;
struct axi_dmac_sg *sg;
+ bool start_next = false;
active = axi_dmac_active_desc(chan);
if (!active)
- return;
+ return false;
- if (active->cyclic) {
- vchan_cyclic_callback(&active->vdesc);
- } else {
- do {
- sg = &active->sg[active->num_completed];
- if (!(BIT(sg->id) & completed_transfers))
- break;
- active->num_completed++;
- if (active->num_completed == active->num_sgs) {
+ do {
+ sg = &active->sg[active->num_completed];
+ if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
+ break;
+ if (!(BIT(sg->id) & completed_transfers))
+ break;
+ active->num_completed++;
+ sg->id = AXI_DMAC_SG_UNUSED;
+ if (sg->schedule_when_free) {
+ sg->schedule_when_free = false;
+ start_next = true;
+ }
+
+ if (active->cyclic)
+ vchan_cyclic_callback(&active->vdesc);
+
+ if (active->num_completed == active->num_sgs) {
+ if (active->cyclic) {
+ active->num_completed = 0; /* wrap around */
+ } else {
list_del(&active->vdesc.node);
vchan_cookie_complete(&active->vdesc);
active = axi_dmac_active_desc(chan);
}
- } while (active);
- }
+ }
+ } while (active);
+
+ return start_next;
}
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
struct axi_dmac *dmac = devid;
unsigned int pending;
+ bool start_next = false;
pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
if (!pending)
@@ -281,10 +312,10 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
unsigned int completed;
completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
- axi_dmac_transfer_done(&dmac->chan, completed);
+ start_next = axi_dmac_transfer_done(&dmac->chan, completed);
}
/* Space has become available in the descriptor queue */
- if (pending & AXI_DMAC_IRQ_SOT)
+ if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
axi_dmac_start_transfer(&dmac->chan);
spin_unlock(&dmac->chan.vchan.lock);
@@ -334,12 +365,16 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
struct axi_dmac_desc *desc;
+ unsigned int i;
desc = kzalloc(sizeof(struct axi_dmac_desc) +
sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
if (!desc)
return NULL;
+ for (i = 0; i < num_sgs; i++)
+ desc->sg[i].id = AXI_DMAC_SG_UNUSED;
+
desc->num_sgs = num_sgs;
return desc;
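
The dma-axi-dmac.c hunks above spread the new AXI_DMAC_SG_UNUSED bookkeeping across allocation, submission and completion; here is a condensed sketch of the idea with hypothetical my_* names, keeping only the fields involved in the sentinel handling:

#include <linux/types.h>

#define MY_SG_UNUSED    32U     /* hardware transfer IDs only go up to 31 */

struct my_sg {
        unsigned int id;                /* MY_SG_UNUSED until submitted */
        bool schedule_when_free;        /* resubmit once hardware releases it */
};

/* Submission path: a cyclic descriptor may wrap onto a segment in flight. */
static bool my_submit_sg(struct my_sg *sg, unsigned int hw_id)
{
        if (sg->id != MY_SG_UNUSED) {
                sg->schedule_when_free = true;  /* retry from the IRQ handler */
                return false;
        }
        sg->id = hw_id;         /* ID read back from AXI_DMAC_REG_TRANSFER_ID */
        return true;
}

/* Completion path: release the ID and report whether a restart is wanted. */
static bool my_complete_sg(struct my_sg *sg)
{
        bool start_next = sg->schedule_when_free;

        sg->id = MY_SG_UNUSED;
        sg->schedule_when_free = false;
        return start_next;
}

This is why axi_dmac_transfer_done() now returns a bool and the interrupt handler restarts the transfer when it is set, even without a start-of-transfer interrupt.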
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 34ff53290b03..47edc7fbf91f 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -702,6 +702,7 @@ static int dmatest_func(void *data)
* free it this time?" dancing. For now, just
* leave it dangling.
*/
+ WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
dmaengine_unmap_put(um);
result("test timed out", total_tests, src_off, dst_off,
len, 0);
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 54db1411ce73..0391f930aecc 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -23,6 +23,7 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -730,14 +731,23 @@ static int mdc_slave_config(struct dma_chan *chan,
return 0;
}
+static int mdc_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct mdc_chan *mchan = to_mdc_chan(chan);
+ struct device *dev = mdma2dev(mchan->mdma);
+
+ return pm_runtime_get_sync(dev);
+}
+
static void mdc_free_chan_resources(struct dma_chan *chan)
{
struct mdc_chan *mchan = to_mdc_chan(chan);
struct mdc_dma *mdma = mchan->mdma;
+ struct device *dev = mdma2dev(mdma);
mdc_terminate_all(chan);
-
mdma->soc->disable_chan(mchan);
+ pm_runtime_put(dev);
}
static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
@@ -854,6 +864,22 @@ static const struct of_device_id mdc_dma_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mdc_dma_of_match);
+static int img_mdc_runtime_suspend(struct device *dev)
+{
+ struct mdc_dma *mdma = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(mdma->clk);
+
+ return 0;
+}
+
+static int img_mdc_runtime_resume(struct device *dev)
+{
+ struct mdc_dma *mdma = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(mdma->clk);
+}
+
static int mdc_dma_probe(struct platform_device *pdev)
{
struct mdc_dma *mdma;
@@ -883,10 +909,6 @@ static int mdc_dma_probe(struct platform_device *pdev)
if (IS_ERR(mdma->clk))
return PTR_ERR(mdma->clk);
- ret = clk_prepare_enable(mdma->clk);
- if (ret)
- return ret;
-
dma_cap_zero(mdma->dma_dev.cap_mask);
dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
@@ -919,12 +941,13 @@ static int mdc_dma_probe(struct platform_device *pdev)
"img,max-burst-multiplier",
&mdma->max_burst_mult);
if (ret)
- goto disable_clk;
+ return ret;
mdma->dma_dev.dev = &pdev->dev;
mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
+ mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
mdma->dma_dev.device_tx_status = mdc_tx_status;
mdma->dma_dev.device_issue_pending = mdc_issue_pending;
@@ -945,15 +968,14 @@ static int mdc_dma_probe(struct platform_device *pdev)
mchan->mdma = mdma;
mchan->chan_nr = i;
mchan->irq = platform_get_irq(pdev, i);
- if (mchan->irq < 0) {
- ret = mchan->irq;
- goto disable_clk;
- }
+ if (mchan->irq < 0)
+ return mchan->irq;
+
ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
IRQ_TYPE_LEVEL_HIGH,
dev_name(&pdev->dev), mchan);
if (ret < 0)
- goto disable_clk;
+ return ret;
mchan->vc.desc_free = mdc_desc_free;
vchan_init(&mchan->vc, &mdma->dma_dev);
@@ -962,14 +984,19 @@ static int mdc_dma_probe(struct platform_device *pdev)
mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
sizeof(struct mdc_hw_list_desc),
4, 0);
- if (!mdma->desc_pool) {
- ret = -ENOMEM;
- goto disable_clk;
+ if (!mdma->desc_pool)
+ return -ENOMEM;
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = img_mdc_runtime_resume(&pdev->dev);
+ if (ret)
+ return ret;
}
ret = dma_async_device_register(&mdma->dma_dev);
if (ret)
- goto disable_clk;
+ goto suspend;
ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
if (ret)
@@ -982,8 +1009,10 @@ static int mdc_dma_probe(struct platform_device *pdev)
unregister:
dma_async_device_unregister(&mdma->dma_dev);
-disable_clk:
- clk_disable_unprepare(mdma->clk);
+suspend:
+ if (!pm_runtime_enabled(&pdev->dev))
+ img_mdc_runtime_suspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -1004,14 +1033,47 @@ static int mdc_dma_remove(struct platform_device *pdev)
tasklet_kill(&mchan->vc.task);
}
- clk_disable_unprepare(mdma->clk);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ img_mdc_runtime_suspend(&pdev->dev);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int img_mdc_suspend_late(struct device *dev)
+{
+ struct mdc_dma *mdma = dev_get_drvdata(dev);
+ int i;
+
+ /* Check that all channels are idle */
+ for (i = 0; i < mdma->nr_channels; i++) {
+ struct mdc_chan *mchan = &mdma->channels[i];
+
+ if (unlikely(mchan->desc))
+ return -EBUSY;
+ }
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int img_mdc_resume_early(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops img_mdc_pm_ops = {
+ SET_RUNTIME_PM_OPS(img_mdc_runtime_suspend,
+ img_mdc_runtime_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(img_mdc_suspend_late,
+ img_mdc_resume_early)
+};
+
static struct platform_driver mdc_dma_driver = {
.driver = {
.name = "img-mdc-dma",
+ .pm = &img_mdc_pm_ops,
.of_match_table = of_match_ptr(mdc_dma_of_match),
},
.probe = mdc_dma_probe,
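
The img-mdc-dma.c conversion above moves all clock handling into runtime PM callbacks and drops the explicit clk_prepare_enable() from probe. A minimal sketch of that pattern under the same assumption the driver makes (with runtime PM compiled out, the clock is simply left on); the my_* names are hypothetical:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

struct my_dma {
        struct clk *clk;        /* functional clock, gated via runtime PM */
};

/* The clock is only touched from the runtime PM callbacks. */
static int my_runtime_suspend(struct device *dev)
{
        struct my_dma *d = dev_get_drvdata(dev);

        clk_disable_unprepare(d->clk);
        return 0;
}

static int my_runtime_resume(struct device *dev)
{
        struct my_dma *d = dev_get_drvdata(dev);

        return clk_prepare_enable(d->clk);
}

/*
 * Probe only arms runtime PM; channels then take a reference in
 * device_alloc_chan_resources() via pm_runtime_get_sync().
 */
static int my_arm_rpm(struct platform_device *pdev)
{
        pm_runtime_enable(&pdev->dev);
        if (!pm_runtime_enabled(&pdev->dev))
                /* !CONFIG_PM: keep the clock permanently enabled. */
                return my_runtime_resume(&pdev->dev);
        return 0;
}

For system sleep the driver relies on pm_runtime_force_suspend()/pm_runtime_force_resume(), which reuse the same two callbacks.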
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a67ec1bdc4e0..2184881afe76 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -178,6 +178,14 @@
#define SDMA_WATERMARK_LEVEL_HWE BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT BIT(31)
+#define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+#define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \
+ BIT(DMA_MEM_TO_DEV) | \
+ BIT(DMA_DEV_TO_DEV))
+
/*
* Mode/Count of data node descriptors - IPCv2
*/
@@ -1851,9 +1859,9 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
- sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
+ sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
+ sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index d3f918a9ee76..50559338239b 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1286,7 +1286,6 @@ MODULE_DEVICE_TABLE(of, nbpf_match);
static int nbpf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct of_device_id *of_id = of_match_device(nbpf_match, dev);
struct device_node *np = dev->of_node;
struct nbpf_device *nbpf;
struct dma_device *dma_dev;
@@ -1300,10 +1299,10 @@ static int nbpf_probe(struct platform_device *pdev)
BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);
/* DT only */
- if (!np || !of_id || !of_id->data)
+ if (!np)
return -ENODEV;
- cfg = of_id->data;
+ cfg = of_device_get_match_data(dev);
num_channels = cfg->num_channels;
nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index f9028e9d0dfc..afd8f27bda96 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -123,7 +123,7 @@ struct pch_dma_chan {
struct pch_dma {
struct dma_device dma;
void __iomem *membase;
- struct pci_pool *pool;
+ struct dma_pool *pool;
struct pch_dma_regs regs;
struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
struct pch_dma_chan channels[MAX_CHAN_NR];
@@ -437,7 +437,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
struct pch_dma *pd = to_pd(chan->device);
dma_addr_t addr;
- desc = pci_pool_zalloc(pd->pool, flags, &addr);
+ desc = dma_pool_zalloc(pd->pool, flags, &addr);
if (desc) {
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
@@ -549,7 +549,7 @@ static void pd_free_chan_resources(struct dma_chan *chan)
spin_unlock_irq(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
- pci_pool_free(pd->pool, desc, desc->txd.phys);
+ dma_pool_free(pd->pool, desc, desc->txd.phys);
pdc_enable_irq(chan, 0);
}
@@ -880,7 +880,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
goto err_iounmap;
}
- pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
+ pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev,
sizeof(struct pch_dma_desc), 4, 0);
if (!pd->pool) {
dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
@@ -931,7 +931,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
return 0;
err_free_pool:
- pci_pool_destroy(pd->pool);
+ dma_pool_destroy(pd->pool);
err_free_irq:
free_irq(pdev->irq, pd);
err_iounmap:
@@ -963,7 +963,7 @@ static void pch_dma_remove(struct pci_dev *pdev)
tasklet_kill(&pd_chan->tasklet);
}
- pci_pool_destroy(pd->pool);
+ dma_pool_destroy(pd->pool);
pci_iounmap(pdev, pd->membase);
pci_release_regions(pdev);
pci_disable_device(pdev);
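
The pch_dma.c hunks above are a mechanical switch from the deprecated pci_pool_* wrappers to the underlying DMA pool API; each call maps one to one, with dma_pool_create() taking a struct device instead of a struct pci_dev. A minimal sketch of the dma_pool lifecycle, using a hypothetical descriptor type:

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct my_desc { u32 words[8]; };       /* hypothetical hardware descriptor */

static int my_pool_demo(struct device *dev)
{
        struct dma_pool *pool;
        struct my_desc *desc;
        dma_addr_t phys;

        /* name, device, block size, alignment, boundary (0 = none) */
        pool = dma_pool_create("my_desc_pool", dev, sizeof(*desc), 4, 0);
        if (!pool)
                return -ENOMEM;

        /* zeroed block plus its bus address, as pci_pool_zalloc() returned */
        desc = dma_pool_zalloc(pool, GFP_KERNEL, &phys);
        if (!desc) {
                dma_pool_destroy(pool);
                return -ENOMEM;
        }

        /* ...hand phys to the controller, touch desc from the CPU... */

        dma_pool_free(pool, desc, phys);
        dma_pool_destroy(pool);         /* all blocks must be freed first */
        return 0;
}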
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index f122c2a7b9f0..d7327fd5f445 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2390,7 +2390,8 @@ static inline void _init_desc(struct dma_pl330_desc *desc)
}
/* Returns the number of descriptors added to the DMAC pool */
-static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
+static int add_desc(struct list_head *pool, spinlock_t *lock,
+ gfp_t flg, int count)
{
struct dma_pl330_desc *desc;
unsigned long flags;
@@ -2400,27 +2401,28 @@ static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
if (!desc)
return 0;
- spin_lock_irqsave(&pl330->pool_lock, flags);
+ spin_lock_irqsave(lock, flags);
for (i = 0; i < count; i++) {
_init_desc(&desc[i]);
- list_add_tail(&desc[i].node, &pl330->desc_pool);
+ list_add_tail(&desc[i].node, pool);
}
- spin_unlock_irqrestore(&pl330->pool_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
return count;
}
-static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330)
+static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
+ spinlock_t *lock)
{
struct dma_pl330_desc *desc = NULL;
unsigned long flags;
- spin_lock_irqsave(&pl330->pool_lock, flags);
+ spin_lock_irqsave(lock, flags);
- if (!list_empty(&pl330->desc_pool)) {
- desc = list_entry(pl330->desc_pool.next,
+ if (!list_empty(pool)) {
+ desc = list_entry(pool->next,
struct dma_pl330_desc, node);
list_del_init(&desc->node);
@@ -2429,7 +2431,7 @@ static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330)
desc->txd.callback = NULL;
}
- spin_unlock_irqrestore(&pl330->pool_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
return desc;
}
@@ -2441,20 +2443,18 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
struct dma_pl330_desc *desc;
/* Pluck one desc from the pool of DMAC */
- desc = pluck_desc(pl330);
+ desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock);
/* If the DMAC pool is empty, alloc new */
if (!desc) {
- if (!add_desc(pl330, GFP_ATOMIC, 1))
- return NULL;
+ DEFINE_SPINLOCK(lock);
+ LIST_HEAD(pool);
- /* Try again */
- desc = pluck_desc(pl330);
- if (!desc) {
- dev_err(pch->dmac->ddma.dev,
- "%s:%d ALERT!\n", __func__, __LINE__);
+ if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
return NULL;
- }
+
+ desc = pluck_desc(&pool, &lock);
+ WARN_ON(!desc || !list_empty(&pool));
}
/* Initialize the descriptor */
@@ -2868,7 +2868,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&pl330->pool_lock);
/* Create a descriptor pool of default size */
- if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
+ if (!add_desc(&pl330->desc_pool, &pl330->pool_lock,
+ GFP_KERNEL, NR_DEFAULT_DESC))
dev_warn(&adev->dev, "unable to allocate desc\n");
INIT_LIST_HEAD(&pd->channels);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 6d89fb6a6a92..d076940e0c69 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -46,6 +46,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
+#include <linux/circ_buf.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
@@ -78,6 +79,8 @@ struct bam_async_desc {
struct bam_desc_hw *curr_desc;
+ /* list node for the desc in the bam_chan list of descriptors */
+ struct list_head desc_node;
enum dma_transfer_direction dir;
size_t length;
struct bam_desc_hw desc[0];
@@ -347,6 +350,8 @@ static const struct reg_offset_data bam_v1_7_reg_info[] = {
#define BAM_DESC_FIFO_SIZE SZ_32K
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
#define BAM_FIFO_SIZE (SZ_32K - 8)
+#define IS_BUSY(chan) (CIRC_SPACE(bchan->tail, bchan->head,\
+ MAX_DESCRIPTORS + 1) == 0)
struct bam_chan {
struct virt_dma_chan vc;
@@ -356,8 +361,6 @@ struct bam_chan {
/* configuration from device tree */
u32 id;
- struct bam_async_desc *curr_txd; /* current running dma */
-
/* runtime configuration */
struct dma_slave_config slave;
@@ -372,6 +375,8 @@ struct bam_chan {
unsigned int initialized; /* is the channel hw initialized? */
unsigned int paused; /* is the channel paused? */
unsigned int reconfigure; /* new slave config? */
+ /* list of descriptors currently processed */
+ struct list_head desc_list;
struct list_head node;
};
@@ -539,7 +544,7 @@ static void bam_free_chan(struct dma_chan *chan)
vchan_free_chan_resources(to_virt_chan(chan));
- if (bchan->curr_txd) {
+ if (!list_empty(&bchan->desc_list)) {
dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
goto err;
}
@@ -632,8 +637,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
if (flags & DMA_PREP_INTERRUPT)
async_desc->flags |= DESC_FLAG_EOT;
- else
- async_desc->flags |= DESC_FLAG_INT;
async_desc->num_desc = num_alloc;
async_desc->curr_desc = async_desc->desc;
@@ -684,14 +687,16 @@ err_out:
static int bam_dma_terminate_all(struct dma_chan *chan)
{
struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_async_desc *async_desc, *tmp;
unsigned long flag;
LIST_HEAD(head);
/* remove all transactions, including active transaction */
spin_lock_irqsave(&bchan->vc.lock, flag);
- if (bchan->curr_txd) {
- list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
- bchan->curr_txd = NULL;
+ list_for_each_entry_safe(async_desc, tmp,
+ &bchan->desc_list, desc_node) {
+ list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
+ list_del(&async_desc->desc_node);
}
vchan_get_all_descriptors(&bchan->vc, &head);
@@ -763,9 +768,9 @@ static int bam_resume(struct dma_chan *chan)
*/
static u32 process_channel_irqs(struct bam_device *bdev)
{
- u32 i, srcs, pipe_stts;
+ u32 i, srcs, pipe_stts, offset, avail;
unsigned long flags;
- struct bam_async_desc *async_desc;
+ struct bam_async_desc *async_desc, *tmp;
srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
@@ -785,27 +790,40 @@ static u32 process_channel_irqs(struct bam_device *bdev)
writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
spin_lock_irqsave(&bchan->vc.lock, flags);
- async_desc = bchan->curr_txd;
- if (async_desc) {
- async_desc->num_desc -= async_desc->xfer_len;
- async_desc->curr_desc += async_desc->xfer_len;
- bchan->curr_txd = NULL;
+ offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) &
+ P_SW_OFSTS_MASK;
+ offset /= sizeof(struct bam_desc_hw);
+
+ /* Number of completed descriptors available to process */
+ avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
+
+ list_for_each_entry_safe(async_desc, tmp,
+ &bchan->desc_list, desc_node) {
+ /* Not enough data to read */
+ if (avail < async_desc->xfer_len)
+ break;
/* manage FIFO */
bchan->head += async_desc->xfer_len;
bchan->head %= MAX_DESCRIPTORS;
+ async_desc->num_desc -= async_desc->xfer_len;
+ async_desc->curr_desc += async_desc->xfer_len;
+ avail -= async_desc->xfer_len;
+
/*
- * if complete, process cookie. Otherwise
+ * if complete, process cookie. Otherwise
* push back to front of desc_issued so that
* it gets restarted by the tasklet
*/
- if (!async_desc->num_desc)
+ if (!async_desc->num_desc) {
vchan_cookie_complete(&async_desc->vd);
- else
+ } else {
list_add(&async_desc->vd.node,
- &bchan->vc.desc_issued);
+ &bchan->vc.desc_issued);
+ }
+ list_del(&async_desc->desc_node);
}
spin_unlock_irqrestore(&bchan->vc.lock, flags);
@@ -867,6 +885,7 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_async_desc *async_desc;
struct virt_dma_desc *vd;
int ret;
size_t residue = 0;
@@ -882,11 +901,17 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
spin_lock_irqsave(&bchan->vc.lock, flags);
vd = vchan_find_desc(&bchan->vc, cookie);
- if (vd)
+ if (vd) {
residue = container_of(vd, struct bam_async_desc, vd)->length;
- else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
- for (i = 0; i < bchan->curr_txd->num_desc; i++)
- residue += bchan->curr_txd->curr_desc[i].size;
+ } else {
+ list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
+ if (async_desc->vd.tx.cookie != cookie)
+ continue;
+
+ for (i = 0; i < async_desc->num_desc; i++)
+ residue += async_desc->curr_desc[i].size;
+ }
+ }
spin_unlock_irqrestore(&bchan->vc.lock, flags);
@@ -927,63 +952,86 @@ static void bam_start_dma(struct bam_chan *bchan)
{
struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
struct bam_device *bdev = bchan->bdev;
- struct bam_async_desc *async_desc;
+ struct bam_async_desc *async_desc = NULL;
struct bam_desc_hw *desc;
struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
sizeof(struct bam_desc_hw));
int ret;
+ unsigned int avail;
+ struct dmaengine_desc_callback cb;
lockdep_assert_held(&bchan->vc.lock);
if (!vd)
return;
- list_del(&vd->node);
-
- async_desc = container_of(vd, struct bam_async_desc, vd);
- bchan->curr_txd = async_desc;
-
ret = pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return;
- /* on first use, initialize the channel hardware */
- if (!bchan->initialized)
- bam_chan_init_hw(bchan, async_desc->dir);
+ while (vd && !IS_BUSY(bchan)) {
+ list_del(&vd->node);
- /* apply new slave config changes, if necessary */
- if (bchan->reconfigure)
- bam_apply_new_config(bchan, async_desc->dir);
+ async_desc = container_of(vd, struct bam_async_desc, vd);
- desc = bchan->curr_txd->curr_desc;
+ /* on first use, initialize the channel hardware */
+ if (!bchan->initialized)
+ bam_chan_init_hw(bchan, async_desc->dir);
- if (async_desc->num_desc > MAX_DESCRIPTORS)
- async_desc->xfer_len = MAX_DESCRIPTORS;
- else
- async_desc->xfer_len = async_desc->num_desc;
+ /* apply new slave config changes, if necessary */
+ if (bchan->reconfigure)
+ bam_apply_new_config(bchan, async_desc->dir);
- /* set any special flags on the last descriptor */
- if (async_desc->num_desc == async_desc->xfer_len)
- desc[async_desc->xfer_len - 1].flags |=
- cpu_to_le16(async_desc->flags);
- else
- desc[async_desc->xfer_len - 1].flags |=
- cpu_to_le16(DESC_FLAG_INT);
+ desc = async_desc->curr_desc;
+ avail = CIRC_SPACE(bchan->tail, bchan->head,
+ MAX_DESCRIPTORS + 1);
+
+ if (async_desc->num_desc > avail)
+ async_desc->xfer_len = avail;
+ else
+ async_desc->xfer_len = async_desc->num_desc;
+
+ /* set any special flags on the last descriptor */
+ if (async_desc->num_desc == async_desc->xfer_len)
+ desc[async_desc->xfer_len - 1].flags |=
+ cpu_to_le16(async_desc->flags);
- if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
- u32 partial = MAX_DESCRIPTORS - bchan->tail;
+ vd = vchan_next_desc(&bchan->vc);
- memcpy(&fifo[bchan->tail], desc,
- partial * sizeof(struct bam_desc_hw));
- memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
+ dmaengine_desc_get_callback(&async_desc->vd.tx, &cb);
+
+ /*
+ * An interrupt is generated at this desc if:
+ * - the FIFO is full,
+ * - there are no more descriptors to add, or
+ * - a completion callback was requested for this desc.
+ *   In that case, BAM delivers the completion callback
+ *   for this desc and continues processing the next one.
+ */
+ if (((avail <= async_desc->xfer_len) || !vd ||
+ dmaengine_desc_callback_valid(&cb)) &&
+ !(async_desc->flags & DESC_FLAG_EOT))
+ desc[async_desc->xfer_len - 1].flags |=
+ cpu_to_le16(DESC_FLAG_INT);
+
+ if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
+ u32 partial = MAX_DESCRIPTORS - bchan->tail;
+
+ memcpy(&fifo[bchan->tail], desc,
+ partial * sizeof(struct bam_desc_hw));
+ memcpy(fifo, &desc[partial],
+ (async_desc->xfer_len - partial) *
sizeof(struct bam_desc_hw));
- } else {
- memcpy(&fifo[bchan->tail], desc,
- async_desc->xfer_len * sizeof(struct bam_desc_hw));
- }
+ } else {
+ memcpy(&fifo[bchan->tail], desc,
+ async_desc->xfer_len *
+ sizeof(struct bam_desc_hw));
+ }
- bchan->tail += async_desc->xfer_len;
- bchan->tail %= MAX_DESCRIPTORS;
+ bchan->tail += async_desc->xfer_len;
+ bchan->tail %= MAX_DESCRIPTORS;
+ list_add_tail(&async_desc->desc_node, &bchan->desc_list);
+ }
/* ensure descriptor writes and dma start not reordered */
wmb();
@@ -1012,7 +1060,7 @@ static void dma_tasklet(unsigned long data)
bchan = &bdev->channels[i];
spin_lock_irqsave(&bchan->vc.lock, flags);
- if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
+ if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
bam_start_dma(bchan);
spin_unlock_irqrestore(&bchan->vc.lock, flags);
}
@@ -1033,7 +1081,7 @@ static void bam_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&bchan->vc.lock, flags);
/* if work pending and idle, start a transaction */
- if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
+ if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
bam_start_dma(bchan);
spin_unlock_irqrestore(&bchan->vc.lock, flags);
@@ -1133,6 +1181,7 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
vchan_init(&bchan->vc, &bdev->common);
bchan->vc.desc_free = bam_dma_free_desc;
+ INIT_LIST_HEAD(&bchan->desc_list);
}
static const struct of_device_id bam_of_match[] = {
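
The qcom/bam_dma.c rework above drives all descriptor FIFO accounting through the circ_buf helpers: bchan->tail is the producer index advanced by bam_start_dma(), bchan->head is the consumer index advanced as the hardware reports completions. A short sketch of how those macros are used, assuming a power-of-two ring size as CIRC_CNT()/CIRC_SPACE() require (the driver passes MAX_DESCRIPTORS + 1); the my_* names are hypothetical:

#include <linux/circ_buf.h>
#include <linux/types.h>

#define MY_RING_SIZE    4096U   /* power of two, so the index masking works */

struct my_ring {
        unsigned int tail;      /* producer: next slot the driver writes */
        unsigned int head;      /* consumer: next slot the hardware retires */
};

/* Slots the driver may still fill before the ring is full. */
static unsigned int my_ring_space(const struct my_ring *r)
{
        return CIRC_SPACE(r->tail, r->head, MY_RING_SIZE);
}

/* Descriptors the hardware has finished, given its write-back offset. */
static unsigned int my_ring_done(const struct my_ring *r, unsigned int hw_offset)
{
        return CIRC_CNT(hw_offset, r->head, MY_RING_SIZE);
}

/* Equivalent of the IS_BUSY() check added above: no room left to queue. */
static bool my_ring_busy(const struct my_ring *r)
{
        return my_ring_space(r) == 0;
}

This is what lets bam_start_dma() keep queueing descriptors from several async descriptors until the FIFO fills, instead of tracking a single curr_txd.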