author    Linus Torvalds <torvalds@linux-foundation.org>  2022-08-04 18:44:38 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-08-04 18:44:38 -0700
commit    31be1d0fbd950395701d9fd47d8fb1f99c996f61 (patch)
tree      a47055246206605177b455114203c7ccea9d0a50 /drivers
parent    36001a2fa6cc63d58664a2a99b90a864f9eb587a (diff)
parent    a1873f837f9e5c1001462a635af1b0bab31aa9fd (diff)
Merge tag 'dmaengine-6.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine updates from Vinod Koul:
 "New support / Core:
   - Remove DMA_MEMCPY_SG for lack of users
   - Tegra 234 dmaengine support
   - Mediatek MT8365 dma support
   - Apple ADMAC driver

  Updates:
   - Yaml conversion for ST-Ericsson DMA40 binding and Freescale edma
   - rz-dmac updates and device_synchronize support
   - Bunch of typo in comments fixes in drivers
   - multithread support in sf-pdma driver"

* tag 'dmaengine-6.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (50 commits)
  dmaengine: mediatek: mtk-hsdma: Fix typo 'the the' in comment
  dmaengine: axi-dmac: check cache coherency register
  dmaengine: sh: rz-dmac: Add device_synchronize callback
  dmaengine: sprd: Cleanup in .remove() after pm_runtime_get_sync() failed
  dmaengine: tegra: Add terminate() for Tegra234
  dt-bindings: dmaengine: Add compatible for Tegra234
  dmaengine: xilinx: use strscpy to replace strlcpy
  dmaengine: imx-sdma: Add FIFO stride support for multi FIFO script
  dmaengine: idxd: Correct IAX operation code names
  dmaengine: imx-dma: Cast of_device_get_match_data() with (uintptr_t)
  dmaengine: dw-axi-dmac: ignore interrupt if no descriptor
  dmaengine: dw-axi-dmac: do not print NULL LLI during error
  dmaengine: altera-msgdma: Fixed some inconsistent function name descriptions
  dmaengine: imx-sdma: Add missing struct documentation
  dmaengine: sf-pdma: Add multithread support for a DMA channel
  dt-bindings: dma: dw-axi-dmac: extend the number of interrupts
  dmaengine: dmatest: use strscpy to replace strlcpy
  dmaengine: ste_dma40: fix typo in comment
  dmaengine: jz4780: fix typo in comment
  dmaengine: s3c24xx: fix typo in comment
  ...
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/dma/Kconfig | 8
-rw-r--r-- drivers/dma/Makefile | 1
-rw-r--r-- drivers/dma/altera-msgdma.c | 4
-rw-r--r-- drivers/dma/amba-pl08x.c | 2
-rw-r--r-- drivers/dma/apple-admac.c | 818
-rw-r--r-- drivers/dma/at_xdmac.c | 2
-rw-r--r-- drivers/dma/dma-axi-dmac.c | 16
-rw-r--r-- drivers/dma/dma-jz4780.c | 2
-rw-r--r-- drivers/dma/dmaengine.c | 7
-rw-r--r-- drivers/dma/dmatest.c | 45
-rw-r--r-- drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 11
-rw-r--r-- drivers/dma/dw-edma/dw-edma-v0-core.c | 8
-rw-r--r-- drivers/dma/dw/rzn1-dmamux.c | 3
-rw-r--r-- drivers/dma/ep93xx_dma.c | 2
-rw-r--r-- drivers/dma/fsl-edma-common.c | 3
-rw-r--r-- drivers/dma/imx-dma.c | 2
-rw-r--r-- drivers/dma/imx-sdma.c | 38
-rw-r--r-- drivers/dma/mediatek/mtk-cqdma.c | 2
-rw-r--r-- drivers/dma/mediatek/mtk-hsdma.c | 4
-rw-r--r-- drivers/dma/mv_xor_v2.c | 2
-rw-r--r-- drivers/dma/owl-dma.c | 2
-rw-r--r-- drivers/dma/s3c24xx-dma.c | 2
-rw-r--r-- drivers/dma/sf-pdma/sf-pdma.c | 44
-rw-r--r-- drivers/dma/sh/rz-dmac.c | 17
-rw-r--r-- drivers/dma/sprd-dma.c | 5
-rw-r--r-- drivers/dma/ste_dma40.c | 2
-rw-r--r-- drivers/dma/stm32-mdma.c | 5
-rw-r--r-- drivers/dma/sun4i-dma.c | 32
-rw-r--r-- drivers/dma/tegra186-gpc-dma.c | 26
-rw-r--r-- drivers/dma/ti/k3-psil-j721s2.c | 8
-rw-r--r-- drivers/dma/xilinx/xilinx_dma.c | 122
-rw-r--r-- drivers/dma/xilinx/xilinx_dpdma.c | 6
32 files changed, 1035 insertions(+), 216 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 487ed4ddc3be..a06d2a7627aa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -85,6 +85,14 @@ config AMCC_PPC440SPE_ADMA
help
Enable support for the AMCC PPC440SPe RAID engines.
+config APPLE_ADMAC
+ tristate "Apple ADMAC support"
+ depends on ARCH_APPLE || COMPILE_TEST
+ select DMA_ENGINE
+ default ARCH_APPLE
+ help
+	  Enable support for the Audio DMA Controller found on Apple Silicon SoCs.
+
config AT_HDMAC
tristate "Atmel AHB DMA support"
depends on ARCH_AT91
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2f1b87ffd7ab..10f7d4241001 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_AMD_PTDMA) += ptdma/
+obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 6f56dfd375e3..4153c2edb049 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -749,7 +749,7 @@ static irqreturn_t msgdma_irq_handler(int irq, void *data)
}
/**
- * msgdma_chan_remove - Channel remove function
+ * msgdma_dev_remove() - Device remove function
* @mdev: Pointer to the Altera mSGDMA device structure
*/
static void msgdma_dev_remove(struct msgdma_device *mdev)
@@ -918,7 +918,7 @@ fail:
}
/**
- * msgdma_dma_remove - Driver remove function
+ * msgdma_remove() - Driver remove function
* @pdev: Pointer to the platform_device structure
*
* Return: Always '0'
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index a4a794e62ac2..487a01aa207d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -231,7 +231,7 @@ enum pl08x_dma_chan_state {
/**
* struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
- * @vc: wrappped virtual channel
+ * @vc: wrapped virtual channel
* @phychan: the physical channel utilized by this channel, if there is one
* @name: name of channel
* @cd: channel platform data
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
new file mode 100644
index 000000000000..d1f74a3aa999
--- /dev/null
+++ b/drivers/dma/apple-admac.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Audio DMA Controller (ADMAC) on t8103 (M1) and other Apple chips
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include "dmaengine.h"
+
+#define NCHANNELS_MAX 64
+#define IRQ_NOUTPUTS 4
+
+#define RING_WRITE_SLOT GENMASK(1, 0)
+#define RING_READ_SLOT GENMASK(5, 4)
+#define RING_FULL BIT(9)
+#define RING_EMPTY BIT(8)
+#define RING_ERR BIT(10)
+
+#define STATUS_DESC_DONE BIT(0)
+#define STATUS_ERR BIT(6)
+
+#define FLAG_DESC_NOTIFY BIT(16)
+
+#define REG_TX_START 0x0000
+#define REG_TX_STOP 0x0004
+#define REG_RX_START 0x0008
+#define REG_RX_STOP 0x000c
+
+#define REG_CHAN_CTL(ch) (0x8000 + (ch) * 0x200)
+#define REG_CHAN_CTL_RST_RINGS BIT(0)
+
+#define REG_DESC_RING(ch) (0x8070 + (ch) * 0x200)
+#define REG_REPORT_RING(ch) (0x8074 + (ch) * 0x200)
+
+#define REG_RESIDUE(ch) (0x8064 + (ch) * 0x200)
+
+#define REG_BUS_WIDTH(ch) (0x8040 + (ch) * 0x200)
+
+#define BUS_WIDTH_8BIT 0x00
+#define BUS_WIDTH_16BIT 0x01
+#define BUS_WIDTH_32BIT 0x02
+#define BUS_WIDTH_FRAME_2_WORDS 0x10
+#define BUS_WIDTH_FRAME_4_WORDS 0x20
+
+#define CHAN_BUFSIZE 0x8000
+
+#define REG_CHAN_FIFOCTL(ch) (0x8054 + (ch) * 0x200)
+#define CHAN_FIFOCTL_LIMIT GENMASK(31, 16)
+#define CHAN_FIFOCTL_THRESHOLD GENMASK(15, 0)
+
+#define REG_DESC_WRITE(ch) (0x10000 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
+#define REG_REPORT_READ(ch) (0x10100 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
+
+#define REG_TX_INTSTATE(idx) (0x0030 + (idx) * 4)
+#define REG_RX_INTSTATE(idx) (0x0040 + (idx) * 4)
+#define REG_CHAN_INTSTATUS(ch, idx) (0x8010 + (ch) * 0x200 + (idx) * 4)
+#define REG_CHAN_INTMASK(ch, idx) (0x8020 + (ch) * 0x200 + (idx) * 4)
+
+struct admac_data;
+struct admac_tx;
+
+struct admac_chan {
+ unsigned int no;
+ struct admac_data *host;
+ struct dma_chan chan;
+ struct tasklet_struct tasklet;
+
+ spinlock_t lock;
+ struct admac_tx *current_tx;
+ int nperiod_acks;
+
+ /*
+ * We maintain a 'submitted' and 'issued' list mainly for interface
+ * correctness. Typical use of the driver (per channel) will be
+ * prepping, submitting and issuing a single cyclic transaction which
+ * will stay current until terminate_all is called.
+ */
+ struct list_head submitted;
+ struct list_head issued;
+
+ struct list_head to_free;
+};
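+
+/*
+ * For orientation, a minimal client-side sketch of the generic dmaengine
+ * calls that drive the lists above (standard API, not code from this
+ * driver):
+ *
+ *	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
+ *					DMA_MEM_TO_DEV, 0);
+ *	cookie = dmaengine_submit(txd);    (descriptor lands on 'submitted')
+ *	dma_async_issue_pending(chan);     (spliced onto 'issued', started)
+ */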
+
+struct admac_data {
+ struct dma_device dma;
+ struct device *dev;
+	void __iomem *base;
+
+ int irq_index;
+ int nchannels;
+ struct admac_chan channels[];
+};
+
+struct admac_tx {
+ struct dma_async_tx_descriptor tx;
+ bool cyclic;
+ dma_addr_t buf_addr;
+ dma_addr_t buf_end;
+ size_t buf_len;
+ size_t period_len;
+
+ size_t submitted_pos;
+ size_t reclaimed_pos;
+
+ struct list_head node;
+};
+
+static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val)
+{
+ void __iomem *addr = ad->base + reg;
+ u32 curr = readl_relaxed(addr);
+
+ writel_relaxed((curr & ~mask) | (val & mask), addr);
+}
+
+static struct admac_chan *to_admac_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct admac_chan, chan);
+}
+
+static struct admac_tx *to_admac_tx(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct admac_tx, tx);
+}
+
+static enum dma_transfer_direction admac_chan_direction(int channo)
+{
+ /* Channel directions are hardwired */
+ return (channo & 1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+}
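+
+/*
+ * Example of the hardwired layout: channel 0 is TX (DMA_MEM_TO_DEV),
+ * channel 1 is RX (DMA_DEV_TO_MEM), channel 2 is TX again, and so on.
+ * This pairing is why the start/stop bit below is 1 << (channel / 2),
+ * and why admac_interrupt() matches even channels against TX_INTSTATE
+ * and odd channels against RX_INTSTATE.
+ */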
+
+static dma_cookie_t admac_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct admac_tx *adtx = to_admac_tx(tx);
+ struct admac_chan *adchan = to_admac_chan(tx->chan);
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ cookie = dma_cookie_assign(tx);
+ list_add_tail(&adtx->node, &adchan->submitted);
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ return cookie;
+}
+
+static int admac_desc_free(struct dma_async_tx_descriptor *tx)
+{
+ kfree(to_admac_tx(tx));
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *admac_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct admac_chan *adchan = container_of(chan, struct admac_chan, chan);
+ struct admac_tx *adtx;
+
+ if (direction != admac_chan_direction(adchan->no))
+ return NULL;
+
+ adtx = kzalloc(sizeof(*adtx), GFP_NOWAIT);
+ if (!adtx)
+ return NULL;
+
+ adtx->cyclic = true;
+
+ adtx->buf_addr = buf_addr;
+ adtx->buf_len = buf_len;
+ adtx->buf_end = buf_addr + buf_len;
+ adtx->period_len = period_len;
+
+ adtx->submitted_pos = 0;
+ adtx->reclaimed_pos = 0;
+
+ dma_async_tx_descriptor_init(&adtx->tx, chan);
+ adtx->tx.tx_submit = admac_tx_submit;
+ adtx->tx.desc_free = admac_desc_free;
+
+ return &adtx->tx;
+}
+
+/*
+ * Write one hardware descriptor for a dmaengine cyclic transaction.
+ */
+static void admac_cyclic_write_one_desc(struct admac_data *ad, int channo,
+ struct admac_tx *tx)
+{
+ dma_addr_t addr;
+
+ addr = tx->buf_addr + (tx->submitted_pos % tx->buf_len);
+
+	/* If this happens, it means we have buggy code */
+ WARN_ON_ONCE(addr + tx->period_len > tx->buf_end);
+
+ dev_dbg(ad->dev, "ch%d descriptor: addr=0x%pad len=0x%zx flags=0x%lx\n",
+ channo, &addr, tx->period_len, FLAG_DESC_NOTIFY);
+
+ writel_relaxed(lower_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
+ writel_relaxed(upper_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
+ writel_relaxed(tx->period_len, ad->base + REG_DESC_WRITE(channo));
+ writel_relaxed(FLAG_DESC_NOTIFY, ad->base + REG_DESC_WRITE(channo));
+
+ tx->submitted_pos += tx->period_len;
+ tx->submitted_pos %= 2 * tx->buf_len;
+}
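+
+/*
+ * Worked example with made-up numbers: for buf_len = 0x1000 and
+ * period_len = 0x400, submitted_pos steps 0x0, 0x400, ..., 0x1c00 and
+ * then wraps to 0x0, so the programmed address cycles through the
+ * buffer twice per wrap. Tracking positions modulo 2 * buf_len (rather
+ * than buf_len) keeps the distance between submitted_pos and
+ * reclaimed_pos unambiguous even with a full buffer's worth of
+ * descriptors in flight.
+ */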
+
+/*
+ * Write all the hardware descriptors for a dmaengine cyclic
+ * transaction for which there is space.
+ */
+static void admac_cyclic_write_desc(struct admac_data *ad, int channo,
+ struct admac_tx *tx)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_FULL)
+ break;
+ admac_cyclic_write_one_desc(ad, channo, tx);
+ }
+}
+
+static int admac_ring_noccupied_slots(int ringval)
+{
+ int wrslot = FIELD_GET(RING_WRITE_SLOT, ringval);
+ int rdslot = FIELD_GET(RING_READ_SLOT, ringval);
+
+ if (wrslot != rdslot) {
+ return (wrslot + 4 - rdslot) % 4;
+ } else {
+ WARN_ON((ringval & (RING_FULL | RING_EMPTY)) == 0);
+
+ if (ringval & RING_FULL)
+ return 4;
+ else
+ return 0;
+ }
+}
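+
+/*
+ * Example with a hypothetical ring register value: wrslot = 1 and
+ * rdslot = 3 give (1 + 4 - 3) % 4 = 2 occupied slots out of the 4-deep
+ * ring. Equal slot indices are ambiguous on their own, which is why the
+ * FULL/EMPTY flags are consulted to distinguish 4 occupied from 0.
+ */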
+
+/*
+ * Read from hardware the residue of a cyclic dmaengine transaction.
+ */
+static u32 admac_cyclic_read_residue(struct admac_data *ad, int channo,
+ struct admac_tx *adtx)
+{
+ u32 ring1, ring2;
+ u32 residue1, residue2;
+ int nreports;
+ size_t pos;
+
+ ring1 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
+ residue1 = readl_relaxed(ad->base + REG_RESIDUE(channo));
+ ring2 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
+ residue2 = readl_relaxed(ad->base + REG_RESIDUE(channo));
+
+ if (residue2 > residue1) {
+ /*
+ * Controller must have loaded next descriptor between
+ * the two residue reads
+ */
+ nreports = admac_ring_noccupied_slots(ring1) + 1;
+ } else {
+ /* No descriptor load between the two reads, ring2 is safe to use */
+ nreports = admac_ring_noccupied_slots(ring2);
+ }
+
+ pos = adtx->reclaimed_pos + adtx->period_len * (nreports + 1) - residue2;
+
+ return adtx->buf_len - pos % adtx->buf_len;
+}
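+
+/*
+ * Worked example with illustrative values: buf_len = 0x1000, period_len
+ * = 0x400, reclaimed_pos = 0x800, one unconsumed report (nreports = 1)
+ * and residue2 = 0x100 left in the in-flight descriptor. Then
+ * pos = 0x800 + 0x400 * (1 + 1) - 0x100 = 0xf00, and the residue
+ * reported to the client is 0x1000 - 0xf00 = 0x100.
+ */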
+
+static enum dma_status admac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_data *ad = adchan->host;
+ struct admac_tx *adtx;
+
+ enum dma_status ret;
+ size_t residue;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ adtx = adchan->current_tx;
+
+ if (adtx && adtx->tx.cookie == cookie) {
+ ret = DMA_IN_PROGRESS;
+ residue = admac_cyclic_read_residue(ad, adchan->no, adtx);
+ } else {
+ ret = DMA_IN_PROGRESS;
+ residue = 0;
+ list_for_each_entry(adtx, &adchan->issued, node) {
+ if (adtx->tx.cookie == cookie) {
+ residue = adtx->buf_len;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ dma_set_residue(txstate, residue);
+ return ret;
+}
+
+static void admac_start_chan(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+ u32 startbit = 1 << (adchan->no / 2);
+
+ writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
+ ad->base + REG_CHAN_INTSTATUS(adchan->no, ad->irq_index));
+ writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
+ ad->base + REG_CHAN_INTMASK(adchan->no, ad->irq_index));
+
+ switch (admac_chan_direction(adchan->no)) {
+ case DMA_MEM_TO_DEV:
+ writel_relaxed(startbit, ad->base + REG_TX_START);
+ break;
+ case DMA_DEV_TO_MEM:
+ writel_relaxed(startbit, ad->base + REG_RX_START);
+ break;
+ default:
+ break;
+ }
+ dev_dbg(adchan->host->dev, "ch%d start\n", adchan->no);
+}
+
+static void admac_stop_chan(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+ u32 stopbit = 1 << (adchan->no / 2);
+
+ switch (admac_chan_direction(adchan->no)) {
+ case DMA_MEM_TO_DEV:
+ writel_relaxed(stopbit, ad->base + REG_TX_STOP);
+ break;
+ case DMA_DEV_TO_MEM:
+ writel_relaxed(stopbit, ad->base + REG_RX_STOP);
+ break;
+ default:
+ break;
+ }
+ dev_dbg(adchan->host->dev, "ch%d stop\n", adchan->no);
+}
+
+static void admac_reset_rings(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+
+ writel_relaxed(REG_CHAN_CTL_RST_RINGS,
+ ad->base + REG_CHAN_CTL(adchan->no));
+ writel_relaxed(0, ad->base + REG_CHAN_CTL(adchan->no));
+}
+
+static void admac_start_current_tx(struct admac_chan *adchan)
+{
+ struct admac_data *ad = adchan->host;
+ int ch = adchan->no;
+
+ admac_reset_rings(adchan);
+ writel_relaxed(0, ad->base + REG_CHAN_CTL(ch));
+
+ admac_cyclic_write_one_desc(ad, ch, adchan->current_tx);
+ admac_start_chan(adchan);
+ admac_cyclic_write_desc(ad, ch, adchan->current_tx);
+}
+
+static void admac_issue_pending(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_tx *tx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ list_splice_tail_init(&adchan->submitted, &adchan->issued);
+ if (!list_empty(&adchan->issued) && !adchan->current_tx) {
+ tx = list_first_entry(&adchan->issued, struct admac_tx, node);
+ list_del(&tx->node);
+
+ adchan->current_tx = tx;
+ adchan->nperiod_acks = 0;
+ admac_start_current_tx(adchan);
+ }
+ spin_unlock_irqrestore(&adchan->lock, flags);
+}
+
+static int admac_pause(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+
+ admac_stop_chan(adchan);
+
+ return 0;
+}
+
+static int admac_resume(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+
+ admac_start_chan(adchan);
+
+ return 0;
+}
+
+static int admac_terminate_all(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ admac_stop_chan(adchan);
+ admac_reset_rings(adchan);
+
+ adchan->current_tx = NULL;
+ /*
+ * Descriptors can only be freed after the tasklet
+ * has been killed (in admac_synchronize).
+ */
+ list_splice_tail_init(&adchan->submitted, &adchan->to_free);
+ list_splice_tail_init(&adchan->issued, &adchan->to_free);
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ return 0;
+}
+
+static void admac_synchronize(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_tx *adtx, *_adtx;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ list_splice_tail_init(&adchan->to_free, &head);
+ spin_unlock_irqrestore(&adchan->lock, flags);
+
+ tasklet_kill(&adchan->tasklet);
+
+ list_for_each_entry_safe(adtx, _adtx, &head, node) {
+ list_del(&adtx->node);
+ admac_desc_free(&adtx->tx);
+ }
+}
+
+static int admac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+
+ dma_cookie_init(&adchan->chan);
+ return 0;
+}
+
+static void admac_free_chan_resources(struct dma_chan *chan)
+{
+ admac_terminate_all(chan);
+ admac_synchronize(chan);
+}
+
+static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct admac_data *ad = (struct admac_data *) ofdma->of_dma_data;
+ unsigned int index;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ index = dma_spec->args[0];
+
+ if (index >= ad->nchannels) {
+ dev_err(ad->dev, "channel index %u out of bounds\n", index);
+ return NULL;
+ }
+
+ return &ad->channels[index].chan;
+}
+
+static int admac_drain_reports(struct admac_data *ad, int channo)
+{
+ int count;
+
+ for (count = 0; count < 4; count++) {
+ u32 countval_hi, countval_lo, unk1, flags;
+
+ if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_EMPTY)
+ break;
+
+ countval_lo = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+ countval_hi = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+ unk1 = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+ flags = readl_relaxed(ad->base + REG_REPORT_READ(channo));
+
+ dev_dbg(ad->dev, "ch%d report: countval=0x%llx unk1=0x%x flags=0x%x\n",
+ channo, ((u64) countval_hi) << 32 | countval_lo, unk1, flags);
+ }
+
+ return count;
+}
+
+static void admac_handle_status_err(struct admac_data *ad, int channo)
+{
+ bool handled = false;
+
+ if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_ERR) {
+ writel_relaxed(RING_ERR, ad->base + REG_DESC_RING(channo));
+ dev_err_ratelimited(ad->dev, "ch%d descriptor ring error\n", channo);
+ handled = true;
+ }
+
+ if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_ERR) {
+ writel_relaxed(RING_ERR, ad->base + REG_REPORT_RING(channo));
+ dev_err_ratelimited(ad->dev, "ch%d report ring error\n", channo);
+ handled = true;
+ }
+
+ if (unlikely(!handled)) {
+ dev_err(ad->dev, "ch%d unknown error, masking errors as cause of IRQs\n", channo);
+ admac_modify(ad, REG_CHAN_INTMASK(channo, ad->irq_index),
+ STATUS_ERR, 0);
+ }
+}
+
+static void admac_handle_status_desc_done(struct admac_data *ad, int channo)
+{
+ struct admac_chan *adchan = &ad->channels[channo];
+ unsigned long flags;
+ int nreports;
+
+ writel_relaxed(STATUS_DESC_DONE,
+ ad->base + REG_CHAN_INTSTATUS(channo, ad->irq_index));
+
+ spin_lock_irqsave(&adchan->lock, flags);
+ nreports = admac_drain_reports(ad, channo);
+
+ if (adchan->current_tx) {
+ struct admac_tx *tx = adchan->current_tx;
+
+ adchan->nperiod_acks += nreports;
+ tx->reclaimed_pos += nreports * tx->period_len;
+ tx->reclaimed_pos %= 2 * tx->buf_len;
+
+ admac_cyclic_write_desc(ad, channo, tx);
+ tasklet_schedule(&adchan->tasklet);
+ }
+ spin_unlock_irqrestore(&adchan->lock, flags);
+}
+
+static void admac_handle_chan_int(struct admac_data *ad, int no)
+{
+ u32 cause = readl_relaxed(ad->base + REG_CHAN_INTSTATUS(no, ad->irq_index));
+
+ if (cause & STATUS_ERR)
+ admac_handle_status_err(ad, no);
+
+ if (cause & STATUS_DESC_DONE)
+ admac_handle_status_desc_done(ad, no);
+}
+
+static irqreturn_t admac_interrupt(int irq, void *devid)
+{
+ struct admac_data *ad = devid;
+ u32 rx_intstate, tx_intstate;
+ int i;
+
+ rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
+ tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));
+
+ if (!tx_intstate && !rx_intstate)
+ return IRQ_NONE;
+
+ for (i = 0; i < ad->nchannels; i += 2) {
+ if (tx_intstate & 1)
+ admac_handle_chan_int(ad, i);
+ tx_intstate >>= 1;
+ }
+
+ for (i = 1; i < ad->nchannels; i += 2) {
+ if (rx_intstate & 1)
+ admac_handle_chan_int(ad, i);
+ rx_intstate >>= 1;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void admac_chan_tasklet(struct tasklet_struct *t)
+{
+ struct admac_chan *adchan = from_tasklet(adchan, t, tasklet);
+ struct admac_tx *adtx;
+ struct dmaengine_desc_callback cb;
+ struct dmaengine_result tx_result;
+ int nacks;
+
+ spin_lock_irq(&adchan->lock);
+ adtx = adchan->current_tx;
+ nacks = adchan->nperiod_acks;
+ adchan->nperiod_acks = 0;
+ spin_unlock_irq(&adchan->lock);
+
+ if (!adtx || !nacks)
+ return;
+
+ tx_result.result = DMA_TRANS_NOERROR;
+ tx_result.residue = 0;
+
+ dmaengine_desc_get_callback(&adtx->tx, &cb);
+ while (nacks--)
+ dmaengine_desc_callback_invoke(&cb, &tx_result);
+}
+
+static int admac_device_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_data *ad = adchan->host;
+ bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
+ int wordsize = 0;
+ u32 bus_width = 0;
+
+ switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ wordsize = 1;
+ bus_width |= BUS_WIDTH_8BIT;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ wordsize = 2;
+ bus_width |= BUS_WIDTH_16BIT;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ wordsize = 4;
+ bus_width |= BUS_WIDTH_32BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * We take port_window_size to be the number of words in a frame.
+ *
+	 * The controller has some means of out-of-band signalling, to the
+	 * peripheral, of a word's position in a frame. That's where the
+	 * importance of this control comes from.
+ */
+ switch (is_tx ? config->dst_port_window_size : config->src_port_window_size) {
+ case 0 ... 1:
+ break;
+ case 2:
+ bus_width |= BUS_WIDTH_FRAME_2_WORDS;
+ break;
+ case 4:
+ bus_width |= BUS_WIDTH_FRAME_4_WORDS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(bus_width, ad->base + REG_BUS_WIDTH(adchan->no));
+
+ /*
+ * By FIFOCTL_LIMIT we seem to set the maximal number of bytes allowed to be
+ * held in controller's per-channel FIFO. Transfers seem to be triggered
+ * around the time FIFO occupancy touches FIFOCTL_THRESHOLD.
+ *
+ * The numbers we set are more or less arbitrary.
+ */
+ writel_relaxed(FIELD_PREP(CHAN_FIFOCTL_LIMIT, 0x30 * wordsize)
+ | FIELD_PREP(CHAN_FIFOCTL_THRESHOLD, 0x18 * wordsize),
+ ad->base + REG_CHAN_FIFOCTL(adchan->no));
+
+ return 0;
+}
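+
+/*
+ * Hypothetical usage sketch (generic dmaengine API, not code from this
+ * driver): a TX client asking for 32-bit words, two words per frame,
+ * which the function above maps to BUS_WIDTH_32BIT |
+ * BUS_WIDTH_FRAME_2_WORDS:
+ *
+ *	struct dma_slave_config cfg = {
+ *		.direction = DMA_MEM_TO_DEV,
+ *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ *		.dst_port_window_size = 2,
+ *	};
+ *	dmaengine_slave_config(chan, &cfg);
+ *
+ * Two words per frame would correspond to e.g. a left/right sample pair
+ * of an audio stream.
+ */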
+
+static int admac_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct admac_data *ad;
+ struct dma_device *dma;
+ int nchannels;
+ int err, irq, i;
+
+ err = of_property_read_u32(np, "dma-channels", &nchannels);
+ if (err || nchannels > NCHANNELS_MAX) {
+ dev_err(&pdev->dev, "missing or invalid dma-channels property\n");
+ return -EINVAL;
+ }
+
+ ad = devm_kzalloc(&pdev->dev, struct_size(ad, channels, nchannels), GFP_KERNEL);
+ if (!ad)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ad);
+ ad->dev = &pdev->dev;
+ ad->nchannels = nchannels;
+
+ /*
+ * The controller has 4 IRQ outputs. Try them all until
+ * we find one we can use.
+ */
+ for (i = 0; i < IRQ_NOUTPUTS; i++) {
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq >= 0) {
+ ad->irq_index = i;
+ break;
+ }
+ }
+
+ if (irq < 0)
+ return dev_err_probe(&pdev->dev, irq, "no usable interrupt\n");
+
+ err = devm_request_irq(&pdev->dev, irq, admac_interrupt,
+ 0, dev_name(&pdev->dev), ad);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "unable to register interrupt\n");
+
+ ad->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ad->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ad->base),
+ "unable to obtain MMIO resource\n");
+
+ dma = &ad->dma;
+
+ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+
+ dma->dev = &pdev->dev;
+ dma->device_alloc_chan_resources = admac_alloc_chan_resources;
+ dma->device_free_chan_resources = admac_free_chan_resources;
+ dma->device_tx_status = admac_tx_status;
+ dma->device_issue_pending = admac_issue_pending;
+ dma->device_terminate_all = admac_terminate_all;
+ dma->device_synchronize = admac_synchronize;
+ dma->device_prep_dma_cyclic = admac_prep_dma_cyclic;
+ dma->device_config = admac_device_config;
+ dma->device_pause = admac_pause;
+ dma->device_resume = admac_resume;
+
+ dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+
+ INIT_LIST_HEAD(&dma->channels);
+ for (i = 0; i < nchannels; i++) {
+ struct admac_chan *adchan = &ad->channels[i];
+
+ adchan->host = ad;
+ adchan->no = i;
+ adchan->chan.device = &ad->dma;
+ spin_lock_init(&adchan->lock);
+ INIT_LIST_HEAD(&adchan->submitted);
+ INIT_LIST_HEAD(&adchan->issued);
+ INIT_LIST_HEAD(&adchan->to_free);
+ list_add_tail(&adchan->chan.device_node, &dma->channels);
+ tasklet_setup(&adchan->tasklet, admac_chan_tasklet);
+ }
+
+ err = dma_async_device_register(&ad->dma);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");
+
+ err = of_dma_controller_register(pdev->dev.of_node, admac_dma_of_xlate, ad);
+ if (err) {
+ dma_async_device_unregister(&ad->dma);
+ return dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
+ }
+
+ return 0;
+}
+
+static int admac_remove(struct platform_device *pdev)
+{
+ struct admac_data *ad = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&ad->dma);
+
+ return 0;
+}
+
+static const struct of_device_id admac_of_match[] = {
+ { .compatible = "apple,admac", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, admac_of_match);
+
+static struct platform_driver apple_admac_driver = {
+ .driver = {
+ .name = "apple-admac",
+ .of_match_table = admac_of_match,
+ },
+ .probe = admac_probe,
+ .remove = admac_remove,
+};
+module_platform_driver(apple_admac_driver);
+
+MODULE_AUTHOR("Martin Povišer <povik+lin@cutebit.org>");
+MODULE_DESCRIPTION("Driver for Audio DMA Controller (ADMAC) on Apple SoCs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 7b3e6030f7b4..b102d8eb5d83 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -649,7 +649,7 @@ static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
}
/*
- * Only check that maxburst and addr width values are supported by the
+ * Only check that maxburst and addr width values are supported by
* the controller but not that the configuration is good to perform the
* transfer since we don't know the direction at this stage.
*/
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 5161b73c30c4..f30dabc99795 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -55,6 +56,9 @@
#define AXI_DMAC_DMA_DST_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define AXI_DMAC_DMA_DST_WIDTH_MSK GENMASK(3, 0)
#define AXI_DMAC_DMA_DST_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)
+#define AXI_DMAC_REG_COHERENCY_DESC 0x14
+#define AXI_DMAC_DST_COHERENT_MSK BIT(0)
+#define AXI_DMAC_DST_COHERENT_GET(x) FIELD_GET(AXI_DMAC_DST_COHERENT_MSK, x)
#define AXI_DMAC_REG_IRQ_MASK 0x80
#define AXI_DMAC_REG_IRQ_PENDING 0x84
@@ -979,6 +983,18 @@ static int axi_dmac_probe(struct platform_device *pdev)
axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
+ if (of_dma_is_coherent(pdev->dev.of_node)) {
+ ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);
+
+ if (version < ADI_AXI_PCORE_VER(4, 4, 'a') ||
+ !AXI_DMAC_DST_COHERENT_GET(ret)) {
+ dev_err(dmac->dma_dev.dev,
+ "Coherent DMA not supported in hardware");
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+ }
+
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_clk_disable;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index e2ec540e6519..2a483802d9ee 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -388,7 +388,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
if (i != (sg_len - 1) &&
!(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
- /* Automatically proceeed to the next descriptor. */
+ /* Automatically proceed to the next descriptor. */
desc->desc[i].dcm |= JZ_DMA_DCM_LINK;
/*
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e80feeea0e01..c741b6431958 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1153,13 +1153,6 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
- if (dma_has_cap(DMA_MEMCPY_SG, device->cap_mask) && !device->device_prep_dma_memcpy_sg) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_MEMCPY_SG");
- return -EIO;
- }
-
if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index f696246f57fd..9fe2ae794316 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -22,51 +22,50 @@
#include <linux/wait.h>
static unsigned int test_buf_size = 16384;
-module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
+module_param(test_buf_size, uint, 0644);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
static char test_device[32];
-module_param_string(device, test_device, sizeof(test_device),
- S_IRUGO | S_IWUSR);
+module_param_string(device, test_device, sizeof(test_device), 0644);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
static unsigned int threads_per_chan = 1;
-module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
+module_param(threads_per_chan, uint, 0644);
MODULE_PARM_DESC(threads_per_chan,
"Number of threads to start per channel (default: 1)");
static unsigned int max_channels;
-module_param(max_channels, uint, S_IRUGO | S_IWUSR);
+module_param(max_channels, uint, 0644);
MODULE_PARM_DESC(max_channels,
"Maximum number of channels to use (default: all)");
static unsigned int iterations;
-module_param(iterations, uint, S_IRUGO | S_IWUSR);
+module_param(iterations, uint, 0644);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
static unsigned int dmatest;
-module_param(dmatest, uint, S_IRUGO | S_IWUSR);
+module_param(dmatest, uint, 0644);
MODULE_PARM_DESC(dmatest,
"dmatest 0-memcpy 1-memset (default: 0)");
static unsigned int xor_sources = 3;
-module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
+module_param(xor_sources, uint, 0644);
MODULE_PARM_DESC(xor_sources,
"Number of xor source buffers (default: 3)");
static unsigned int pq_sources = 3;
-module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
+module_param(pq_sources, uint, 0644);
MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
static int timeout = 3000;
-module_param(timeout, int, S_IRUGO | S_IWUSR);
+module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
static bool noverify;
-module_param(noverify, bool, S_IRUGO | S_IWUSR);
+module_param(noverify, bool, 0644);
MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");
static bool norandom;
@@ -74,7 +73,7 @@ module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
static bool verbose;
-module_param(verbose, bool, S_IRUGO | S_IWUSR);
+module_param(verbose, bool, 0644);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
static int alignment = -1;
@@ -86,7 +85,7 @@ module_param(transfer_size, uint, 0644);
MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");
static bool polled;
-module_param(polled, bool, S_IRUGO | S_IWUSR);
+module_param(polled, bool, 0644);
MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
/**
@@ -154,7 +153,7 @@ static const struct kernel_param_ops run_ops = {
.get = dmatest_run_get,
};
static bool dmatest_run;
-module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
+module_param_cb(run, &run_ops, &dmatest_run, 0644);
MODULE_PARM_DESC(run, "Run the test (default: false)");
static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
@@ -290,7 +289,7 @@ static const struct kernel_param_ops wait_ops = {
.get = dmatest_wait_get,
.set = param_set_bool,
};
-module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
+module_param_cb(wait, &wait_ops, &wait, 0444);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");
static bool dmatest_match_channel(struct dmatest_params *params,
@@ -579,10 +578,10 @@ static int dmatest_func(void *data)
unsigned int total_tests = 0;
dma_cookie_t cookie;
enum dma_status status;
- enum dma_ctrl_flags flags;
+ enum dma_ctrl_flags flags;
u8 *pq_coefs = NULL;
int ret;
- unsigned int buf_size;
+ unsigned int buf_size;
struct dmatest_data *src;
struct dmatest_data *dst;
int i;
@@ -1095,8 +1094,8 @@ static void add_threaded_test(struct dmatest_info *info)
/* Copy test parameters */
params->buf_size = test_buf_size;
- strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
- strlcpy(params->device, strim(test_device), sizeof(params->device));
+ strscpy(params->channel, strim(test_channel), sizeof(params->channel));
+ strscpy(params->device, strim(test_device), sizeof(params->device));
params->threads_per_chan = threads_per_chan;
params->max_channels = max_channels;
params->iterations = iterations;
@@ -1240,7 +1239,7 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
dtc = list_last_entry(&info->channels,
struct dmatest_chan,
node);
- strlcpy(chan_reset_val,
+ strscpy(chan_reset_val,
dma_chan_name(dtc->chan),
sizeof(chan_reset_val));
ret = -EBUSY;
@@ -1263,14 +1262,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
&& (strcmp("", strim(test_channel)) != 0)) {
ret = -EINVAL;
- strlcpy(chan_reset_val, dma_chan_name(dtc->chan),
+ strscpy(chan_reset_val, dma_chan_name(dtc->chan),
sizeof(chan_reset_val));
goto add_chan_err;
}
} else {
/* Clear test_channel if no channels were added successfully */
- strlcpy(chan_reset_val, "", sizeof(chan_reset_val));
+ strscpy(chan_reset_val, "", sizeof(chan_reset_val));
ret = -EBUSY;
goto add_chan_err;
}
@@ -1295,7 +1294,7 @@ static int dmatest_chan_get(char *val, const struct kernel_param *kp)
mutex_lock(&info->lock);
if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) {
stop_threaded_test(info);
- strlcpy(test_channel, "", sizeof(test_channel));
+ strscpy(test_channel, "", sizeof(test_channel));
}
mutex_unlock(&info->lock);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index c741da02b67e..a183d93bd7e2 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -982,6 +982,11 @@ static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
static void axi_chan_dump_lli(struct axi_dma_chan *chan,
struct axi_dma_hw_desc *desc)
{
+ if (!desc->lli) {
+ dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
+ return;
+ }
+
dev_err(dchan2dev(&chan->vc.chan),
"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
le64_to_cpu(desc->lli->sar),
@@ -1049,6 +1054,11 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* The completed descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+ axi_chan_name(chan));
+ goto out;
+ }
if (chan->cyclic) {
desc = vd_to_axi_desc(vd);
@@ -1078,6 +1088,7 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
axi_chan_start_first_queued(chan);
}
+out:
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 33bc1e6c4cf2..c73b9ed1ce74 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -414,19 +414,11 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list */
-
- #ifdef CONFIG_64BIT
/* llp is not aligned on 64bit -> keep 32bit accesses */
SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
lower_32_bits(chunk->ll_region.paddr));
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
- #else /* CONFIG_64BIT */
- SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
- lower_32_bits(chunk->ll_region.paddr));
- SET_CH_32(dw, chan->dir, chan->id, llp.msb,
- upper_32_bits(chunk->ll_region.paddr));
- #endif /* CONFIG_64BIT */
}
/* Doorbell */
SET_RW_32(dw, chan->dir, doorbell,
diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
index 11d254e450b0..f9912c3dd4d7 100644
--- a/drivers/dma/dw/rzn1-dmamux.c
+++ b/drivers/dma/dw/rzn1-dmamux.c
@@ -102,10 +102,12 @@ free_map:
return ERR_PTR(ret);
}
+#ifdef CONFIG_OF
static const struct of_device_id rzn1_dmac_match[] = {
{ .compatible = "renesas,rzn1-dma" },
{}
};
+#endif
static int rzn1_dmamux_probe(struct platform_device *pdev)
{
@@ -140,6 +142,7 @@ static const struct of_device_id rzn1_dmamux_match[] = {
{ .compatible = "renesas,rzn1-dmamux" },
{}
};
+MODULE_DEVICE_TABLE(of, rzn1_dmamux_match);
static struct platform_driver rzn1_dmamux_driver = {
.driver = {
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 971ff5f9ae84..d19ea885c63e 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1183,7 +1183,7 @@ fail:
*
* Synchronizes the DMA channel termination to the current context. When this
* function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory associated
+ * descriptors have stopped and it is safe to free the memory associated
* with them. Furthermore it is guaranteed that all complete callback functions
* for a previously submitted descriptor have finished running and it is safe to
* free resources accessed from within the complete callbacks.
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 3ae05d1446a5..a06a1575a2a5 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -559,9 +559,6 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
}
for_each_sg(sgl, sg, sg_len, i) {
- /* get next sg's physical address */
- last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
-
if (direction == DMA_MEM_TO_DEV) {
src_addr = sg_dma_address(sg);
dst_addr = fsl_chan->dma_dev_addr;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 3bffe3ecbd1b..65c6094ce063 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1047,7 +1047,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
return -ENOMEM;
imxdma->dev = &pdev->dev;
- imxdma->devtype = (enum imx_dma_type)of_device_get_match_data(&pdev->dev);
+ imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
imxdma->base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f37a276f519e..fbea5f62dd98 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -183,12 +183,14 @@
BIT(DMA_DEV_TO_DEV))
#define SDMA_WATERMARK_LEVEL_N_FIFOS GENMASK(15, 12)
+#define SDMA_WATERMARK_LEVEL_OFF_FIFOS GENMASK(19, 16)
+#define SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO GENMASK(31, 28)
#define SDMA_WATERMARK_LEVEL_SW_DONE BIT(23)
#define SDMA_DONE0_CONFIG_DONE_SEL BIT(7)
#define SDMA_DONE0_CONFIG_DONE_DIS BIT(6)
-/**
+/*
* struct sdma_script_start_addrs - SDMA script start pointers
*
* start addresses of the different functions in the physical
@@ -424,6 +426,14 @@ struct sdma_desc {
* @data: specific sdma interface structure
* @bd_pool: dma_pool for bd
* @terminate_worker: used to call back into terminate work function
+ * @terminated: terminated list
+ * @is_ram_script: flag for script in ram
+ * @n_fifos_src: number of source device fifos
+ * @n_fifos_dst: number of destination device fifos
+ * @sw_done: software done flag
+ * @stride_fifos_src: stride for source device FIFOs
+ * @stride_fifos_dst: stride for destination device FIFOs
+ * @words_per_fifo: number of words to copy at a time for one FIFO
*/
struct sdma_channel {
struct virt_dma_chan vc;
@@ -451,6 +461,9 @@ struct sdma_channel {
bool is_ram_script;
unsigned int n_fifos_src;
unsigned int n_fifos_dst;
+ unsigned int stride_fifos_src;
+ unsigned int stride_fifos_dst;
+ unsigned int words_per_fifo;
bool sw_done;
};
@@ -1240,17 +1253,29 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
{
unsigned int n_fifos;
+ unsigned int stride_fifos;
+ unsigned int words_per_fifo;
if (sdmac->sw_done)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SW_DONE;
- if (sdmac->direction == DMA_DEV_TO_MEM)
+ if (sdmac->direction == DMA_DEV_TO_MEM) {
n_fifos = sdmac->n_fifos_src;
- else
+ stride_fifos = sdmac->stride_fifos_src;
+ } else {
n_fifos = sdmac->n_fifos_dst;
+ stride_fifos = sdmac->stride_fifos_dst;
+ }
+
+ words_per_fifo = sdmac->words_per_fifo;
sdmac->watermark_level |=
FIELD_PREP(SDMA_WATERMARK_LEVEL_N_FIFOS, n_fifos);
+ sdmac->watermark_level |=
+ FIELD_PREP(SDMA_WATERMARK_LEVEL_OFF_FIFOS, stride_fifos);
+ if (words_per_fifo)
+ sdmac->watermark_level |=
+ FIELD_PREP(SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO, (words_per_fifo - 1));
}
static int sdma_config_channel(struct dma_chan *chan)
@@ -1764,6 +1789,9 @@ static int sdma_config(struct dma_chan *chan,
}
sdmac->n_fifos_src = sdmacfg->n_fifos_src;
sdmac->n_fifos_dst = sdmacfg->n_fifos_dst;
+ sdmac->stride_fifos_src = sdmacfg->stride_fifos_src;
+ sdmac->stride_fifos_dst = sdmacfg->stride_fifos_dst;
+ sdmac->words_per_fifo = sdmacfg->words_per_fifo;
sdmac->sw_done = sdmacfg->sw_done;
}
@@ -2183,8 +2211,8 @@ static int sdma_probe(struct platform_device *pdev)
if (ret)
goto err_clk;
- ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
- sdma);
+ ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0,
+ dev_name(&pdev->dev), sdma);
if (ret)
goto err_irq;
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index f8847c48ba03..9ae92b8940ef 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -373,7 +373,7 @@ static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
/*
* free child CVD after completion.
- * the parent CVD would be freeed with desc_free by user.
+ * the parent CVD would be freed with desc_free by user.
*/
if (cvd->parent != cvd)
kfree(cvd);
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 9ebd9231f62f..f7717c44b887 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -138,7 +138,7 @@ struct mtk_hsdma_vdesc {
/**
* struct mtk_hsdma_cb - This is the struct holding extra info required for RX
- * ring to know what relevant VD the the PD is being
+ * ring to know what relevant VD the PD is being
* mapped to.
* @vd: Pointer to the relevant VD.
* @flag: Flag indicating what action should be taken when VD
@@ -761,7 +761,7 @@ static void mtk_hsdma_free_active_desc(struct dma_chan *c)
/*
* Once issue_synchronize is being set, which means once the hardware
* consumes all descriptors for the channel in the ring, the
- * synchronization must be be notified immediately it is completed.
+ * synchronization must be notified immediately it is completed.
*/
spin_lock(&hvc->vc.lock);
if (!list_empty(&hvc->desc_hw_processing)) {
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index f10b29034da1..f629ef6fd3c2 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -313,7 +313,7 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
"%s sw_desc %p: async_tx %p\n",
__func__, sw_desc, &sw_desc->async_tx);
- /* assign coookie */
+ /* assign cookie */
spin_lock_bh(&xor_dev->lock);
cookie = dma_cookie_assign(tx);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 1f0bbaed4643..95a462a1f511 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -193,7 +193,7 @@ struct owl_dma_pchan {
/**
* struct owl_dma_pchan - Wrapper for DMA ENGINE channel
- * @vc: wrappped virtual channel
+ * @vc: wrapped virtual channel
* @pchan: the physical channel utilized by this channel
* @txd: active transaction on this channel
* @cfg: slave configuration for this channel
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 8e14c72d03f0..f6ed7e889781 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -202,7 +202,7 @@ struct s3c24xx_dma_phy {
* struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
* @id: the id of the channel
* @name: name of the channel
- * @vc: wrappped virtual channel
+ * @vc: wrapped virtual channel
* @phy: the physical channel utilized by this channel, if there is one
* @runtime_addr: address for RX/TX according to the runtime config
* @at: active transaction on this channel
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index db5a4ef76077..4f8b8498c5c6 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -52,16 +52,6 @@ static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
struct sf_pdma_desc *desc;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->lock, flags);
-
- if (chan->desc && !chan->desc->in_use) {
- spin_unlock_irqrestore(&chan->lock, flags);
- return chan->desc;
- }
-
- spin_unlock_irqrestore(&chan->lock, flags);
desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
if (!desc)
@@ -111,7 +101,6 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
spin_lock_irqsave(&chan->vchan.lock, iflags);
- chan->desc = desc;
sf_pdma_fill_desc(desc, dest, src, len);
spin_unlock_irqrestore(&chan->vchan.lock, iflags);
@@ -170,11 +159,17 @@ static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
unsigned long flags;
u64 residue = 0;
struct sf_pdma_desc *desc;
- struct dma_async_tx_descriptor *tx;
+ struct dma_async_tx_descriptor *tx = NULL;
spin_lock_irqsave(&chan->vchan.lock, flags);
- tx = &chan->desc->vdesc.tx;
+ list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
+ if (vd->tx.cookie == cookie)
+ tx = &vd->tx;
+
+ if (!tx)
+ goto out;
+
if (cookie == tx->chan->completed_cookie)
goto out;
@@ -241,6 +236,19 @@ static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
writel(v, regs->ctrl);
}
+static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
+{
+ struct virt_dma_chan *vchan = &chan->vchan;
+ struct virt_dma_desc *vdesc;
+
+ if (list_empty(&vchan->desc_issued))
+ return NULL;
+
+ vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node);
+
+ return container_of(vdesc, struct sf_pdma_desc, vdesc);
+}
+
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
struct sf_pdma_desc *desc = chan->desc;
@@ -268,8 +276,11 @@ static void sf_pdma_issue_pending(struct dma_chan *dchan)
spin_lock_irqsave(&chan->vchan.lock, flags);
- if (vchan_issue_pending(&chan->vchan) && chan->desc)
+ if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
+		/* vchan_issue_pending has made a check that desc is not NULL */
+ chan->desc = sf_pdma_get_first_pending_desc(chan);
sf_pdma_xfer_desc(chan);
+ }
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
@@ -298,6 +309,11 @@ static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
spin_lock_irqsave(&chan->vchan.lock, flags);
list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
+
+ chan->desc = sf_pdma_get_first_pending_desc(chan);
+ if (chan->desc)
+ sf_pdma_xfer_desc(chan);
+
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index ee2872e7d64c..476847a4916b 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -12,6 +12,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -630,6 +631,21 @@ static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
*/
}
+static void rz_dmac_device_synchronize(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ u32 chstat;
+ int ret;
+
+ ret = read_poll_timeout(rz_dmac_ch_readl, chstat, !(chstat & CHSTAT_EN),
+ 100, 100000, false, channel, CHSTAT, 1);
+ if (ret < 0)
+ dev_warn(dmac->dev, "DMA Timeout");
+
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+}
+
/*
* -----------------------------------------------------------------------------
* IRQ handling
@@ -909,6 +925,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
engine->device_config = rz_dmac_config;
engine->device_terminate_all = rz_dmac_terminate_all;
engine->device_issue_pending = rz_dmac_issue_pending;
+ engine->device_synchronize = rz_dmac_device_synchronize;
engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
dma_set_max_seg_size(engine->dev, U32_MAX);
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 2138b80435ab..474d3ba8ec9f 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1237,11 +1237,8 @@ static int sprd_dma_remove(struct platform_device *pdev)
{
struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
struct sprd_dma_chn *c, *cn;
- int ret;
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
- return ret;
+ pm_runtime_get_sync(&pdev->dev);
/* explicitly free the irq */
if (sdev->irq > 0)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index e1827393143f..f093e08c23b1 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1970,7 +1970,7 @@ static int d40_config_memcpy(struct d40_chan *d40c)
dma_has_cap(DMA_SLAVE, cap)) {
d40c->dma_cfg = dma40_memcpy_conf_phy;
- /* Generate interrrupt at end of transfer or relink. */
+ /* Generate interrupt at end of transfer or relink. */
d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
/* Generate interrupt on error. */
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index caf0cce8f528..b11927ed4367 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1328,12 +1328,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
return IRQ_NONE;
}
id = __ffs(status);
-
chan = &dmadev->chan[id];
- if (!chan) {
- dev_warn(mdma2dev(dmadev), "MDMA channel not initialized\n");
- return IRQ_NONE;
- }
/* Handle interrupt for the channel */
spin_lock(&chan->vchan.lock);
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 93f1645ae928..f291b1b4db32 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -7,6 +7,7 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
@@ -122,6 +123,15 @@
SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) | \
SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
+/*
+ * Normal DMA supports individual transfers (segments) up to 128k.
+ * Dedicated DMA supports transfers up to 16M. We can only report
+ * one size limit, so we have to use the smaller value.
+ */
+#define SUN4I_NDMA_MAX_SEG_SIZE SZ_128K
+#define SUN4I_DDMA_MAX_SEG_SIZE SZ_16M
+#define SUN4I_DMA_MAX_SEG_SIZE SUN4I_NDMA_MAX_SEG_SIZE
+
struct sun4i_dma_pchan {
/* Register base of channel */
void __iomem *base;
@@ -155,7 +165,8 @@ struct sun4i_dma_contract {
struct virt_dma_desc vd;
struct list_head demands;
struct list_head completed_demands;
- int is_cyclic;
+ bool is_cyclic : 1;
+ bool use_half_int : 1;
};
struct sun4i_dma_dev {
@@ -372,7 +383,7 @@ static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
if (promise) {
vchan->contract = contract;
vchan->pchan = pchan;
- set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
+ set_pchan_interrupt(priv, pchan, contract->use_half_int, 1);
configure_pchan(pchan, promise);
}
@@ -735,12 +746,21 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
*
* Which requires half the engine programming for the same
* functionality.
+ *
+ * This only works if two periods fit in a single promise. That will
+ * always be the case for dedicated DMA, where the hardware has a much
+ * larger maximum transfer size than advertised to clients.
*/
- nr_periods = DIV_ROUND_UP(len / period_len, 2);
+ if (vchan->is_dedicated || period_len <= SUN4I_NDMA_MAX_SEG_SIZE / 2) {
+ period_len *= 2;
+ contract->use_half_int = 1;
+ }
+
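+	/*
+	 * Illustrative numbers: len = 64 KiB and period_len = 8 KiB on
+	 * normal DMA satisfy 8 KiB <= 64 KiB, so period_len doubles to
+	 * 16 KiB, nr_periods below becomes 4, and the half-done
+	 * interrupt restores the per-8 KiB period callbacks.
+	 */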
+ nr_periods = DIV_ROUND_UP(len, period_len);
for (i = 0; i < nr_periods; i++) {
/* Calculate the offset in the buffer and the length needed */
- offset = i * period_len * 2;
- plength = min((len - offset), (period_len * 2));
+ offset = i * period_len;
+ plength = min((len - offset), period_len);
if (dir == DMA_MEM_TO_DEV)
src = buf + offset;
else
@@ -1149,6 +1169,8 @@ static int sun4i_dma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
spin_lock_init(&priv->lock);
+ dma_set_max_seg_size(&pdev->dev, SUN4I_DMA_MAX_SEG_SIZE);
+
dma_cap_zero(priv->slave.cap_mask);
dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 05cd451f541d..fa9bda4a2bc6 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -157,8 +157,8 @@
* If any burst is in flight and DMA paused then this is the time to complete
* on-flight burst and update DMA status register.
*/
-#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 20
-#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 100
+#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 10
+#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 5000 /* 5 msec */
/* Channel base address offset from GPCDMA base address */
#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET 0x20000
@@ -432,6 +432,17 @@ static int tegra_dma_device_resume(struct dma_chan *dc)
return 0;
}
+static inline int tegra_dma_pause_noerr(struct tegra_dma_channel *tdc)
+{
+ /* Return 0 irrespective of PAUSE status.
+ * This is useful to recover channels that can exit out of flush
+ * state when the channel is disabled.
+ */
+
+ tegra_dma_pause(tdc);
+ return 0;
+}
+
static void tegra_dma_disable(struct tegra_dma_channel *tdc)
{
u32 csr, status;
@@ -1292,6 +1303,14 @@ static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
.terminate = tegra_dma_pause,
};
+static const struct tegra_dma_chip_data tegra234_dma_chip_data = {
+ .nr_channels = 31,
+ .channel_reg_size = SZ_64K,
+ .max_dma_count = SZ_1G,
+ .hw_support_pause = true,
+ .terminate = tegra_dma_pause_noerr,
+};
+
static const struct of_device_id tegra_dma_of_match[] = {
{
.compatible = "nvidia,tegra186-gpcdma",
@@ -1300,6 +1319,9 @@ static const struct of_device_id tegra_dma_of_match[] = {
.compatible = "nvidia,tegra194-gpcdma",
.data = &tegra194_dma_chip_data,
}, {
+ .compatible = "nvidia,tegra234-gpcdma",
+ .data = &tegra234_dma_chip_data,
+ }, {
},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
index 4c4172a4d271..a488c2250623 100644
--- a/drivers/dma/ti/k3-psil-j721s2.c
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -112,6 +112,11 @@ static struct psil_ep j721s2_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
+ /* MAIN SA2UL */
+ PSIL_SA2UL(0x4a40, 0),
+ PSIL_SA2UL(0x4a41, 0),
+ PSIL_SA2UL(0x4a42, 0),
+ PSIL_SA2UL(0x4a43, 0),
/* CPSW0 */
PSIL_ETHERNET(0x7000),
/* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
@@ -144,6 +149,9 @@ static struct psil_ep j721s2_src_ep_map[] = {
/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
static struct psil_ep j721s2_dst_ep_map[] = {
+ /* MAIN SA2UL */
+ PSIL_SA2UL(0xca40, 1),
+ PSIL_SA2UL(0xca41, 1),
/* CPSW0 */
PSIL_ETHERNET(0xf000),
PSIL_ETHERNET(0xf001),
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index cd62bbb50e8b..6276934d4d2b 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2128,126 +2128,6 @@ error:
}
/**
- * xilinx_cdma_prep_memcpy_sg - prepare descriptors for a memcpy_sg transaction
- * @dchan: DMA channel
- * @dst_sg: Destination scatter list
- * @dst_sg_len: Number of entries in destination scatter list
- * @src_sg: Source scatter list
- * @src_sg_len: Number of entries in source scatter list
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *xilinx_cdma_prep_memcpy_sg(
- struct dma_chan *dchan, struct scatterlist *dst_sg,
- unsigned int dst_sg_len, struct scatterlist *src_sg,
- unsigned int src_sg_len, unsigned long flags)
-{
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_cdma_tx_segment *segment, *prev = NULL;
- struct xilinx_cdma_desc_hw *hw;
- size_t len, dst_avail, src_avail;
- dma_addr_t dma_dst, dma_src;
-
- if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
- return NULL;
-
- if (unlikely(!dst_sg || !src_sg))
- return NULL;
-
- desc = xilinx_dma_alloc_tx_descriptor(chan);
- if (!desc)
- return NULL;
-
- dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_dma_tx_submit;
-
- dst_avail = sg_dma_len(dst_sg);
- src_avail = sg_dma_len(src_sg);
- /*
- * loop until there is either no more source or no more destination
- * scatterlist entry
- */
- while (true) {
- len = min_t(size_t, src_avail, dst_avail);
- len = min_t(size_t, len, chan->xdev->max_buffer_len);
- if (len == 0)
- goto fetch;
-
- /* Allocate the link descriptor from DMA pool */
- segment = xilinx_cdma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
-
- dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
- dst_avail;
- dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
- src_avail;
- hw = &segment->hw;
- hw->control = len;
- hw->src_addr = dma_src;
- hw->dest_addr = dma_dst;
- if (chan->ext_addr) {
- hw->src_addr_msb = upper_32_bits(dma_src);
- hw->dest_addr_msb = upper_32_bits(dma_dst);
- }
-
- if (prev) {
- prev->hw.next_desc = segment->phys;
- if (chan->ext_addr)
- prev->hw.next_desc_msb =
- upper_32_bits(segment->phys);
- }
-
- prev = segment;
- dst_avail -= len;
- src_avail -= len;
- list_add_tail(&segment->node, &desc->segments);
-
-fetch:
- /* Fetch the next dst scatterlist entry */
- if (dst_avail == 0) {
- if (dst_sg_len == 0)
- break;
- dst_sg = sg_next(dst_sg);
- if (dst_sg == NULL)
- break;
- dst_sg_len--;
- dst_avail = sg_dma_len(dst_sg);
- }
- /* Fetch the next src scatterlist entry */
- if (src_avail == 0) {
- if (src_sg_len == 0)
- break;
- src_sg = sg_next(src_sg);
- if (src_sg == NULL)
- break;
- src_sg_len--;
- src_avail = sg_dma_len(src_sg);
- }
- }
-
- if (list_empty(&desc->segments)) {
- dev_err(chan->xdev->dev,
- "%s: Zero-size SG transfer requested\n", __func__);
- goto error;
- }
-
- /* Link the last hardware descriptor with the first. */
- segment = list_first_entry(&desc->segments,
- struct xilinx_cdma_tx_segment, node);
- desc->async_tx.phys = segment->phys;
- prev->hw.next_desc = segment->phys;
-
- return &desc->async_tx;
-
-error:
- xilinx_dma_free_tx_descriptor(chan, desc);
- return NULL;
-}
-
-/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
* @sgl: scatterlist to transfer to/from
@@ -3240,9 +3120,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
DMA_RESIDUE_GRANULARITY_SEGMENT;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
- dma_cap_set(DMA_MEMCPY_SG, xdev->common.cap_mask);
xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
- xdev->common.device_prep_dma_memcpy_sg = xilinx_cdma_prep_memcpy_sg;
/* Residue calculation is supported by only AXI DMA and CDMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index b0f4948b00a5..84dc5240a807 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -376,7 +376,7 @@ static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
if (ret < 0)
goto done;
} else {
- strlcpy(kern_buff, "No testcase executed",
+ strscpy(kern_buff, "No testcase executed",
XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
}
@@ -1652,10 +1652,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
dpdma_hw_init(xdev);
xdev->irq = platform_get_irq(pdev, 0);
- if (xdev->irq < 0) {
- dev_err(xdev->dev, "failed to get platform irq\n");
+ if (xdev->irq < 0)
return xdev->irq;
- }
ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
dev_name(xdev->dev), xdev);