Diffstat (limited to 'drivers/dma')
 drivers/dma/Kconfig                      |  20
 drivers/dma/Makefile                     |   2
 drivers/dma/amd/ptdma/ptdma-dmaengine.c  |  23
 drivers/dma/amd/ptdma/ptdma.h            |   1
 drivers/dma/arm-dma350.c                 | 660
 drivers/dma/at_xdmac.c                   |   6
 drivers/dma/cv1800b-dmamux.c             | 259
 drivers/dma/dma-axi-dmac.c               |   2
 drivers/dma/dmaengine.c                  |  30
 drivers/dma/dw-edma/dw-edma-core.c       |  12
 drivers/dma/dw-edma/dw-edma-pcie.c       |  65
 drivers/dma/fsl-dpaa2-qdma/dpdmai.c      |   5
 drivers/dma/fsl-edma-common.c            |  30
 drivers/dma/fsl-edma-common.h            |  18
 drivers/dma/fsl-edma-main.c              | 114
 drivers/dma/fsl-qdma.c                   |   3
 drivers/dma/fsldma.c                     |  20
 drivers/dma/fsldma.h                     |   1
 drivers/dma/idxd/cdev.c                  |  10
 drivers/dma/idxd/idxd.h                  |   2
 drivers/dma/idxd/init.c                  |   1
 drivers/dma/idxd/registers.h             |  60
 drivers/dma/idxd/sysfs.c                 |   6
 drivers/dma/imx-dma.c                    |   3
 drivers/dma/ioat/dma.c                   |   3
 drivers/dma/mediatek/mtk-cqdma.c         |   4
 drivers/dma/mmp_tdma.c                   |   2
 drivers/dma/mv_xor.c                     |  21
 drivers/dma/nbpfaxi.c                    |  24
 drivers/dma/qcom/gpi.c                   |  11
 drivers/dma/sh/Kconfig                   |   2
 drivers/dma/sh/rz-dmac.c                 |  84
 drivers/dma/stm32/stm32-dma.c            |  12
 drivers/dma/stm32/stm32-dma3.c           |  10
 drivers/dma/stm32/stm32-mdma.c           |   8
 drivers/dma/sun4i-dma.c                  |  46
 drivers/dma/tegra210-adma.c              | 185
 drivers/dma/ti/Kconfig                   |   4
 drivers/dma/ti/k3-udma.c                 |   3
 drivers/dma/xilinx/xilinx_dma.c          |   4
 40 files changed, 1539 insertions(+), 237 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index df2d2dc00a05..05c7c7d9e5a4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -89,10 +89,17 @@ config APPLE_ADMAC
tristate "Apple ADMAC support"
depends on ARCH_APPLE || COMPILE_TEST
select DMA_ENGINE
- default ARCH_APPLE
help
Enable support for Audio DMA Controller found on Apple Silicon SoCs.
+config ARM_DMA350
+ tristate "Arm DMA-350 support"
+ depends on ARM || ARM64 || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the Arm DMA-350 controller.
+
config AT_HDMAC
tristate "Atmel AHB DMA support"
depends on ARCH_AT91
@@ -103,7 +110,7 @@ config AT_HDMAC
config AT_XDMAC
tristate "Atmel XDMA support"
- depends on ARCH_AT91
+ depends on ARCH_MICROCHIP
select DMA_ENGINE
help
Support the Atmel XDMA controller.
@@ -564,6 +571,15 @@ config PLX_DMA
These are exposed via extra functions on the switch's
upstream port. Each function exposes one DMA channel.
+config SOPHGO_CV1800B_DMAMUX
+ tristate "Sophgo CV1800/SG2000 series SoC DMA multiplexer support"
+ depends on MFD_SYSCON
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Support for the DMA multiplexer on Sophgo CV1800/SG2000
+ series SoCs.
Say Y here if your board has this SoC.
+
config STE_DMA40
bool "ST-Ericsson DMA40 support"
depends on ARCH_U8500
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 19ba465011a6..a54d7688392b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
+obj-$(CONFIG_ARM_DMA350) += arm-dma350.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
@@ -70,6 +71,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SF_PDMA) += sf-pdma/
+obj-$(CONFIG_SOPHGO_CV1800B_DMAMUX) += cv1800b-dmamux.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
diff --git a/drivers/dma/amd/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
index 81339664036f..628c49ce5de9 100644
--- a/drivers/dma/amd/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
@@ -566,7 +566,6 @@ int pt_dmaengine_register(struct pt_device *pt)
struct ae4_device *ae4 = NULL;
struct pt_dma_chan *chan;
char *desc_cache_name;
- char *cmd_cache_name;
int ret, i;
if (pt->ver == AE4_DMA_VERSION)
@@ -582,27 +581,17 @@ int pt_dmaengine_register(struct pt_device *pt)
if (!pt->pt_dma_chan)
return -ENOMEM;
- cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
- "%s-dmaengine-cmd-cache",
- dev_name(pt->dev));
- if (!cmd_cache_name)
- return -ENOMEM;
-
desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
"%s-dmaengine-desc-cache",
dev_name(pt->dev));
- if (!desc_cache_name) {
- ret = -ENOMEM;
- goto err_cache;
- }
+ if (!desc_cache_name)
+ return -ENOMEM;
pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
sizeof(struct pt_dma_desc), 0,
SLAB_HWCACHE_ALIGN, NULL);
- if (!pt->dma_desc_cache) {
- ret = -ENOMEM;
- goto err_cache;
- }
+ if (!pt->dma_desc_cache)
+ return -ENOMEM;
dma_dev->dev = pt->dev;
dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
@@ -656,9 +645,6 @@ int pt_dmaengine_register(struct pt_device *pt)
err_reg:
kmem_cache_destroy(pt->dma_desc_cache);
-err_cache:
- kmem_cache_destroy(pt->dma_cmd_cache);
-
return ret;
}
EXPORT_SYMBOL_GPL(pt_dmaengine_register);
@@ -670,5 +656,4 @@ void pt_dmaengine_unregister(struct pt_device *pt)
dma_async_device_unregister(dma_dev);
kmem_cache_destroy(pt->dma_desc_cache);
- kmem_cache_destroy(pt->dma_cmd_cache);
}
diff --git a/drivers/dma/amd/ptdma/ptdma.h b/drivers/dma/amd/ptdma/ptdma.h
index 0a7939105e51..ef3f55632107 100644
--- a/drivers/dma/amd/ptdma/ptdma.h
+++ b/drivers/dma/amd/ptdma/ptdma.h
@@ -254,7 +254,6 @@ struct pt_device {
/* Support for the DMA Engine capabilities */
struct dma_device dma_dev;
struct pt_dma_chan *pt_dma_chan;
- struct kmem_cache *dma_cmd_cache;
struct kmem_cache *dma_desc_cache;
wait_queue_head_t lsb_queue;
diff --git a/drivers/dma/arm-dma350.c b/drivers/dma/arm-dma350.c
new file mode 100644
index 000000000000..9efe2ca7d5ec
--- /dev/null
+++ b/drivers/dma/arm-dma350.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2024-2025 Arm Limited
+// Arm DMA-350 driver
+
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define DMAINFO 0x0f00
+
+#define DMA_BUILDCFG0 0xb0
+#define DMA_CFG_DATA_WIDTH GENMASK(18, 16)
+#define DMA_CFG_ADDR_WIDTH GENMASK(15, 10)
+#define DMA_CFG_NUM_CHANNELS GENMASK(9, 4)
+
+#define DMA_BUILDCFG1 0xb4
+#define DMA_CFG_NUM_TRIGGER_IN GENMASK(8, 0)
+
+#define IIDR 0xc8
+#define IIDR_PRODUCTID GENMASK(31, 20)
+#define IIDR_VARIANT GENMASK(19, 16)
+#define IIDR_REVISION GENMASK(15, 12)
+#define IIDR_IMPLEMENTER GENMASK(11, 0)
+
+#define PRODUCTID_DMA350 0x3a0
+#define IMPLEMENTER_ARM 0x43b
+
+#define DMACH(n) (0x1000 + 0x0100 * (n))
+
+#define CH_CMD 0x00
+#define CH_CMD_RESUME BIT(5)
+#define CH_CMD_PAUSE BIT(4)
+#define CH_CMD_STOP BIT(3)
+#define CH_CMD_DISABLE BIT(2)
+#define CH_CMD_CLEAR BIT(1)
+#define CH_CMD_ENABLE BIT(0)
+
+#define CH_STATUS 0x04
+#define CH_STAT_RESUMEWAIT BIT(21)
+#define CH_STAT_PAUSED BIT(20)
+#define CH_STAT_STOPPED BIT(19)
+#define CH_STAT_DISABLED BIT(18)
+#define CH_STAT_ERR BIT(17)
+#define CH_STAT_DONE BIT(16)
+#define CH_STAT_INTR_ERR BIT(1)
+#define CH_STAT_INTR_DONE BIT(0)
+
+#define CH_INTREN 0x08
+#define CH_INTREN_ERR BIT(1)
+#define CH_INTREN_DONE BIT(0)
+
+#define CH_CTRL 0x0c
+#define CH_CTRL_USEDESTRIGIN BIT(26)
+#define CH_CTRL_USESRCTRIGIN BIT(25)
+#define CH_CTRL_DONETYPE GENMASK(23, 21)
+#define CH_CTRL_REGRELOADTYPE GENMASK(20, 18)
+#define CH_CTRL_XTYPE GENMASK(11, 9)
+#define CH_CTRL_TRANSIZE GENMASK(2, 0)
+
+#define CH_SRCADDR 0x10
+#define CH_SRCADDRHI 0x14
+#define CH_DESADDR 0x18
+#define CH_DESADDRHI 0x1c
+#define CH_XSIZE 0x20
+#define CH_XSIZEHI 0x24
+#define CH_SRCTRANSCFG 0x28
+#define CH_DESTRANSCFG 0x2c
+#define CH_CFG_MAXBURSTLEN GENMASK(19, 16)
+#define CH_CFG_PRIVATTR BIT(11)
+#define CH_CFG_SHAREATTR GENMASK(9, 8)
+#define CH_CFG_MEMATTR GENMASK(7, 0)
+
+#define TRANSCFG_DEVICE \
+ FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
+ FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) | \
+ FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_DEVICE)
+#define TRANSCFG_NC \
+ FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
+ FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) | \
+ FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_NC)
+#define TRANSCFG_WB \
+ FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
+ FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_ISH) | \
+ FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_WB)
+
+#define CH_XADDRINC 0x30
+#define CH_XY_DES GENMASK(31, 16)
+#define CH_XY_SRC GENMASK(15, 0)
+
+#define CH_FILLVAL 0x38
+#define CH_SRCTRIGINCFG 0x4c
+#define CH_DESTRIGINCFG 0x50
+#define CH_LINKATTR 0x70
+#define CH_LINK_SHAREATTR GENMASK(9, 8)
+#define CH_LINK_MEMATTR GENMASK(7, 0)
+
+#define CH_AUTOCFG 0x74
+#define CH_LINKADDR 0x78
+#define CH_LINKADDR_EN BIT(0)
+
+#define CH_LINKADDRHI 0x7c
+#define CH_ERRINFO 0x90
+#define CH_ERRINFO_AXIRDPOISERR BIT(18)
+#define CH_ERRINFO_AXIWRRESPERR BIT(17)
+#define CH_ERRINFO_AXIRDRESPERR BIT(16)
+
+#define CH_BUILDCFG0 0xf8
+#define CH_CFG_INC_WIDTH GENMASK(29, 26)
+#define CH_CFG_DATA_WIDTH GENMASK(24, 22)
+#define CH_CFG_DATA_BUF_SIZE GENMASK(7, 0)
+
+#define CH_BUILDCFG1 0xfc
+#define CH_CFG_HAS_CMDLINK BIT(8)
+#define CH_CFG_HAS_TRIGSEL BIT(7)
+#define CH_CFG_HAS_TRIGIN BIT(5)
+#define CH_CFG_HAS_WRAP BIT(1)
+
+
+#define LINK_REGCLEAR BIT(0)
+#define LINK_INTREN BIT(2)
+#define LINK_CTRL BIT(3)
+#define LINK_SRCADDR BIT(4)
+#define LINK_SRCADDRHI BIT(5)
+#define LINK_DESADDR BIT(6)
+#define LINK_DESADDRHI BIT(7)
+#define LINK_XSIZE BIT(8)
+#define LINK_XSIZEHI BIT(9)
+#define LINK_SRCTRANSCFG BIT(10)
+#define LINK_DESTRANSCFG BIT(11)
+#define LINK_XADDRINC BIT(12)
+#define LINK_FILLVAL BIT(14)
+#define LINK_SRCTRIGINCFG BIT(19)
+#define LINK_DESTRIGINCFG BIT(20)
+#define LINK_AUTOCFG BIT(29)
+#define LINK_LINKADDR BIT(30)
+#define LINK_LINKADDRHI BIT(31)
+
+
+enum ch_ctrl_donetype {
+ CH_CTRL_DONETYPE_NONE = 0,
+ CH_CTRL_DONETYPE_CMD = 1,
+ CH_CTRL_DONETYPE_CYCLE = 3
+};
+
+enum ch_ctrl_xtype {
+ CH_CTRL_XTYPE_DISABLE = 0,
+ CH_CTRL_XTYPE_CONTINUE = 1,
+ CH_CTRL_XTYPE_WRAP = 2,
+ CH_CTRL_XTYPE_FILL = 3
+};
+
+enum ch_cfg_shareattr {
+ SHAREATTR_NSH = 0,
+ SHAREATTR_OSH = 2,
+ SHAREATTR_ISH = 3
+};
+
+enum ch_cfg_memattr {
+ MEMATTR_DEVICE = 0x00,
+ MEMATTR_NC = 0x44,
+ MEMATTR_WB = 0xff
+};
+
+struct d350_desc {
+ struct virt_dma_desc vd;
+ u32 command[16];
+ u16 xsize;
+ u16 xsizehi;
+ u8 tsz;
+};
+
+struct d350_chan {
+ struct virt_dma_chan vc;
+ struct d350_desc *desc;
+ void __iomem *base;
+ int irq;
+ enum dma_status status;
+ dma_cookie_t cookie;
+ u32 residue;
+ u8 tsz;
+ bool has_trig;
+ bool has_wrap;
+ bool coherent;
+};
+
+struct d350 {
+ struct dma_device dma;
+ int nchan;
+ int nreq;
+ struct d350_chan channels[] __counted_by(nchan);
+};
+
+static inline struct d350_chan *to_d350_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct d350_chan, vc.chan);
+}
+
+static inline struct d350_desc *to_d350_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct d350_desc, vd);
+}
+
+static void d350_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(to_d350_desc(vd));
+}
+
+static struct dma_async_tx_descriptor *d350_prep_memcpy(struct dma_chan *chan,
+ dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ struct d350_desc *desc;
+ u32 *cmd;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->tsz = __ffs(len | dest | src | (1 << dch->tsz));
+ desc->xsize = lower_16_bits(len >> desc->tsz);
+ desc->xsizehi = upper_16_bits(len >> desc->tsz);
+
+ cmd = desc->command;
+ cmd[0] = LINK_CTRL | LINK_SRCADDR | LINK_SRCADDRHI | LINK_DESADDR |
+ LINK_DESADDRHI | LINK_XSIZE | LINK_XSIZEHI | LINK_SRCTRANSCFG |
+ LINK_DESTRANSCFG | LINK_XADDRINC | LINK_LINKADDR;
+
+ cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) |
+ FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_CONTINUE) |
+ FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD);
+
+ cmd[2] = lower_32_bits(src);
+ cmd[3] = upper_32_bits(src);
+ cmd[4] = lower_32_bits(dest);
+ cmd[5] = upper_32_bits(dest);
+ cmd[6] = FIELD_PREP(CH_XY_SRC, desc->xsize) | FIELD_PREP(CH_XY_DES, desc->xsize);
+ cmd[7] = FIELD_PREP(CH_XY_SRC, desc->xsizehi) | FIELD_PREP(CH_XY_DES, desc->xsizehi);
+ cmd[8] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[9] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[10] = FIELD_PREP(CH_XY_SRC, 1) | FIELD_PREP(CH_XY_DES, 1);
+ cmd[11] = 0;
+
+ return vchan_tx_prep(&dch->vc, &desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *d350_prep_memset(struct dma_chan *chan,
+ dma_addr_t dest, int value, size_t len, unsigned long flags)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ struct d350_desc *desc;
+ u32 *cmd;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->tsz = __ffs(len | dest | (1 << dch->tsz));
+ desc->xsize = lower_16_bits(len >> desc->tsz);
+ desc->xsizehi = upper_16_bits(len >> desc->tsz);
+
+ cmd = desc->command;
+ cmd[0] = LINK_CTRL | LINK_DESADDR | LINK_DESADDRHI |
+ LINK_XSIZE | LINK_XSIZEHI | LINK_DESTRANSCFG |
+ LINK_XADDRINC | LINK_FILLVAL | LINK_LINKADDR;
+
+ cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) |
+ FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_FILL) |
+ FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD);
+
+ cmd[2] = lower_32_bits(dest);
+ cmd[3] = upper_32_bits(dest);
+ cmd[4] = FIELD_PREP(CH_XY_DES, desc->xsize);
+ cmd[5] = FIELD_PREP(CH_XY_DES, desc->xsizehi);
+ cmd[6] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[7] = FIELD_PREP(CH_XY_DES, 1);
+ cmd[8] = (u8)value * 0x01010101;
+ cmd[9] = 0;
+
+ return vchan_tx_prep(&dch->vc, &desc->vd, flags);
+}
+
+static int d350_pause(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (dch->status == DMA_IN_PROGRESS) {
+ writel_relaxed(CH_CMD_PAUSE, dch->base + CH_CMD);
+ dch->status = DMA_PAUSED;
+ }
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ return 0;
+}
+
+static int d350_resume(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (dch->status == DMA_PAUSED) {
+ writel_relaxed(CH_CMD_RESUME, dch->base + CH_CMD);
+ dch->status = DMA_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ return 0;
+}
+
+static u32 d350_get_residue(struct d350_chan *dch)
+{
+ u32 res, xsize, xsizehi, hi_new;
+ int retries = 3; /* 1st time unlucky, 2nd improbable, 3rd just broken */
+
+ hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
+ do {
+ xsizehi = hi_new;
+ xsize = readl_relaxed(dch->base + CH_XSIZE);
+ hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
+ } while (xsizehi != hi_new && --retries);
+
+ res = FIELD_GET(CH_XY_DES, xsize);
+ res |= FIELD_GET(CH_XY_DES, xsizehi) << 16;
+
+ return res << dch->desc->tsz;
+}
+
+static int d350_terminate_all(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ writel_relaxed(CH_CMD_STOP, dch->base + CH_CMD);
+ if (dch->desc) {
+ if (dch->status != DMA_ERROR)
+ vchan_terminate_vdesc(&dch->desc->vd);
+ dch->desc = NULL;
+ dch->status = DMA_COMPLETE;
+ }
+ vchan_get_all_descriptors(&dch->vc, &list);
+ list_splice_tail(&list, &dch->vc.desc_terminated);
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ return 0;
+}
+
+static void d350_synchronize(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+
+ vchan_synchronize(&dch->vc);
+}
+
+static u32 d350_desc_bytes(struct d350_desc *desc)
+{
+ return ((u32)desc->xsizehi << 16 | desc->xsize) << desc->tsz;
+}
+
+static enum dma_status d350_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status status;
+ unsigned long flags;
+ u32 residue = 0;
+
+ status = dma_cookie_status(chan, cookie, state);
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (cookie == dch->cookie) {
+ status = dch->status;
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
+ dch->residue = d350_get_residue(dch);
+ residue = dch->residue;
+ } else if ((vd = vchan_find_desc(&dch->vc, cookie))) {
+ residue = d350_desc_bytes(to_d350_desc(vd));
+ } else if (status == DMA_IN_PROGRESS) {
+ /* Somebody else terminated it? */
+ status = DMA_ERROR;
+ }
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ dma_set_residue(state, residue);
+ return status;
+}
+
+static void d350_start_next(struct d350_chan *dch)
+{
+ u32 hdr, *reg;
+
+ dch->desc = to_d350_desc(vchan_next_desc(&dch->vc));
+ if (!dch->desc)
+ return;
+
+ list_del(&dch->desc->vd.node);
+ dch->status = DMA_IN_PROGRESS;
+ dch->cookie = dch->desc->vd.tx.cookie;
+ dch->residue = d350_desc_bytes(dch->desc);
+
+ hdr = dch->desc->command[0];
+ reg = &dch->desc->command[1];
+
+ if (hdr & LINK_INTREN)
+ writel_relaxed(*reg++, dch->base + CH_INTREN);
+ if (hdr & LINK_CTRL)
+ writel_relaxed(*reg++, dch->base + CH_CTRL);
+ if (hdr & LINK_SRCADDR)
+ writel_relaxed(*reg++, dch->base + CH_SRCADDR);
+ if (hdr & LINK_SRCADDRHI)
+ writel_relaxed(*reg++, dch->base + CH_SRCADDRHI);
+ if (hdr & LINK_DESADDR)
+ writel_relaxed(*reg++, dch->base + CH_DESADDR);
+ if (hdr & LINK_DESADDRHI)
+ writel_relaxed(*reg++, dch->base + CH_DESADDRHI);
+ if (hdr & LINK_XSIZE)
+ writel_relaxed(*reg++, dch->base + CH_XSIZE);
+ if (hdr & LINK_XSIZEHI)
+ writel_relaxed(*reg++, dch->base + CH_XSIZEHI);
+ if (hdr & LINK_SRCTRANSCFG)
+ writel_relaxed(*reg++, dch->base + CH_SRCTRANSCFG);
+ if (hdr & LINK_DESTRANSCFG)
+ writel_relaxed(*reg++, dch->base + CH_DESTRANSCFG);
+ if (hdr & LINK_XADDRINC)
+ writel_relaxed(*reg++, dch->base + CH_XADDRINC);
+ if (hdr & LINK_FILLVAL)
+ writel_relaxed(*reg++, dch->base + CH_FILLVAL);
+ if (hdr & LINK_SRCTRIGINCFG)
+ writel_relaxed(*reg++, dch->base + CH_SRCTRIGINCFG);
+ if (hdr & LINK_DESTRIGINCFG)
+ writel_relaxed(*reg++, dch->base + CH_DESTRIGINCFG);
+ if (hdr & LINK_AUTOCFG)
+ writel_relaxed(*reg++, dch->base + CH_AUTOCFG);
+ if (hdr & LINK_LINKADDR)
+ writel_relaxed(*reg++, dch->base + CH_LINKADDR);
+ if (hdr & LINK_LINKADDRHI)
+ writel_relaxed(*reg++, dch->base + CH_LINKADDRHI);
+
+ writel(CH_CMD_ENABLE, dch->base + CH_CMD);
+}
+
+static void d350_issue_pending(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (vchan_issue_pending(&dch->vc) && !dch->desc)
+ d350_start_next(dch);
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+}
+
+static irqreturn_t d350_irq(int irq, void *data)
+{
+ struct d350_chan *dch = data;
+ struct device *dev = dch->vc.chan.device->dev;
+ struct virt_dma_desc *vd = &dch->desc->vd;
+ u32 ch_status;
+
+ ch_status = readl(dch->base + CH_STATUS);
+ if (!ch_status)
+ return IRQ_NONE;
+
+ if (ch_status & CH_STAT_INTR_ERR) {
+ u32 errinfo = readl_relaxed(dch->base + CH_ERRINFO);
+
+ if (errinfo & (CH_ERRINFO_AXIRDPOISERR | CH_ERRINFO_AXIRDRESPERR))
+ vd->tx_result.result = DMA_TRANS_READ_FAILED;
+ else if (errinfo & CH_ERRINFO_AXIWRRESPERR)
+ vd->tx_result.result = DMA_TRANS_WRITE_FAILED;
+ else
+ vd->tx_result.result = DMA_TRANS_ABORTED;
+
+ vd->tx_result.residue = d350_get_residue(dch);
+ } else if (!(ch_status & CH_STAT_INTR_DONE)) {
+ dev_warn(dev, "Unexpected IRQ source? 0x%08x\n", ch_status);
+ }
+ writel_relaxed(ch_status, dch->base + CH_STATUS);
+
+ spin_lock(&dch->vc.lock);
+ vchan_cookie_complete(vd);
+ if (ch_status & CH_STAT_INTR_DONE) {
+ dch->status = DMA_COMPLETE;
+ dch->residue = 0;
+ d350_start_next(dch);
+ } else {
+ dch->status = DMA_ERROR;
+ dch->residue = vd->tx_result.residue;
+ }
+ spin_unlock(&dch->vc.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int d350_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ int ret = request_irq(dch->irq, d350_irq, IRQF_SHARED,
+ dev_name(&dch->vc.chan.dev->device), dch);
+ if (!ret)
+ writel_relaxed(CH_INTREN_DONE | CH_INTREN_ERR, dch->base + CH_INTREN);
+
+ return ret;
+}
+
+static void d350_free_chan_resources(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+
+ writel_relaxed(0, dch->base + CH_INTREN);
+ free_irq(dch->irq, dch);
+ vchan_free_chan_resources(&dch->vc);
+}
+
+static int d350_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct d350 *dmac;
+ void __iomem *base;
+ u32 reg;
+ int ret, nchan, dw, aw, r, p;
+ bool coherent, memset;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ reg = readl_relaxed(base + DMAINFO + IIDR);
+ r = FIELD_GET(IIDR_VARIANT, reg);
+ p = FIELD_GET(IIDR_REVISION, reg);
+ if (FIELD_GET(IIDR_IMPLEMENTER, reg) != IMPLEMENTER_ARM ||
+ FIELD_GET(IIDR_PRODUCTID, reg) != PRODUCTID_DMA350)
+		return dev_err_probe(dev, -ENODEV, "Not a DMA-350!\n");
+
+ reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG0);
+ nchan = FIELD_GET(DMA_CFG_NUM_CHANNELS, reg) + 1;
+ dw = 1 << FIELD_GET(DMA_CFG_DATA_WIDTH, reg);
+ aw = FIELD_GET(DMA_CFG_ADDR_WIDTH, reg) + 1;
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(aw));
+ coherent = device_get_dma_attr(dev) == DEV_DMA_COHERENT;
+
+ dmac = devm_kzalloc(dev, struct_size(dmac, channels, nchan), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ dmac->nchan = nchan;
+
+ reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG1);
+ dmac->nreq = FIELD_GET(DMA_CFG_NUM_TRIGGER_IN, reg);
+
+ dev_dbg(dev, "DMA-350 r%dp%d with %d channels, %d requests\n", r, p, dmac->nchan, dmac->nreq);
+
+ dmac->dma.dev = dev;
+ for (int i = min(dw, 16); i > 0; i /= 2) {
+ dmac->dma.src_addr_widths |= BIT(i);
+ dmac->dma.dst_addr_widths |= BIT(i);
+ }
+ dmac->dma.directions = BIT(DMA_MEM_TO_MEM);
+ dmac->dma.descriptor_reuse = true;
+ dmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dmac->dma.device_alloc_chan_resources = d350_alloc_chan_resources;
+ dmac->dma.device_free_chan_resources = d350_free_chan_resources;
+ dma_cap_set(DMA_MEMCPY, dmac->dma.cap_mask);
+ dmac->dma.device_prep_dma_memcpy = d350_prep_memcpy;
+ dmac->dma.device_pause = d350_pause;
+ dmac->dma.device_resume = d350_resume;
+ dmac->dma.device_terminate_all = d350_terminate_all;
+ dmac->dma.device_synchronize = d350_synchronize;
+ dmac->dma.device_tx_status = d350_tx_status;
+ dmac->dma.device_issue_pending = d350_issue_pending;
+ INIT_LIST_HEAD(&dmac->dma.channels);
+
+ /* Would be nice to have per-channel caps for this... */
+ memset = true;
+ for (int i = 0; i < nchan; i++) {
+ struct d350_chan *dch = &dmac->channels[i];
+
+ dch->base = base + DMACH(i);
+ writel_relaxed(CH_CMD_CLEAR, dch->base + CH_CMD);
+
+ reg = readl_relaxed(dch->base + CH_BUILDCFG1);
+ if (!(FIELD_GET(CH_CFG_HAS_CMDLINK, reg))) {
+ dev_warn(dev, "No command link support on channel %d\n", i);
+ continue;
+ }
+ dch->irq = platform_get_irq(pdev, i);
+ if (dch->irq < 0)
+ return dev_err_probe(dev, dch->irq,
+ "Failed to get IRQ for channel %d\n", i);
+
+ dch->has_wrap = FIELD_GET(CH_CFG_HAS_WRAP, reg);
+ dch->has_trig = FIELD_GET(CH_CFG_HAS_TRIGIN, reg) &
+ FIELD_GET(CH_CFG_HAS_TRIGSEL, reg);
+
+ /* Fill is a special case of Wrap */
+ memset &= dch->has_wrap;
+
+ reg = readl_relaxed(dch->base + CH_BUILDCFG0);
+ dch->tsz = FIELD_GET(CH_CFG_DATA_WIDTH, reg);
+
+ reg = FIELD_PREP(CH_LINK_SHAREATTR, coherent ? SHAREATTR_ISH : SHAREATTR_OSH);
+ reg |= FIELD_PREP(CH_LINK_MEMATTR, coherent ? MEMATTR_WB : MEMATTR_NC);
+ writel_relaxed(reg, dch->base + CH_LINKATTR);
+
+ dch->vc.desc_free = d350_desc_free;
+ vchan_init(&dch->vc, &dmac->dma);
+ }
+
+ if (memset) {
+ dma_cap_set(DMA_MEMSET, dmac->dma.cap_mask);
+ dmac->dma.device_prep_dma_memset = d350_prep_memset;
+ }
+
+ platform_set_drvdata(pdev, dmac);
+
+ ret = dma_async_device_register(&dmac->dma);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register DMA device\n");
+
+ return 0;
+}
+
+static void d350_remove(struct platform_device *pdev)
+{
+ struct d350 *dmac = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&dmac->dma);
+}
+
+static const struct of_device_id d350_of_match[] __maybe_unused = {
+ { .compatible = "arm,dma-350" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, d350_of_match);
+
+static struct platform_driver d350_driver = {
+ .driver = {
+ .name = "arm-dma350",
+ .of_match_table = of_match_ptr(d350_of_match),
+ },
+ .probe = d350_probe,
+ .remove = d350_remove,
+};
+module_platform_driver(d350_driver);
+
+MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
+MODULE_DESCRIPTION("Arm DMA-350 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index ba25c23164e7..3fbc74710a13 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -2033,10 +2033,8 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
* at_xdmac_start_xfer() for this descriptor. Now it's time
* to release it.
*/
- if (desc->active_xfer) {
- pm_runtime_put_autosuspend(atxdmac->dev);
- pm_runtime_mark_last_busy(atxdmac->dev);
- }
+ if (desc->active_xfer)
+ pm_runtime_put_noidle(atxdmac->dev);
}
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
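
For context, pm_runtime_put_noidle() only drops the device's usage count, whereas the replaced pm_runtime_put_autosuspend() also schedules a suspend attempt; in the terminate path the reference merely needs releasing. A hedged sketch of the balanced pairing (hypothetical driver code, not taken from this patch):

#include <linux/pm_runtime.h>

/* Hypothetical illustration of the refcount pairing used above. */
static void example_start_xfer(struct device *dev)
{
	/* Take a usage reference before touching the hardware. */
	pm_runtime_get_sync(dev);
	/* ... program and start the transfer ... */
}

static void example_terminate(struct device *dev)
{
	/*
	 * Balance the get without queuing suspend work from the
	 * terminate path; the usage count simply drops back.
	 */
	pm_runtime_put_noidle(dev);
}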
diff --git a/drivers/dma/cv1800b-dmamux.c b/drivers/dma/cv1800b-dmamux.c
new file mode 100644
index 000000000000..e900d6595617
--- /dev/null
+++ b/drivers/dma/cv1800b-dmamux.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/llist.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+
+#define REG_DMA_CHANNEL_REMAP0 0x154
+#define REG_DMA_CHANNEL_REMAP1 0x158
+#define REG_DMA_INT_MUX 0x298
+
+#define DMAMUX_NCELLS 2
+#define MAX_DMA_MAPPING_ID 42
+#define MAX_DMA_CPU_ID 2
+#define MAX_DMA_CH_ID 7
+
+#define DMAMUX_INTMUX_REGISTER_LEN 4
+#define DMAMUX_NR_CH_PER_REGISTER 4
+#define DMAMUX_BIT_PER_CH 8
+#define DMAMUX_CH_MASk GENMASK(5, 0)
+#define DMAMUX_INT_BIT_PER_CPU 10
+#define DMAMUX_CH_UPDATE_BIT BIT(31)
+
+#define DMAMUX_CH_REGPOS(chid) \
+ ((chid) / DMAMUX_NR_CH_PER_REGISTER)
+#define DMAMUX_CH_REGOFF(chid) \
+ ((chid) % DMAMUX_NR_CH_PER_REGISTER)
+#define DMAMUX_CH_REG(chid) \
+ ((DMAMUX_CH_REGPOS(chid) * sizeof(u32)) + \
+ REG_DMA_CHANNEL_REMAP0)
+#define DMAMUX_CH_SET(chid, val) \
+ (((val) << (DMAMUX_CH_REGOFF(chid) * DMAMUX_BIT_PER_CH)) | \
+ DMAMUX_CH_UPDATE_BIT)
+#define DMAMUX_CH_MASK(chid) \
+ DMAMUX_CH_SET(chid, DMAMUX_CH_MASk)
+
+#define DMAMUX_INT_BIT(chid, cpuid) \
+ BIT((cpuid) * DMAMUX_INT_BIT_PER_CPU + (chid))
+#define DMAMUX_INTEN_BIT(cpuid) \
+ DMAMUX_INT_BIT(8, cpuid)
+#define DMAMUX_INT_CH_BIT(chid, cpuid) \
+ (DMAMUX_INT_BIT(chid, cpuid) | DMAMUX_INTEN_BIT(cpuid))
+#define DMAMUX_INT_MASK(chid) \
+ (DMAMUX_INT_BIT(chid, 0) | \
+ DMAMUX_INT_BIT(chid, 1) | \
+ DMAMUX_INT_BIT(chid, 2))
+#define DMAMUX_INT_CH_MASK(chid, cpuid) \
+ (DMAMUX_INT_MASK(chid) | DMAMUX_INTEN_BIT(cpuid))
+
+struct cv1800_dmamux_data {
+ struct dma_router dmarouter;
+ struct regmap *regmap;
+ spinlock_t lock;
+ struct llist_head free_maps;
+ struct llist_head reserve_maps;
+ DECLARE_BITMAP(mapped_peripherals, MAX_DMA_MAPPING_ID);
+};
+
+struct cv1800_dmamux_map {
+ struct llist_node node;
+ unsigned int channel;
+ unsigned int peripheral;
+ unsigned int cpu;
+};
+
+static void cv1800_dmamux_free(struct device *dev, void *route_data)
+{
+ struct cv1800_dmamux_data *dmamux = dev_get_drvdata(dev);
+ struct cv1800_dmamux_map *map = route_data;
+
+ guard(spinlock_irqsave)(&dmamux->lock);
+
+ regmap_update_bits(dmamux->regmap,
+ DMAMUX_CH_REG(map->channel),
+ DMAMUX_CH_MASK(map->channel),
+ DMAMUX_CH_UPDATE_BIT);
+
+ regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX,
+ DMAMUX_INT_CH_MASK(map->channel, map->cpu),
+ DMAMUX_INTEN_BIT(map->cpu));
+
+ dev_dbg(dev, "free channel %u for req %u (cpu %u)\n",
+ map->channel, map->peripheral, map->cpu);
+}
+
+static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct cv1800_dmamux_data *dmamux = platform_get_drvdata(pdev);
+ struct cv1800_dmamux_map *map;
+ struct llist_node *node;
+ unsigned long flags;
+ unsigned int chid, devid, cpuid;
+ int ret;
+
+ if (dma_spec->args_count != DMAMUX_NCELLS) {
+ dev_err(&pdev->dev, "invalid number of dma mux args\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ devid = dma_spec->args[0];
+ cpuid = dma_spec->args[1];
+ dma_spec->args_count = 1;
+
+ if (devid > MAX_DMA_MAPPING_ID) {
+ dev_err(&pdev->dev, "invalid device id: %u\n", devid);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (cpuid > MAX_DMA_CPU_ID) {
+ dev_err(&pdev->dev, "invalid cpu id: %u\n", cpuid);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "can't get dma master\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_lock_irqsave(&dmamux->lock, flags);
+
+ if (test_bit(devid, dmamux->mapped_peripherals)) {
+ llist_for_each_entry(map, dmamux->reserve_maps.first, node) {
+ if (map->peripheral == devid && map->cpu == cpuid)
+ goto found;
+ }
+
+ ret = -EINVAL;
+ goto failed;
+ } else {
+ node = llist_del_first(&dmamux->free_maps);
+ if (!node) {
+ ret = -ENODEV;
+ goto failed;
+ }
+
+ map = llist_entry(node, struct cv1800_dmamux_map, node);
+ llist_add(&map->node, &dmamux->reserve_maps);
+ set_bit(devid, dmamux->mapped_peripherals);
+ }
+
+found:
+ chid = map->channel;
+ map->peripheral = devid;
+ map->cpu = cpuid;
+
+ regmap_set_bits(dmamux->regmap,
+ DMAMUX_CH_REG(chid),
+ DMAMUX_CH_SET(chid, devid));
+
+ regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX,
+ DMAMUX_INT_CH_MASK(chid, cpuid),
+ DMAMUX_INT_CH_BIT(chid, cpuid));
+
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ dma_spec->args[0] = chid;
+
+ dev_dbg(&pdev->dev, "register channel %u for req %u (cpu %u)\n",
+ chid, devid, cpuid);
+
+ return map;
+
+failed:
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+ of_node_put(dma_spec->np);
+ dev_err(&pdev->dev, "errno %d\n", ret);
+ return ERR_PTR(ret);
+}
+
+static int cv1800_dmamux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *mux_node = dev->of_node;
+ struct cv1800_dmamux_data *data;
+ struct cv1800_dmamux_map *tmp;
+ struct device *parent = dev->parent;
+ struct regmap *regmap = NULL;
+ unsigned int i;
+
+ if (!parent)
+ return -ENODEV;
+
+ regmap = device_node_to_regmap(parent->of_node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&data->lock);
+ init_llist_head(&data->free_maps);
+ init_llist_head(&data->reserve_maps);
+
+ for (i = 0; i <= MAX_DMA_CH_ID; i++) {
+ tmp = devm_kmalloc(dev, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+			/* It is OK not to allocate every channel */
+			dev_warn(dev, "cannot allocate channel %u\n", i);
+ continue;
+ }
+
+ init_llist_node(&tmp->node);
+ tmp->channel = i;
+ llist_add(&tmp->node, &data->free_maps);
+ }
+
+ /* if no channel is allocated, the probe must fail */
+ if (llist_empty(&data->free_maps))
+ return -ENOMEM;
+
+ data->regmap = regmap;
+ data->dmarouter.dev = dev;
+ data->dmarouter.route_free = cv1800_dmamux_free;
+
+ platform_set_drvdata(pdev, data);
+
+ return of_dma_router_register(mux_node,
+ cv1800_dmamux_route_allocate,
+ &data->dmarouter);
+}
+
+static void cv1800_dmamux_remove(struct platform_device *pdev)
+{
+ of_dma_controller_free(pdev->dev.of_node);
+}
+
+static const struct of_device_id cv1800_dmamux_ids[] = {
+ { .compatible = "sophgo,cv1800b-dmamux", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cv1800_dmamux_ids);
+
+static struct platform_driver cv1800_dmamux_driver = {
+ .probe = cv1800_dmamux_probe,
+ .remove = cv1800_dmamux_remove,
+ .driver = {
+ .name = "cv1800-dmamux",
+ .of_match_table = cv1800_dmamux_ids,
+ },
+};
+module_platform_driver(cv1800_dmamux_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo CV1800/SG2000 Series SoC DMAMUX driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 36943b0c6d60..5b06b0dc67ee 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -6,6 +6,7 @@
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
+#include <linux/adi-axi-common.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -22,7 +23,6 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/fpga/adi-axi-common.h>
#include <dt-bindings/dma/axi-dmac.h>
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 758fcd0546d8..ca13cd39330b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -926,6 +926,36 @@ void dma_release_channel(struct dma_chan *chan)
}
EXPORT_SYMBOL_GPL(dma_release_channel);
+static void dmaenginem_release_channel(void *chan)
+{
+ dma_release_channel(chan);
+}
+
+/**
+ * devm_dma_request_chan - try to allocate an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ *
+ * The operation is managed and will be undone on driver detach.
+ */
+
+struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name)
+{
+ struct dma_chan *chan = dma_request_chan(dev, name);
+ int ret = 0;
+
+ if (!IS_ERR(chan))
+ ret = devm_add_action_or_reset(dev, dmaenginem_release_channel, chan);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(devm_dma_request_chan);
+
/**
* dmaengine_get - register interest in dma_channels
*/
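
With the managed variant, a consumer driver can drop its manual dma_release_channel() call entirely. A minimal usage sketch (hypothetical client driver; the channel name "rx" is assumed to come from its DT binding):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>

static int example_client_probe(struct platform_device *pdev)
{
	struct dma_chan *chan;

	/* Managed request: released automatically on driver detach. */
	chan = devm_dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR(chan))
		return dev_err_probe(&pdev->dev, PTR_ERR(chan),
				     "failed to request DMA channel\n");

	/* ... use the channel; no dma_release_channel() in remove() ... */
	return 0;
}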
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index c2b88cc99e5d..b43255f914f3 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -24,18 +24,6 @@
#include "../virt-dma.h"
static inline
-struct device *dchan2dev(struct dma_chan *dchan)
-{
- return &dchan->dev->device;
-}
-
-static inline
-struct device *chan2dev(struct dw_edma_chan *chan)
-{
- return &chan->vc.chan.dev->device;
-}
-
-static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct dw_edma_desc, vd);
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 1c6043751dc9..3371e0a76d3c 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -136,7 +136,8 @@ static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val);
if (map != EDMA_MF_EDMA_LEGACY &&
map != EDMA_MF_EDMA_UNROLL &&
- map != EDMA_MF_HDMA_COMPAT)
+ map != EDMA_MF_HDMA_COMPAT &&
+ map != EDMA_MF_HDMA_NATIVE)
return;
pdata->mf = map;
@@ -160,12 +161,16 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
- struct dw_edma_pcie_data vsec_data;
+ struct dw_edma_pcie_data *vsec_data __free(kfree) = NULL;
struct device *dev = &pdev->dev;
struct dw_edma_chip *chip;
int err, nr_irqs;
int i, mask;
+ vsec_data = kmalloc(sizeof(*vsec_data), GFP_KERNEL);
+ if (!vsec_data)
+ return -ENOMEM;
+
/* Enable PCI device */
err = pcim_enable_device(pdev);
if (err) {
@@ -173,23 +178,23 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return err;
}
- memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
+ memcpy(vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
/*
 * Try to find a PCIe Vendor-Specific Extended Capability for the
 * DMA and, if one exists, reconfigure from it.
*/
- dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data);
+ dw_edma_pcie_get_vsec_dma_data(pdev, vsec_data);
/* Mapping PCI BAR regions */
- mask = BIT(vsec_data.rg.bar);
- for (i = 0; i < vsec_data.wr_ch_cnt; i++) {
- mask |= BIT(vsec_data.ll_wr[i].bar);
- mask |= BIT(vsec_data.dt_wr[i].bar);
+ mask = BIT(vsec_data->rg.bar);
+ for (i = 0; i < vsec_data->wr_ch_cnt; i++) {
+ mask |= BIT(vsec_data->ll_wr[i].bar);
+ mask |= BIT(vsec_data->dt_wr[i].bar);
}
- for (i = 0; i < vsec_data.rd_ch_cnt; i++) {
- mask |= BIT(vsec_data.ll_rd[i].bar);
- mask |= BIT(vsec_data.dt_rd[i].bar);
+ for (i = 0; i < vsec_data->rd_ch_cnt; i++) {
+ mask |= BIT(vsec_data->ll_rd[i].bar);
+ mask |= BIT(vsec_data->dt_rd[i].bar);
}
err = pcim_iomap_regions(pdev, mask, pci_name(pdev));
if (err) {
@@ -212,7 +217,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -ENOMEM;
/* IRQs allocation */
- nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
+ nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data->irqs,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (nr_irqs < 1) {
pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n",
@@ -223,22 +228,22 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* Data structure initialization */
chip->dev = dev;
- chip->mf = vsec_data.mf;
+ chip->mf = vsec_data->mf;
chip->nr_irqs = nr_irqs;
chip->ops = &dw_edma_pcie_plat_ops;
- chip->ll_wr_cnt = vsec_data.wr_ch_cnt;
- chip->ll_rd_cnt = vsec_data.rd_ch_cnt;
+ chip->ll_wr_cnt = vsec_data->wr_ch_cnt;
+ chip->ll_rd_cnt = vsec_data->rd_ch_cnt;
- chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar];
+ chip->reg_base = pcim_iomap_table(pdev)[vsec_data->rg.bar];
if (!chip->reg_base)
return -ENOMEM;
for (i = 0; i < chip->ll_wr_cnt; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_wr[i];
struct dw_edma_region *dt_region = &chip->dt_region_wr[i];
- struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
- struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
+ struct dw_edma_block *ll_block = &vsec_data->ll_wr[i];
+ struct dw_edma_block *dt_block = &vsec_data->dt_wr[i];
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr.io)
@@ -262,8 +267,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
for (i = 0; i < chip->ll_rd_cnt; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_rd[i];
struct dw_edma_region *dt_region = &chip->dt_region_rd[i];
- struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
- struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
+ struct dw_edma_block *ll_block = &vsec_data->ll_rd[i];
+ struct dw_edma_block *dt_block = &vsec_data->dt_rd[i];
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr.io)
@@ -291,35 +296,37 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf);
else if (chip->mf == EDMA_MF_HDMA_COMPAT)
pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf);
+ else if (chip->mf == EDMA_MF_HDMA_NATIVE)
+ pci_dbg(pdev, "Version:\tHDMA Native (0x%x)\n", chip->mf);
else
pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf);
pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n",
- vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
+ vsec_data->rg.bar, vsec_data->rg.off, vsec_data->rg.sz,
chip->reg_base);
for (i = 0; i < chip->ll_wr_cnt; i++) {
pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.ll_wr[i].bar,
- vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
+ i, vsec_data->ll_wr[i].bar,
+ vsec_data->ll_wr[i].off, chip->ll_region_wr[i].sz,
chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr);
pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.dt_wr[i].bar,
- vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
+ i, vsec_data->dt_wr[i].bar,
+ vsec_data->dt_wr[i].off, chip->dt_region_wr[i].sz,
chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr);
}
for (i = 0; i < chip->ll_rd_cnt; i++) {
pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.ll_rd[i].bar,
- vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
+ i, vsec_data->ll_rd[i].bar,
+ vsec_data->ll_rd[i].off, chip->ll_region_rd[i].sz,
chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr);
pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.dt_rd[i].bar,
- vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
+ i, vsec_data->dt_rd[i].bar,
+ vsec_data->dt_rd[i].off, chip->dt_region_rd[i].sz,
chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr);
}
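
The `__free(kfree)` annotation above comes from <linux/cleanup.h>: the allocation is freed automatically when the pointer goes out of scope, so none of the early returns in the probe path need an explicit kfree(). A reduced sketch of the pattern, assuming the buffer is only used within the function:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int example_probe_step(size_t sz)
{
	/* Freed automatically on every return path leaving this scope. */
	void *buf __free(kfree) = kmalloc(sz, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use buf; the early returns below need no kfree() ... */
	return 0;
}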
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
index b4323d243d6d..4be81db24a19 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
@@ -48,11 +48,6 @@ struct dpdmai_cmd_destroy {
__le32 dpdmai_id;
} __packed;
-static inline u64 mc_enc(int lsoffset, int width, u64 val)
-{
- return (val & MAKE_UMASK64(width)) << lsoffset;
-}
-
/**
* dpdmai_open() - Open a control session for the specified object
* @mc_io: Pointer to MC portal's I/O object
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 443b2430466c..4976d7dde080 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -95,7 +95,7 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
}
val = edma_readl_chreg(fsl_chan, ch_csr);
- val |= EDMA_V3_CH_CSR_ERQ;
+ val |= EDMA_V3_CH_CSR_ERQ | EDMA_V3_CH_CSR_EEI;
edma_writel_chreg(fsl_chan, val, ch_csr);
}
@@ -821,7 +821,7 @@ void fsl_edma_issue_pending(struct dma_chan *chan)
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- int ret;
+ int ret = 0;
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
clk_prepare_enable(fsl_chan->clk);
@@ -831,17 +831,29 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
32, 0);
- if (fsl_chan->txirq) {
+ if (fsl_chan->txirq)
ret = request_irq(fsl_chan->txirq, fsl_chan->irq_handler, IRQF_SHARED,
fsl_chan->chan_name, fsl_chan);
- if (ret) {
- dma_pool_destroy(fsl_chan->tcd_pool);
- return ret;
- }
- }
+ if (ret)
+ goto err_txirq;
+
+ if (fsl_chan->errirq > 0)
+ ret = request_irq(fsl_chan->errirq, fsl_chan->errirq_handler, IRQF_SHARED,
+ fsl_chan->errirq_name, fsl_chan);
+
+ if (ret)
+ goto err_errirq;
return 0;
+
+err_errirq:
+ if (fsl_chan->txirq)
+ free_irq(fsl_chan->txirq, fsl_chan);
+err_txirq:
+ dma_pool_destroy(fsl_chan->tcd_pool);
+
+ return ret;
}
void fsl_edma_free_chan_resources(struct dma_chan *chan)
@@ -862,6 +874,8 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
if (fsl_chan->txirq)
free_irq(fsl_chan->txirq, fsl_chan);
+ if (fsl_chan->errirq)
+ free_irq(fsl_chan->errirq, fsl_chan);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
dma_pool_destroy(fsl_chan->tcd_pool);
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 10a5565ddfd7..205a96489094 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -71,6 +71,18 @@
#define EDMA_V3_CH_ES_ERR BIT(31)
#define EDMA_V3_MP_ES_VLD BIT(31)
+#define EDMA_V3_CH_ERR_DBE BIT(0)
+#define EDMA_V3_CH_ERR_SBE BIT(1)
+#define EDMA_V3_CH_ERR_SGE BIT(2)
+#define EDMA_V3_CH_ERR_NCE BIT(3)
+#define EDMA_V3_CH_ERR_DOE BIT(4)
+#define EDMA_V3_CH_ERR_DAE BIT(5)
+#define EDMA_V3_CH_ERR_SOE BIT(6)
+#define EDMA_V3_CH_ERR_SAE BIT(7)
+#define EDMA_V3_CH_ERR_ECX BIT(8)
+#define EDMA_V3_CH_ERR_UCE BIT(9)
+#define EDMA_V3_CH_ERR BIT(31)
+
enum fsl_edma_pm_state {
RUNNING = 0,
SUSPENDED,
@@ -162,6 +174,7 @@ struct fsl_edma_chan {
u32 dma_dev_size;
enum dma_data_direction dma_dir;
char chan_name[32];
+ char errirq_name[36];
void __iomem *tcd;
void __iomem *mux_addr;
u32 real_count;
@@ -174,7 +187,9 @@ struct fsl_edma_chan {
int priority;
int hw_chanid;
int txirq;
+ int errirq;
irqreturn_t (*irq_handler)(int irq, void *dev_id);
+ irqreturn_t (*errirq_handler)(int irq, void *dev_id);
bool is_rxchan;
bool is_remote;
bool is_multi_fifo;
@@ -208,6 +223,9 @@ struct fsl_edma_desc {
/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)
#define FSL_EDMA_DRV_TCD64 BIT(15)
+/* All channels' error IRQs share one IRQ line */
+#define FSL_EDMA_DRV_ERRIRQ_SHARE BIT(16)
+
#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
FSL_EDMA_DRV_BUS_8BYTE | \
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 66bfa28d984e..97583c7d51a2 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -50,6 +50,83 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void fsl_edma3_err_check(struct fsl_edma_chan *fsl_chan)
+{
+ unsigned int ch_err;
+ u32 val;
+
+ scoped_guard(spinlock, &fsl_chan->vchan.lock) {
+ ch_err = edma_readl_chreg(fsl_chan, ch_es);
+ if (!(ch_err & EDMA_V3_CH_ERR))
+ return;
+
+ edma_writel_chreg(fsl_chan, EDMA_V3_CH_ERR, ch_es);
+ val = edma_readl_chreg(fsl_chan, ch_csr);
+ val &= ~EDMA_V3_CH_CSR_ERQ;
+ edma_writel_chreg(fsl_chan, val, ch_csr);
+ }
+
+ /* Ignore this interrupt since channel has been disabled already */
+ if (!fsl_chan->edesc)
+ return;
+
+ if (ch_err & EDMA_V3_CH_ERR_DBE)
+ dev_err(&fsl_chan->pdev->dev, "Destination Bus Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SBE)
+ dev_err(&fsl_chan->pdev->dev, "Source Bus Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SGE)
+ dev_err(&fsl_chan->pdev->dev, "Scatter/Gather Configuration Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_NCE)
+ dev_err(&fsl_chan->pdev->dev, "NBYTES/CITER Configuration Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_DOE)
+ dev_err(&fsl_chan->pdev->dev, "Destination Offset Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_DAE)
+ dev_err(&fsl_chan->pdev->dev, "Destination Address Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SOE)
+ dev_err(&fsl_chan->pdev->dev, "Source Offset Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SAE)
+ dev_err(&fsl_chan->pdev->dev, "Source Address Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_ECX)
+ dev_err(&fsl_chan->pdev->dev, "Transfer Canceled interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_UCE)
+ dev_err(&fsl_chan->pdev->dev, "Uncorrectable TCD error during channel execution interrupt.\n");
+
+ fsl_chan->status = DMA_ERROR;
+}
+
+static irqreturn_t fsl_edma3_err_handler_per_chan(int irq, void *dev_id)
+{
+ struct fsl_edma_chan *fsl_chan = dev_id;
+
+ fsl_edma3_err_check(fsl_chan);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma3_err_handler_shared(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ unsigned int ch;
+
+ for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+ if (fsl_edma->chan_masked & BIT(ch))
+ continue;
+
+ fsl_edma3_err_check(&fsl_edma->chans[ch]);
+ }
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_chan *fsl_chan = dev_id;
@@ -309,7 +386,8 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
- int i;
+ char *errirq_name;
+ int i, ret;
for (i = 0; i < fsl_edma->n_chans; i++) {
@@ -324,6 +402,27 @@ static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engi
return -EINVAL;
fsl_chan->irq_handler = fsl_edma3_tx_handler;
+
+ if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE)) {
+ fsl_chan->errirq = fsl_chan->txirq;
+ fsl_chan->errirq_handler = fsl_edma3_err_handler_per_chan;
+ }
+ }
+
+	/* All channels' error interrupts share one IRQ number */
+	if (fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE) {
+		/* the last interrupt listed is the error IRQ */
+		fsl_edma->errirq = platform_get_irq_optional(pdev, fsl_edma->n_chans);
+		if (fsl_edma->errirq < 0)
+			return 0; /* DT omits the error IRQ; treat as the no-error-IRQ case */
+
+ errirq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s-err",
+ dev_name(&pdev->dev));
+
+ ret = devm_request_irq(&pdev->dev, fsl_edma->errirq, fsl_edma3_err_handler_shared,
+ 0, errirq_name, fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Can't register eDMA err IRQ.\n");
}
return 0;
@@ -464,7 +563,8 @@ static struct fsl_edma_drvdata imx7ulp_data = {
};
static struct fsl_edma_drvdata imx8qm_data = {
- .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE,
+ .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE
+ | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
@@ -481,14 +581,15 @@ static struct fsl_edma_drvdata imx8ulp_data = {
};
static struct fsl_edma_drvdata imx93_data3 = {
- .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
};
static struct fsl_edma_drvdata imx93_data4 = {
- .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4
+ | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
@@ -498,7 +599,7 @@ static struct fsl_edma_drvdata imx93_data4 = {
static struct fsl_edma_drvdata imx95_data5 = {
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 |
- FSL_EDMA_DRV_TCD64,
+ FSL_EDMA_DRV_TCD64 | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.mux_off = 0x200,
@@ -700,6 +801,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d",
dev_name(&pdev->dev), i);
+ snprintf(fsl_chan->errirq_name, sizeof(fsl_chan->errirq_name),
+ "%s-CH%02d-err", dev_name(&pdev->dev), i);
+
fsl_chan->edma = fsl_edma;
fsl_chan->pm_state = RUNNING;
fsl_chan->srcid = 0;
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 823f5c6bc2e1..21e13f1207cb 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -148,6 +148,9 @@
* @__reserved1: Reserved field.
* @cfg8b_w1: Compound descriptor command queue origin produced
* by qDMA and dynamic debug field.
+ * @__reserved2: Reserved field.
+ * @cmd: Command for QDMA (see FSL_QDMA_CMD_RWTTYPE and
+ * others).
* @data: Pointer to the memory 40-bit address, describes DMA
* source information and DMA destination information.
*/
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index b5e7d18b9766..9b126a260267 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1226,6 +1226,8 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->dev = &op->dev;
INIT_LIST_HEAD(&fdev->common.channels);
+ /* The DMA address bits supported for this device. */
+ fdev->addr_bits = (long)device_get_match_data(fdev->dev);
/* ioremap the registers for use */
fdev->regs = of_iomap(op->dev.of_node, 0);
@@ -1254,7 +1256,7 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
+ dma_set_mask(&(op->dev), DMA_BIT_MASK(fdev->addr_bits));
platform_set_drvdata(op, fdev);
@@ -1387,10 +1389,20 @@ static const struct dev_pm_ops fsldma_pm_ops = {
};
#endif
+/* The .data field is used for dma-bit-mask. */
static const struct of_device_id fsldma_of_ids[] = {
- { .compatible = "fsl,elo3-dma", },
- { .compatible = "fsl,eloplus-dma", },
- { .compatible = "fsl,elo-dma", },
+ {
+ .compatible = "fsl,elo3-dma",
+ .data = (void *)40,
+ },
+ {
+ .compatible = "fsl,eloplus-dma",
+ .data = (void *)36,
+ },
+ {
+ .compatible = "fsl,elo-dma",
+ .data = (void *)32,
+ },
{}
};
MODULE_DEVICE_TABLE(of, fsldma_of_ids);
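
Storing a small integer directly in the OF match table's .data pointer avoids a table of one-member structs; device_get_match_data() hands it back for the cast in probe. A condensed sketch of the round trip (hypothetical names, same types as the patch):

#include <linux/dma-mapping.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

/* Encode: the DMA address width is stored directly in the match data. */
static const struct of_device_id example_ids[] = {
	{ .compatible = "fsl,elo3-dma", .data = (void *)40 },
	{ /* sentinel */ }
};

/* Decode: cast back through long to avoid pointer-size warnings. */
static int example_probe(struct platform_device *op)
{
	int addr_bits = (long)device_get_match_data(&op->dev);

	return dma_set_mask(&op->dev, DMA_BIT_MASK(addr_bits));
}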
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 308bed0a560a..d7b7a3138b85 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -124,6 +124,7 @@ struct fsldma_device {
struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
u32 feature; /* The same as DMA channels */
int irq; /* Channel IRQ */
+ int addr_bits; /* DMA addressing bits supported */
};
/* Define macros for fsldma_chan->feature property */
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 6d12033649f8..7e4715f92773 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -349,7 +349,9 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
- drain_workqueue(wq->wq);
+ if (wq->wq)
+ drain_workqueue(wq->wq);
+
mutex_unlock(&evl->lock);
}
@@ -442,10 +444,12 @@ static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
* DSA devices are capable of indirect ("batch") command submission.
* On devices where direct user submissions are not safe, we cannot
* allow this since there is no good way for us to verify these
- * indirect commands.
+ * indirect commands. Narrow the restriction of operations with the
+ * BATCH opcode to only DSA version 1 devices.
*/
if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
- !wq->idxd->user_submission_safe)
+ wq->idxd->hw.version == DEVICE_VERSION_1 &&
+ !wq->idxd->user_submission_safe)
return -EINVAL;
/*
* As per the programming specification, the completion address must be
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 214b8039439f..74e6695881e6 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -19,7 +19,6 @@
#define IDXD_DRIVER_VERSION "1.00"
-extern struct kmem_cache *idxd_desc_pool;
extern bool tc_override;
struct idxd_wq;
@@ -171,7 +170,6 @@ struct idxd_cdev {
#define DRIVER_NAME_SIZE 128
-#define IDXD_ALLOCATED_BATCH_SIZE 128U
#define WQ_NAME_SIZE 1024
#define WQ_TYPE_SIZE 10
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 80355d03004d..35bdefd3728b 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -1036,7 +1036,6 @@ static void idxd_reset_prepare(struct pci_dev *pdev)
const char *idxd_name;
int rc;
- dev = &idxd->pdev->dev;
idxd_name = dev_name(idxd_confdev(idxd));
struct idxd_saved_states *idxd_saved __free(kfree) =
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 006ba206ab1b..9c1c546fe443 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -45,7 +45,7 @@ union gen_cap_reg {
u64 rsvd3:32;
};
u64 bits;
-} __packed;
+};
#define IDXD_GENCAP_OFFSET 0x10
union wq_cap_reg {
@@ -65,7 +65,7 @@ union wq_cap_reg {
u64 rsvd4:8;
};
u64 bits;
-} __packed;
+};
#define IDXD_WQCAP_OFFSET 0x20
#define IDXD_WQCFG_MIN 5
@@ -79,7 +79,7 @@ union group_cap_reg {
u64 rsvd:45;
};
u64 bits;
-} __packed;
+};
#define IDXD_GRPCAP_OFFSET 0x30
union engine_cap_reg {
@@ -88,7 +88,7 @@ union engine_cap_reg {
u64 rsvd:56;
};
u64 bits;
-} __packed;
+};
#define IDXD_ENGCAP_OFFSET 0x38
@@ -114,7 +114,7 @@ union offsets_reg {
u64 rsvd:48;
};
u64 bits[2];
-} __packed;
+};
#define IDXD_TABLE_MULT 0x100
@@ -128,7 +128,7 @@ union gencfg_reg {
u32 rsvd2:18;
};
u32 bits;
-} __packed;
+};
#define IDXD_GENCTRL_OFFSET 0x88
union genctrl_reg {
@@ -139,7 +139,7 @@ union genctrl_reg {
u32 rsvd:29;
};
u32 bits;
-} __packed;
+};
#define IDXD_GENSTATS_OFFSET 0x90
union gensts_reg {
@@ -149,7 +149,7 @@ union gensts_reg {
u32 rsvd:28;
};
u32 bits;
-} __packed;
+};
enum idxd_device_status_state {
IDXD_DEVICE_STATE_DISABLED = 0,
@@ -183,7 +183,7 @@ union idxd_command_reg {
u32 int_req:1;
};
u32 bits;
-} __packed;
+};
enum idxd_cmd {
IDXD_CMD_ENABLE_DEVICE = 1,
@@ -213,7 +213,7 @@ union cmdsts_reg {
u8 active:1;
};
u32 bits;
-} __packed;
+};
#define IDXD_CMDSTS_ACTIVE 0x80000000
#define IDXD_CMDSTS_ERR_MASK 0xff
#define IDXD_CMDSTS_RES_SHIFT 8
@@ -284,7 +284,7 @@ union sw_err_reg {
u64 rsvd5;
};
u64 bits[4];
-} __packed;
+};
union iaa_cap_reg {
struct {
@@ -303,7 +303,7 @@ union iaa_cap_reg {
u64 rsvd:52;
};
u64 bits;
-} __packed;
+};
#define IDXD_IAACAP_OFFSET 0x180
@@ -320,7 +320,7 @@ union evlcfg_reg {
u64 rsvd2:28;
};
u64 bits[2];
-} __packed;
+};
#define IDXD_EVL_SIZE_MIN 0x0040
#define IDXD_EVL_SIZE_MAX 0xffff
@@ -334,7 +334,7 @@ union msix_perm {
u32 pasid:20;
};
u32 bits;
-} __packed;
+};
union group_flags {
struct {
@@ -352,13 +352,13 @@ union group_flags {
u64 rsvd5:26;
};
u64 bits;
-} __packed;
+};
struct grpcfg {
u64 wqs[4];
u64 engines;
union group_flags flags;
-} __packed;
+};
union wqcfg {
struct {
@@ -410,7 +410,7 @@ union wqcfg {
u64 op_config[4];
};
u32 bits[16];
-} __packed;
+};
#define WQCFG_PASID_IDX 2
#define WQCFG_PRIVL_IDX 2
@@ -474,7 +474,7 @@ union idxd_perfcap {
u64 rsvd3:8;
};
u64 bits;
-} __packed;
+};
#define IDXD_EVNTCAP_OFFSET 0x80
union idxd_evntcap {
@@ -483,7 +483,7 @@ union idxd_evntcap {
u64 rsvd:36;
};
u64 bits;
-} __packed;
+};
struct idxd_event {
union {
@@ -493,7 +493,7 @@ struct idxd_event {
};
u32 val;
};
-} __packed;
+};
#define IDXD_CNTRCAP_OFFSET 0x800
struct idxd_cntrcap {
@@ -506,7 +506,7 @@ struct idxd_cntrcap {
u32 val;
};
struct idxd_event events[];
-} __packed;
+};
#define IDXD_PERFRST_OFFSET 0x10
union idxd_perfrst {
@@ -516,7 +516,7 @@ union idxd_perfrst {
u32 rsvd:30;
};
u32 val;
-} __packed;
+};
#define IDXD_OVFSTATUS_OFFSET 0x30
#define IDXD_PERFFRZ_OFFSET 0x20
@@ -533,7 +533,7 @@ union idxd_cntrcfg {
u64 rsvd3:4;
};
u64 val;
-} __packed;
+};
#define IDXD_FLTCFG_OFFSET 0x300
@@ -543,7 +543,7 @@ union idxd_cntrdata {
u64 event_count_value;
};
u64 val;
-} __packed;
+};
union event_cfg {
struct {
@@ -551,7 +551,7 @@ union event_cfg {
u64 event_enc:28;
};
u64 val;
-} __packed;
+};
union filter_cfg {
struct {
@@ -562,7 +562,7 @@ union filter_cfg {
u64 eng:8;
};
u64 val;
-} __packed;
+};
#define IDXD_EVLSTATUS_OFFSET 0xf0
@@ -580,7 +580,7 @@ union evl_status_reg {
u32 bits_upper32;
};
u64 bits;
-} __packed;
+};
#define IDXD_MAX_BATCH_IDENT 256
@@ -620,17 +620,17 @@ struct __evl_entry {
};
u64 fault_addr;
u64 rsvd5;
-} __packed;
+};
struct dsa_evl_entry {
struct __evl_entry e;
struct dsa_completion_record cr;
-} __packed;
+};
struct iax_evl_entry {
struct __evl_entry e;
u64 rsvd[4];
struct iax_completion_record cr;
-} __packed;
+};
#endif
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 6af493f6ba77..9f0701021af0 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1208,9 +1208,11 @@ static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *
/* On systems where direct user submissions are not safe, we need to clear out
* the BATCH capability from the capability mask in sysfs since we cannot support
- * that command on such systems.
+ * that command on such systems. This restriction applies only to
+ * DSA version 1 devices.
*/
- if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
+ if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe &&
+ confdev_to_idxd(dev)->hw.version == DEVICE_VERSION_1)
clear_bit(DSA_OPCODE_BATCH % 64, &val);
pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
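The word/bit split in the test above follows from op_cap being exported as 64-bit words: opcode / 64 selects the word and opcode % 64 selects the bit inside it. A standalone sketch of that arithmetic (the opcode value 1 and the helper name are assumptions, not from this patch):

    #include <linux/bitops.h>

    /* Assumed from the driver headers: DSA_OPCODE_BATCH is 1. */
    #define EXAMPLE_OPCODE_BATCH	1

    /* opcap points to the capability mask, stored as 64-bit words. */
    static void clear_batch_cap(unsigned long *opcap)
    {
    	unsigned int word = EXAMPLE_OPCODE_BATCH / 64;	/* which word holds the bit */
    	unsigned int bit = EXAMPLE_OPCODE_BATCH % 64;	/* position inside that word */

    	clear_bit(bit, &opcap[word]);
    }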
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index b96cc0a83872..ba434657059a 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -337,7 +337,8 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
static void imxdma_watchdog(struct timer_list *t)
{
- struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
+ struct imxdma_channel *imxdmac = timer_container_of(imxdmac, t,
+ watchdog);
struct imxdma_engine *imxdma = imxdmac->imxdma;
int channel = imxdmac->channel;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 06a813cc7641..b8fff8333aef 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -901,7 +901,8 @@ static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
void ioat_timer_event(struct timer_list *t)
{
- struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
+ struct ioatdma_chan *ioat_chan = timer_container_of(ioat_chan, t,
+ timer);
dma_addr_t phys_complete;
u64 status;
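Both from_timer() conversions above are a rename to timer_container_of(), which, like its predecessor, is container_of() over the embedded struct timer_list. A minimal usage sketch under that assumption (the struct and callback names are invented for illustration):

    #include <linux/printk.h>
    #include <linux/timer.h>

    struct my_chan {
    	int id;
    	struct timer_list watchdog;
    };

    static void my_watchdog(struct timer_list *t)
    {
    	/* Recover the enclosing structure from the embedded timer. */
    	struct my_chan *chan = timer_container_of(chan, t, watchdog);

    	pr_debug("watchdog fired on channel %d\n", chan->id);
    }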
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 47c8adfdc155..9f0c41ca7770 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -449,9 +449,9 @@ static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c,
return ret;
spin_lock_irqsave(&cvc->pc->lock, flags);
- spin_lock_irqsave(&cvc->vc.lock, flags);
+ spin_lock(&cvc->vc.lock);
vd = mtk_cqdma_find_active_desc(c, cookie);
- spin_unlock_irqrestore(&cvc->vc.lock, flags);
+ spin_unlock(&cvc->vc.lock);
spin_unlock_irqrestore(&cvc->pc->lock, flags);
if (vd) {
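The mtk-cqdma change above fixes a nested-lock bug: calling spin_lock_irqsave() twice with the same flags variable makes the inner call overwrite the IRQ state saved by the outer one, so the final irqrestore can restore the wrong state. Since the outer lock already disabled interrupts, the inner lock only needs the plain variants. A minimal sketch of the corrected pattern (lock names are placeholders):

    #include <linux/spinlock.h>

    static void nested_locked_op(spinlock_t *outer, spinlock_t *inner)
    {
    	unsigned long flags;

    	spin_lock_irqsave(outer, flags);	/* saves IRQ state, disables IRQs */
    	spin_lock(inner);			/* IRQs already off: no second irqsave */

    	/* ... critical section under both locks ... */

    	spin_unlock(inner);
    	spin_unlock_irqrestore(outer, flags);	/* restores the state saved above */
    }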
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index c8dc504510f1..b7fb843c67a6 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -641,7 +641,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
int chan_num = TDMA_CHANNEL_NUM;
struct gen_pool *pool = NULL;
- type = (enum mmp_tdma_type)device_get_match_data(&pdev->dev);
+ type = (kernel_ulong_t)device_get_match_data(&pdev->dev);
/* always have couple channels */
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fa6e4646fdc2..1fdcb0f5c9e7 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1061,8 +1061,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
*/
mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_src_addr))
+ return ERR_PTR(-ENOMEM);
+
mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_dst_addr)) {
+ ret = -ENOMEM;
+ goto err_unmap_src;
+ }
+
/* allocate coherent memory for hardware descriptors
* note: writecombine gives slightly better performance, but
@@ -1071,8 +1079,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->dma_desc_pool_virt =
dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
GFP_KERNEL);
- if (!mv_chan->dma_desc_pool_virt)
- return ERR_PTR(-ENOMEM);
+ if (!mv_chan->dma_desc_pool_virt) {
+ ret = -ENOMEM;
+ goto err_unmap_dst;
+ }
/* discover transaction capabilities from the platform data */
dma_dev->cap_mask = cap_mask;
@@ -1155,6 +1165,13 @@ err_free_irq:
err_free_dma:
dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+err_unmap_dst:
+ dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+err_unmap_src:
+ dma_unmap_single(dma_dev->dev, mv_chan->dummy_src_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+
return ERR_PTR(ret);
}
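The mv_xor change above adds the mandatory dma_mapping_error() checks: dma_map_single() may fail, and every mapping made before a failure must be unwound in reverse order, as the new error labels do. A condensed sketch of the pattern (function and parameter names are invented):

    #include <linux/dma-mapping.h>

    static int map_two_buffers(struct device *dev, void *src, void *dst,
    			   size_t len, dma_addr_t *src_dma, dma_addr_t *dst_dma)
    {
    	*src_dma = dma_map_single(dev, src, len, DMA_TO_DEVICE);
    	if (dma_mapping_error(dev, *src_dma))
    		return -ENOMEM;

    	*dst_dma = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);
    	if (dma_mapping_error(dev, *dst_dma)) {
    		/* Unwind the first mapping before bailing out. */
    		dma_unmap_single(dev, *src_dma, len, DMA_TO_DEVICE);
    		return -ENOMEM;
    	}
    	return 0;
    }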
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 0d6324c4e2be..765462303de0 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -711,6 +711,9 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
list_add_tail(&ldesc->node, &lhead);
ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dchan->device->dev,
+ ldesc->hwdesc_dma_addr))
+ goto unmap_error;
dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
hwdesc, &ldesc->hwdesc_dma_addr);
@@ -737,6 +740,16 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
spin_unlock_irq(&chan->lock);
return ARRAY_SIZE(dpage->desc);
+
+unmap_error:
+ while (i--) {
+ ldesc--; hwdesc--;
+
+ dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
+				 sizeof(*hwdesc), DMA_TO_DEVICE);
+ }
+
+ return -ENOMEM;
}
static void nbpf_desc_put(struct nbpf_desc *desc)
@@ -1351,7 +1364,7 @@ static int nbpf_probe(struct platform_device *pdev)
if (irqs == 1) {
eirq = irqbuf[0];
- for (i = 0; i <= num_channels; i++)
+ for (i = 0; i < num_channels; i++)
nbpf->chan[i].irq = irqbuf[0];
} else {
eirq = platform_get_irq_byname(pdev, "error");
@@ -1361,16 +1374,15 @@ static int nbpf_probe(struct platform_device *pdev)
if (irqs == num_channels + 1) {
struct nbpf_channel *chan;
- for (i = 0, chan = nbpf->chan; i <= num_channels;
+ for (i = 0, chan = nbpf->chan; i < num_channels;
i++, chan++) {
/* Skip the error IRQ */
if (irqbuf[i] == eirq)
i++;
+ if (i >= ARRAY_SIZE(irqbuf))
+ return -EINVAL;
chan->irq = irqbuf[i];
}
-
- if (chan != nbpf->chan + num_channels)
- return -EINVAL;
} else {
/* 2 IRQs and more than one channel */
if (irqbuf[0] == eirq)
@@ -1378,7 +1390,7 @@ static int nbpf_probe(struct platform_device *pdev)
else
irq = irqbuf[0];
- for (i = 0; i <= num_channels; i++)
+ for (i = 0; i < num_channels; i++)
nbpf->chan[i].irq = irq;
}
}
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index b1f0001cc99c..8e87738086b2 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -569,17 +569,6 @@ static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
writel_relaxed(val, addr);
}
-/* gpi_write_reg_field - write to specific bit field */
-static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
- u32 mask, u32 shift, u32 val)
-{
- u32 tmp = gpi_read_reg(gpii, addr);
-
- tmp &= ~mask;
- val = tmp | ((val << shift) & mask);
- gpi_write_reg(gpii, addr, val);
-}
-
static __always_inline void
gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
{
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 6ea5a880b433..8184d475a49a 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -16,7 +16,7 @@ config SH_DMAE_BASE
depends on SUPERH || COMPILE_TEST
depends on !SUPERH || SH_DMA
depends on !SH_DMA_API
- default y
+ default SUPERH || SH_DMA
select RENESAS_DMA
help
Enable support for the Renesas SuperH DMA controllers.
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 9235db551026..1f687b08d6b8 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -14,6 +14,7 @@
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
+#include <linux/irqchip/irq-renesas-rzv2h.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -89,8 +90,14 @@ struct rz_dmac_chan {
#define to_rz_dmac_chan(c) container_of(c, struct rz_dmac_chan, vc.chan)
+struct rz_dmac_icu {
+ struct platform_device *pdev;
+ u8 dmac_index;
+};
+
struct rz_dmac {
struct dma_device engine;
+ struct rz_dmac_icu icu;
struct device *dev;
struct reset_control *rstc;
void __iomem *base;
@@ -99,6 +106,8 @@ struct rz_dmac {
unsigned int n_channels;
struct rz_dmac_chan *channels;
+ bool has_icu;
+
DECLARE_BITMAP(modules, 1024);
};
@@ -167,6 +176,9 @@ struct rz_dmac {
#define RZ_DMAC_MAX_CHANNELS 16
#define DMAC_NR_LMDESC 64
+/* RZ/V2H ICU related */
+#define RZV2H_MAX_DMAC_INDEX 4
+
/*
* -----------------------------------------------------------------------------
* Device access
@@ -324,7 +336,13 @@ static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
lmdesc->chext = 0;
lmdesc->header = HEADER_LV;
- rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ if (dmac->has_icu) {
+ rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
+ channel->index,
+ RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
+ } else {
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ }
channel->chcfg = chcfg;
channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
@@ -375,7 +393,13 @@ static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
channel->lmdesc.tail = lmdesc;
- rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
+ if (dmac->has_icu) {
+ rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
+ channel->index, channel->mid_rid);
+ } else {
+ rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
+ }
+
channel->chctrl = CHCTRL_SETEN;
}
@@ -647,7 +671,13 @@ static void rz_dmac_device_synchronize(struct dma_chan *chan)
if (ret < 0)
dev_warn(dmac->dev, "DMA Timeout");
- rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ if (dmac->has_icu) {
+ rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
+ channel->index,
+ RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
+ } else {
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ }
}
/*
@@ -748,7 +778,8 @@ static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
+ return __dma_request_channel(&mask, rz_dmac_chan_filter, dma_spec,
+ ofdma->of_node);
}
/*
@@ -823,6 +854,38 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
return 0;
}
+static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ uint32_t dmac_index;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, "renesas,icu", 1, 0, &args);
+ if (ret == -ENOENT)
+ return 0;
+ if (ret)
+ return ret;
+
+ dmac->has_icu = true;
+
+ dmac->icu.pdev = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+ if (!dmac->icu.pdev) {
+ dev_err(dev, "ICU device not found.\n");
+ return -ENODEV;
+ }
+
+ dmac_index = args.args[0];
+ if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
+ dev_err(dev, "DMAC index %u invalid.\n", dmac_index);
+ return -EINVAL;
+ }
+ dmac->icu.dmac_index = dmac_index;
+
+ return 0;
+}
+
static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
struct device_node *np = dev->of_node;
@@ -839,7 +902,7 @@ static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
return -EINVAL;
}
- return 0;
+ return rz_dmac_parse_of_icu(dev, dmac);
}
static int rz_dmac_probe(struct platform_device *pdev)
@@ -873,9 +936,11 @@ static int rz_dmac_probe(struct platform_device *pdev)
if (IS_ERR(dmac->base))
return PTR_ERR(dmac->base);
- dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(dmac->ext_base))
- return PTR_ERR(dmac->ext_base);
+ if (!dmac->has_icu) {
+ dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dmac->ext_base))
+ return PTR_ERR(dmac->ext_base);
+ }
/* Register interrupt handler for error */
irq = platform_get_irq_byname(pdev, irqname);
@@ -990,9 +1055,12 @@ static void rz_dmac_remove(struct platform_device *pdev)
reset_control_assert(dmac->rstc);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+
+ platform_device_put(dmac->icu.pdev);
}
static const struct of_device_id of_rz_dmac_match[] = {
+ { .compatible = "renesas,r9a09g057-dmac", },
{ .compatible = "renesas,rz-dmac", },
{ /* Sentinel */ }
};
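The new rz_dmac_parse_of_icu() above resolves an optional renesas,icu phandle, whose single argument cell carries the DMAC index, into the ICU platform device. A reduced sketch of that lookup, assuming the same one-cell binding (the helper name is hypothetical):

    #include <linux/of.h>
    #include <linux/of_platform.h>

    static struct platform_device *find_icu(struct device_node *np, u32 *index)
    {
    	struct of_phandle_args args;
    	struct platform_device *pdev;

    	/* "renesas,icu = <&icu N>": one fixed argument cell, index 0 */
    	if (of_parse_phandle_with_fixed_args(np, "renesas,icu", 1, 0, &args))
    		return NULL;

    	pdev = of_find_device_by_node(args.np);	/* takes a device reference */
    	of_node_put(args.np);
    	*index = args.args[0];
    	return pdev;	/* caller must platform_device_put() when done */
    }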
diff --git a/drivers/dma/stm32/stm32-dma.c b/drivers/dma/stm32/stm32-dma.c
index 917f8e922373..04389936c8a6 100644
--- a/drivers/dma/stm32/stm32-dma.c
+++ b/drivers/dma/stm32/stm32-dma.c
@@ -613,7 +613,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
reg->dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
@@ -676,7 +676,7 @@ static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
chan->status = DMA_PAUSED;
- dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan);
}
static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
@@ -728,7 +728,7 @@ static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
- dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: reconfigured after pause/resume\n", &chan->vchan);
}
static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
@@ -744,7 +744,7 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
/* cyclic while CIRC/DBM disable => post resume reconfiguration needed */
if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
stm32_dma_post_resume_reconfigure(chan);
- else if (scr & STM32_DMA_SCR_DBM)
+ else if (scr & STM32_DMA_SCR_DBM && chan->desc->num_sgs > 2)
stm32_dma_configure_next_sg(chan);
} else {
chan->busy = false;
@@ -820,7 +820,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
stm32_dma_start_transfer(chan);
}
@@ -922,7 +922,7 @@ static int stm32_dma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
- dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan);
return 0;
}
diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c
index 0c6c4258b195..50e7106c5cb7 100644
--- a/drivers/dma/stm32/stm32-dma3.c
+++ b/drivers/dma/stm32/stm32-dma3.c
@@ -801,7 +801,7 @@ static void stm32_dma3_chan_start(struct stm32_dma3_chan *chan)
chan->dma_status = DMA_IN_PROGRESS;
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static int stm32_dma3_chan_suspend(struct stm32_dma3_chan *chan, bool susp)
@@ -1452,7 +1452,7 @@ static int stm32_dma3_pause(struct dma_chan *c)
chan->dma_status = DMA_PAUSED;
- dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan);
return 0;
}
@@ -1465,7 +1465,7 @@ static int stm32_dma3_resume(struct dma_chan *c)
chan->dma_status = DMA_IN_PROGRESS;
- dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan);
return 0;
}
@@ -1490,7 +1490,7 @@ static int stm32_dma3_terminate_all(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
vchan_dma_desc_free_list(&chan->vchan, &head);
- dev_dbg(chan2dev(chan), "vchan %pK: terminated\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: terminated\n", &chan->vchan);
return 0;
}
@@ -1543,7 +1543,7 @@ static void stm32_dma3_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->swdesc) {
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
stm32_dma3_chan_start(chan);
}
diff --git a/drivers/dma/stm32/stm32-mdma.c b/drivers/dma/stm32/stm32-mdma.c
index e6d525901de7..080c1c725216 100644
--- a/drivers/dma/stm32/stm32-mdma.c
+++ b/drivers/dma/stm32/stm32-mdma.c
@@ -1187,7 +1187,7 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
chan->busy = true;
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static void stm32_mdma_issue_pending(struct dma_chan *c)
@@ -1200,7 +1200,7 @@ static void stm32_mdma_issue_pending(struct dma_chan *c)
if (!vchan_issue_pending(&chan->vchan))
goto end;
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
if (!chan->desc && !chan->busy)
stm32_mdma_start_transfer(chan);
@@ -1220,7 +1220,7 @@ static int stm32_mdma_pause(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
if (!ret)
- dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan);
return ret;
}
@@ -1261,7 +1261,7 @@ static int stm32_mdma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
- dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan);
return 0;
}
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 24796aaaddfa..00d2fd38d17f 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1249,11 +1249,10 @@ static int sun4i_dma_probe(struct platform_device *pdev)
if (priv->irq < 0)
return priv->irq;
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "No clock specified\n");
- return PTR_ERR(priv->clk);
- }
+ priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
+ "Couldn't start the clock\n");
if (priv->cfg->has_reset) {
priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
@@ -1328,12 +1327,6 @@ static int sun4i_dma_probe(struct platform_device *pdev)
vchan_init(&vchan->vc, &priv->slave);
}
- ret = clk_prepare_enable(priv->clk);
- if (ret) {
- dev_err(&pdev->dev, "Couldn't enable the clock\n");
- return ret;
- }
-
/*
* Make sure the IRQs are all disabled and accounted for. The bootloader
* likes to leave these dirty
@@ -1343,33 +1336,23 @@ static int sun4i_dma_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
0, dev_name(&pdev->dev), priv);
- if (ret) {
- dev_err(&pdev->dev, "Cannot request IRQ\n");
- goto err_clk_disable;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Cannot request IRQ\n");
- ret = dma_async_device_register(&priv->slave);
- if (ret) {
- dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
- goto err_clk_disable;
- }
+ ret = dmaenginem_async_device_register(&priv->slave);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register DMA engine device\n");
ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
priv);
- if (ret) {
- dev_err(&pdev->dev, "of_dma_controller_register failed\n");
- goto err_dma_unregister;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register translation function\n");
dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");
return 0;
-
-err_dma_unregister:
- dma_async_device_unregister(&priv->slave);
-err_clk_disable:
- clk_disable_unprepare(priv->clk);
- return ret;
}
static void sun4i_dma_remove(struct platform_device *pdev)
@@ -1380,9 +1363,6 @@ static void sun4i_dma_remove(struct platform_device *pdev)
disable_irq(priv->irq);
of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&priv->slave);
-
- clk_disable_unprepare(priv->clk);
}
static struct sun4i_dma_config sun4i_a10_dma_cfg = {
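The sun4i rework above relies on two managed helpers: devm_clk_get_enabled(), which acquires and enables the clock and undoes both automatically on driver detach, and dev_err_probe(), which logs the failure (or quietly records -EPROBE_DEFER) and returns the error code. A minimal probe sketch using the same pattern (the function name is hypothetical):

    #include <linux/clk.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
    	struct clk *clk;

    	/* Acquire and enable; cleanup is automatic on driver detach. */
    	clk = devm_clk_get_enabled(&pdev->dev, NULL);
    	if (IS_ERR(clk))
    		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
    				     "Couldn't start the clock\n");

    	return 0;
    }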
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index ce80ac4b1a1b..fad896ff29a2 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -27,10 +27,10 @@
#define ADMA_CH_INT_CLEAR 0x1c
#define ADMA_CH_CTRL 0x24
-#define ADMA_CH_CTRL_DIR(val) (((val) & 0xf) << 12)
+#define ADMA_CH_CTRL_DIR(val, mask, shift) (((val) & (mask)) << (shift))
#define ADMA_CH_CTRL_DIR_AHUB2MEM 2
#define ADMA_CH_CTRL_DIR_MEM2AHUB 4
-#define ADMA_CH_CTRL_MODE_CONTINUOUS (2 << 8)
+#define ADMA_CH_CTRL_MODE_CONTINUOUS(shift) (2 << (shift))
#define ADMA_CH_CTRL_FLOWCTRL_EN BIT(1)
#define ADMA_CH_CTRL_XFER_PAUSE_SHIFT 0
@@ -41,15 +41,27 @@
#define ADMA_CH_CONFIG_MAX_BURST_SIZE 16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS 8
-#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
+#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 4)
+
+#define ADMA_GLOBAL_CH_CONFIG 0x400
+#define ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0x7)
+#define ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 8)
#define TEGRA186_ADMA_GLOBAL_PAGE_CHGRP 0x30
#define TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ 0x70
#define TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ 0x84
+#define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0 0x44
+#define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1 0x48
+#define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0 0x100
+#define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1 0x104
+#define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0 0x180
+#define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1 0x184
+#define TEGRA264_ADMA_GLOBAL_PAGE_OFFSET 0x8
#define ADMA_CH_FIFO_CTRL 0x2c
#define ADMA_CH_TX_FIFO_SIZE_SHIFT 8
#define ADMA_CH_RX_FIFO_SIZE_SHIFT 0
+#define ADMA_GLOBAL_CH_FIFO_CTRL 0x300
#define ADMA_CH_LOWER_SRC_ADDR 0x34
#define ADMA_CH_LOWER_TRG_ADDR 0x3c
@@ -73,36 +85,48 @@ struct tegra_adma;
* @adma_get_burst_config: Function callback used to set DMA burst size.
* @global_reg_offset: Register offset of DMA global register.
* @global_int_clear: Register offset of DMA global interrupt clear.
+ * @global_ch_fifo_base: Global channel FIFO control base offset.
+ * @global_ch_config_base: Global channel config base offset.
* @ch_req_tx_shift: Register offset for AHUB transmit channel select.
* @ch_req_rx_shift: Register offset for AHUB receive channel select.
+ * @ch_dir_shift: Channel direction bit position.
+ * @ch_mode_shift: Channel mode bit position.
* @ch_base_offset: Register offset of DMA channel registers.
+ * @ch_tc_offset_diff: Offset difference for TC and later registers on Tegra264.
* @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
+ * @ch_config: Outstanding requests and WRR config values.
* @ch_req_mask: Mask for Tx or Rx channel select.
+ * @ch_dir_mask: Mask for channel direction.
* @ch_req_max: Maximum number of Tx or Rx channels available.
* @ch_reg_size: Size of DMA channel register space.
* @nr_channels: Number of DMA channels available.
* @ch_fifo_size_mask: Mask for FIFO size field.
* @sreq_index_offset: Slave channel index offset.
* @max_page: Maximum ADMA Channel Page.
- * @has_outstanding_reqs: If DMA channel can have outstanding requests.
* @set_global_pg_config: Global page programming.
*/
struct tegra_adma_chip_data {
unsigned int (*adma_get_burst_config)(unsigned int burst_size);
unsigned int global_reg_offset;
unsigned int global_int_clear;
+ unsigned int global_ch_fifo_base;
+ unsigned int global_ch_config_base;
unsigned int ch_req_tx_shift;
unsigned int ch_req_rx_shift;
+ unsigned int ch_dir_shift;
+ unsigned int ch_mode_shift;
unsigned int ch_base_offset;
+ unsigned int ch_tc_offset_diff;
unsigned int ch_fifo_ctrl;
+ unsigned int ch_config;
unsigned int ch_req_mask;
+ unsigned int ch_dir_mask;
unsigned int ch_req_max;
unsigned int ch_reg_size;
unsigned int nr_channels;
unsigned int ch_fifo_size_mask;
unsigned int sreq_index_offset;
unsigned int max_page;
- bool has_outstanding_reqs;
void (*set_global_pg_config)(struct tegra_adma *tdma);
};
@@ -112,6 +136,7 @@ struct tegra_adma_chip_data {
struct tegra_adma_chan_regs {
unsigned int ctrl;
unsigned int config;
+ unsigned int global_config;
unsigned int src_addr;
unsigned int trg_addr;
unsigned int fifo_ctrl;
@@ -150,6 +175,9 @@ struct tegra_adma_chan {
/* Transfer count and position info */
unsigned int tx_buf_count;
unsigned int tx_buf_pos;
+
+ unsigned int global_ch_fifo_offset;
+ unsigned int global_ch_config_offset;
};
/*
@@ -246,6 +274,29 @@ static void tegra186_adma_global_page_config(struct tegra_adma *tdma)
tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ + (tdma->ch_page_no * 0x4), 0xffffff);
}
+static void tegra264_adma_global_page_config(struct tegra_adma *tdma)
+{
+ u32 global_page_offset = tdma->ch_page_no * TEGRA264_ADMA_GLOBAL_PAGE_OFFSET;
+
+ /* If the default page (page1) is not used, then clear page1 registers */
+ if (tdma->ch_page_no) {
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1, 0);
+ }
+
+ /* Program global registers for selected page */
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1 + global_page_offset, 0x1);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1 + global_page_offset, 0x1);
+}
+
static int tegra_adma_init(struct tegra_adma *tdma)
{
u32 status;
@@ -404,11 +455,21 @@ static void tegra_adma_start(struct tegra_adma_chan *tdc)
tdc->tx_buf_pos = 0;
tdc->tx_buf_count = 0;
- tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc);
+ tdma_ch_write(tdc, ADMA_CH_TC - tdc->tdma->cdata->ch_tc_offset_diff, ch_regs->tc);
tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
- tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_regs->src_addr);
- tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_regs->trg_addr);
- tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdc->tdma->cdata->ch_tc_offset_diff,
+ ch_regs->src_addr);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdc->tdma->cdata->ch_tc_offset_diff,
+ ch_regs->trg_addr);
+
+ if (!tdc->tdma->cdata->global_ch_fifo_base)
+ tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
+ else if (tdc->global_ch_fifo_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_fifo_offset, ch_regs->fifo_ctrl);
+
+ if (tdc->global_ch_config_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_regs->global_config);
+
tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config);
/* Start ADMA */
@@ -421,7 +482,8 @@ static unsigned int tegra_adma_get_residue(struct tegra_adma_chan *tdc)
{
struct tegra_adma_desc *desc = tdc->desc;
unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1;
- unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS);
+ unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS -
+ tdc->tdma->cdata->ch_tc_offset_diff);
unsigned int periods_remaining;
/*
@@ -627,13 +689,16 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
return -EINVAL;
}
- ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
- ADMA_CH_CTRL_MODE_CONTINUOUS |
+ ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir, cdata->ch_dir_mask,
+ cdata->ch_dir_shift) |
+ ADMA_CH_CTRL_MODE_CONTINUOUS(cdata->ch_mode_shift) |
ADMA_CH_CTRL_FLOWCTRL_EN;
ch_regs->config |= cdata->adma_get_burst_config(burst_size);
- ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
- if (cdata->has_outstanding_reqs)
- ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
+
+ if (cdata->global_ch_config_base)
+ ch_regs->global_config |= cdata->ch_config;
+ else
+ ch_regs->config |= cdata->ch_config;
/*
* 'sreq_index' represents the current ADMAIF channel number and as per
@@ -788,12 +853,23 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
/* skip if channel is not active */
if (!ch_reg->cmd)
continue;
- ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
- ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
- ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
+ ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff);
+ ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR -
+ tdma->cdata->ch_tc_offset_diff);
+ ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR -
+ tdma->cdata->ch_tc_offset_diff);
ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
- ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
+
+ if (tdc->global_ch_config_offset)
+ ch_reg->global_config = tdma_read(tdc->tdma, tdc->global_ch_config_offset);
+
+ if (!tdc->tdma->cdata->global_ch_fifo_base)
+ ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
+ else if (tdc->global_ch_fifo_offset)
+ ch_reg->fifo_ctrl = tdma_read(tdc->tdma, tdc->global_ch_fifo_offset);
+
ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
}
clk_disable:
@@ -832,12 +908,23 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
/* skip if channel was not active earlier */
if (!ch_reg->cmd)
continue;
- tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
- tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
- tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
+ tdma_ch_write(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff, ch_reg->tc);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdma->cdata->ch_tc_offset_diff,
+ ch_reg->src_addr);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdma->cdata->ch_tc_offset_diff,
+ ch_reg->trg_addr);
tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
- tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
+
+ if (!tdc->tdma->cdata->global_ch_fifo_base)
+ tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
+ else if (tdc->global_ch_fifo_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_fifo_offset, ch_reg->fifo_ctrl);
+
+ if (tdc->global_ch_config_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_reg->global_config);
+
tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
+
tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
}
@@ -848,17 +935,23 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.adma_get_burst_config = tegra210_adma_get_burst_config,
.global_reg_offset = 0xc00,
.global_int_clear = 0x20,
+ .global_ch_fifo_base = 0,
+ .global_ch_config_base = 0,
.ch_req_tx_shift = 28,
.ch_req_rx_shift = 24,
+ .ch_dir_shift = 12,
+ .ch_mode_shift = 8,
.ch_base_offset = 0,
+ .ch_tc_offset_diff = 0,
+ .ch_config = ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1),
.ch_req_mask = 0xf,
+ .ch_dir_mask = 0xf,
.ch_req_max = 10,
.ch_reg_size = 0x80,
.nr_channels = 22,
.ch_fifo_size_mask = 0xf,
.sreq_index_offset = 2,
.max_page = 0,
- .has_outstanding_reqs = false,
.set_global_pg_config = NULL,
};
@@ -866,23 +959,56 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.adma_get_burst_config = tegra186_adma_get_burst_config,
.global_reg_offset = 0,
.global_int_clear = 0x402c,
+ .global_ch_fifo_base = 0,
+ .global_ch_config_base = 0,
.ch_req_tx_shift = 27,
.ch_req_rx_shift = 22,
+ .ch_dir_shift = 12,
+ .ch_mode_shift = 8,
.ch_base_offset = 0x10000,
+ .ch_tc_offset_diff = 0,
+ .ch_config = ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1) |
+ TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8),
.ch_req_mask = 0x1f,
+ .ch_dir_mask = 0xf,
.ch_req_max = 20,
.ch_reg_size = 0x100,
.nr_channels = 32,
.ch_fifo_size_mask = 0x1f,
.sreq_index_offset = 4,
.max_page = 4,
- .has_outstanding_reqs = true,
.set_global_pg_config = tegra186_adma_global_page_config,
};
+static const struct tegra_adma_chip_data tegra264_chip_data = {
+ .adma_get_burst_config = tegra186_adma_get_burst_config,
+ .global_reg_offset = 0,
+ .global_int_clear = 0x800c,
+ .global_ch_fifo_base = ADMA_GLOBAL_CH_FIFO_CTRL,
+ .global_ch_config_base = ADMA_GLOBAL_CH_CONFIG,
+ .ch_req_tx_shift = 26,
+ .ch_req_rx_shift = 20,
+ .ch_dir_shift = 10,
+ .ch_mode_shift = 7,
+ .ch_base_offset = 0x10000,
+ .ch_tc_offset_diff = 4,
+ .ch_config = ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(1) |
+ ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(8),
+ .ch_req_mask = 0x3f,
+ .ch_dir_mask = 7,
+ .ch_req_max = 32,
+ .ch_reg_size = 0x100,
+ .nr_channels = 64,
+ .ch_fifo_size_mask = 0x7f,
+ .sreq_index_offset = 0,
+ .max_page = 10,
+ .set_global_pg_config = tegra264_adma_global_page_config,
+};
+
static const struct of_device_id tegra_adma_of_match[] = {
{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
{ .compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data },
+ { .compatible = "nvidia,tegra264-adma", .data = &tegra264_chip_data },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_adma_of_match);
@@ -985,6 +1111,15 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdc->chan_addr = tdma->ch_base_addr + (cdata->ch_reg_size * i);
+ if (tdma->base_addr) {
+ if (cdata->global_ch_fifo_base)
+ tdc->global_ch_fifo_offset = cdata->global_ch_fifo_base + (4 * i);
+
+ if (cdata->global_ch_config_base)
+ tdc->global_ch_config_offset =
+ cdata->global_ch_config_base + (4 * i);
+ }
+
tdc->irq = of_irq_get(pdev->dev.of_node, i);
if (tdc->irq <= 0) {
ret = tdc->irq ?: -ENXIO;
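Moving the direction mask and the mode/direction shifts into tegra_adma_chip_data lets a single macro encode register fields whose bit positions differ between SoC generations. A standalone sketch with the bit positions taken from the chip data above (macro and variable names are illustrative):

    #include <linux/types.h>

    /* Same macro shape as the patch: mask the value, then shift into place. */
    #define CH_CTRL_DIR(val, mask, shift)	(((val) & (mask)) << (shift))

    /* Tegra186-style layout: 4-bit direction field at bit 12. */
    static const u32 dir_t186 = CH_CTRL_DIR(4, 0xf, 12);

    /* Tegra264-style layout: 3-bit direction field at bit 10. */
    static const u32 dir_t264 = CH_CTRL_DIR(4, 0x7, 10);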
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
index 2adc2cca10e9..dbf168146d35 100644
--- a/drivers/dma/ti/Kconfig
+++ b/drivers/dma/ti/Kconfig
@@ -17,7 +17,7 @@ config TI_EDMA
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
- default y
+ default ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
help
Enable support for the TI EDMA (Enhanced DMA) controller. This DMA
engine is found on TI DaVinci, AM33xx, AM43xx, DRA7xx and Keystone 2
@@ -29,7 +29,7 @@ config DMA_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
- default y
+ default ARCH_OMAP
help
Enable support for the TI sDMA (System DMA or DMA4) controller. This
DMA engine is found on OMAP and DRA7xx parts.
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index b6255c0601bb..aa2dc762140f 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -5624,7 +5624,8 @@ static int udma_probe(struct platform_device *pdev)
uc->config.dir = DMA_MEM_TO_MEM;
uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
dev_name(dev), i);
-
+ if (!uc->name)
+ return -ENOMEM;
vchan_init(&uc->vc, &ud->ddev);
/* Use custom vchan completion handling */
tasklet_setup(&uc->vc.task, udma_vchan_complete);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 3ad44afd0e74..a34d8f0ceed8 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2909,6 +2909,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
return -EINVAL;
}
+ xdev->common.directions |= chan->direction;
+
/* Request the interrupt */
chan->irq = of_irq_get(node, chan->tdest);
if (chan->irq < 0)
@@ -3115,6 +3117,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
}
+ dma_set_max_seg_size(xdev->dev, xdev->max_buffer_len);
+
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
xdev->has_axistream_connected =
of_property_read_bool(node, "xlnx,axistream-connected");