From 06006ad29abeee1f94d1793107309ce100067e46 Mon Sep 17 00:00:00 2001 From: Zong Li Date: Mon, 28 Mar 2022 17:52:22 +0800 Subject: dt-bindings: dma-engine: sifive,fu540: Add dma-channels property and modify compatible Add a dma-channels property so that we can determine how many channels there are from the device tree, rather than statically defining it in the PDMA driver. In addition, we also modify the compatible strings for the PDMA versioning scheme. Signed-off-by: Zong Li Reviewed-by: Rob Herring Suggested-by: Palmer Dabbelt Reviewed-by: Palmer Dabbelt Acked-by: Palmer Dabbelt Acked-by: Krzysztof Kozlowski Reviewed-by: Bin Meng Link: https://lore.kernel.org/r/7cc9a7b5f7e6c28fc9eb172c441b5aed2159b8a0.1648461096.git.zong.li@sifive.com Signed-off-by: Vinod Koul --- .../bindings/dma/sifive,fu540-c000-pdma.yaml | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml index 47c46af25536..3271755787b4 100644 --- a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml +++ b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml @@ -28,7 +28,15 @@ allOf: properties: compatible: items: - - const: sifive,fu540-c000-pdma + - enum: + - sifive,fu540-c000-pdma + - const: sifive,pdma0 + description: + Should be "sifive,<chip>-pdma" and "sifive,pdma<version>". + Supported compatible strings are - + "sifive,fu540-c000-pdma" for the SiFive PDMA v0 as integrated onto the + SiFive FU540 chip, and "sifive,pdma0" for the SiFive PDMA v0 IP block + with no chip integration tweaks. reg: maxItems: 1 @@ -37,6 +45,12 @@ properties: minItems: 1 maxItems: 8 + dma-channels: + description: For backwards-compatibility, the default value is 4 + minimum: 1 + maximum: 4 + default: 4 + '#dma-cells': const: 1 @@ -50,8 +64,9 @@ unevaluatedProperties: false examples: - | dma-controller@3000000 { - compatible = "sifive,fu540-c000-pdma"; + compatible = "sifive,fu540-c000-pdma", "sifive,pdma0"; reg = <0x3000000 0x8000>; + dma-channels = <4>; interrupts = <23>, <24>, <25>, <26>, <27>, <28>, <29>, <30>; #dma-cells = <1>; }; -- cgit From e2dfce24f4175e8ba183d5800f669fc89843df54 Mon Sep 17 00:00:00 2001 From: Zong Li Date: Mon, 28 Mar 2022 17:52:25 +0800 Subject: dmaengine: sf-pdma: Get number of channels from device tree The driver currently assumes that there are always four channels; this would cause errors if there are actually fewer than four channels. Change that by getting the number of channels from the device tree. For backwards compatibility, it uses the default value (i.e. 4) when there is no 'dma-channels' property in the dts.
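As a minimal sketch of the optional-property-with-default idiom this patch applies to "dma-channels" (of_property_read_u32() is the real kernel API; the helper name is illustrative, not part of the patch):

	#include <linux/of.h>

	static u32 example_read_nr_channels(struct device_node *np)
	{
		u32 n_chans;

		/* of_property_read_u32() returns non-zero when the property is absent */
		if (of_property_read_u32(np, "dma-channels", &n_chans))
			n_chans = 4;	/* backwards-compatible default */

		return n_chans;
	}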
Signed-off-by: Zong Li Acked-by: Palmer Dabbelt Reviewed-by: Bin Meng Link: https://lore.kernel.org/r/f08a95b6582a51712c5b2c3cb859136d07bfa8b9.1648461096.git.zong.li@sifive.com Signed-off-by: Vinod Koul --- drivers/dma/sf-pdma/sf-pdma.c | 24 ++++++++++++++++-------- drivers/dma/sf-pdma/sf-pdma.h | 8 ++------ 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index f12606aeff87..db5a4ef76077 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -482,23 +482,30 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma) static int sf_pdma_probe(struct platform_device *pdev) { struct sf_pdma *pdma; - struct sf_pdma_chan *chan; struct resource *res; - int len, chans; - int ret; + int ret, n_chans; const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; - chans = PDMA_NR_CH; - len = sizeof(*pdma) + sizeof(*chan) * chans; - pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); + ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", &n_chans); + if (ret) { + /* backwards-compatibility for no dma-channels property */ + dev_dbg(&pdev->dev, "set number of channels to default value: 4\n"); + n_chans = PDMA_MAX_NR_CH; + } else if (n_chans > PDMA_MAX_NR_CH) { + dev_err(&pdev->dev, "the number of channels exceeds the maximum\n"); + return -EINVAL; + } + + pdma = devm_kzalloc(&pdev->dev, struct_size(pdma, chans, n_chans), + GFP_KERNEL); if (!pdma) return -ENOMEM; - pdma->n_chans = chans; + pdma->n_chans = n_chans; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pdma->membase = devm_ioremap_resource(&pdev->dev, res); @@ -556,7 +563,7 @@ static int sf_pdma_remove(struct platform_device *pdev) struct sf_pdma_chan *ch; int i; - for (i = 0; i < PDMA_NR_CH; i++) { + for (i = 0; i < pdma->n_chans; i++) { ch = &pdma->chans[i]; devm_free_irq(&pdev->dev, ch->txirq, ch); @@ -574,6 +581,7 @@ static int sf_pdma_remove(struct platform_device *pdev) static const struct of_device_id sf_pdma_dt_ids[] = { { .compatible = "sifive,fu540-c000-pdma" }, + { .compatible = "sifive,pdma0" }, {}, }; MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids); diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h index 0c20167b097d..dcb3687bd5da 100644 --- a/drivers/dma/sf-pdma/sf-pdma.h +++ b/drivers/dma/sf-pdma/sf-pdma.h @@ -22,11 +22,7 @@ #include "../dmaengine.h" #include "../virt-dma.h" -#define PDMA_NR_CH 4 - -#if (PDMA_NR_CH != 4) -#error "Please define PDMA_NR_CH to 4" -#endif +#define PDMA_MAX_NR_CH 4 #define PDMA_BASE_ADDR 0x3000000 #define PDMA_CHAN_OFFSET 0x1000 @@ -118,7 +114,7 @@ struct sf_pdma { void __iomem *membase; void __iomem *mappedbase; u32 n_chans; - struct sf_pdma_chan chans[PDMA_NR_CH]; + struct sf_pdma_chan chans[]; }; #endif /* _SF_PDMA_H */ -- cgit From 448a0994cc698d41d056f14fb9065305b14f3c75 Mon Sep 17 00:00:00 2001 From: Lad Prabhakar Date: Wed, 6 Apr 2022 09:04:17 +0100 Subject: dmaengine: sh: Kconfig: Make RZ_DMAC depend on ARCH_RZG2L The DMAC block is identical on the Renesas RZ/G2L, RZ/G2UL and RZ/V2L SoCs, so instead of adding a dependency for each SoC, add a dependency on ARCH_RZG2L. The ARCH_RZG2L config option is already selected by ARCH_R9A07G043, ARCH_R9A07G044 and ARCH_R9A07G054.
Signed-off-by: Lad Prabhakar Link: https://lore.kernel.org/r/20220406080417.14593-1-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Vinod Koul --- drivers/dma/sh/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index b35d705f79e7..c0b2997ab7fd 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -50,7 +50,7 @@ config RENESAS_USB_DMAC config RZ_DMAC tristate "Renesas RZ/{G2L,V2L} DMA Controller" - depends on ARCH_R9A07G044 || ARCH_R9A07G054 || COMPILE_TEST + depends on ARCH_RZG2L || COMPILE_TEST select RENESAS_DMA select DMA_VIRTUAL_CHANNELS help -- cgit From 8b0c99371ac8c8db25ce68ce207fee3b22627597 Mon Sep 17 00:00:00 2001 From: Lad Prabhakar Date: Mon, 4 Apr 2022 16:55:55 +0100 Subject: dmaengine: nbpfaxi: Use platform_get_irq_optional() to get the interrupt platform_get_resource(pdev, IORESOURCE_IRQ, ..) relies on static allocation of IRQ resources in the DT core code; this causes an issue when using hierarchical interrupt domains with the "interrupts" property in the node, as this bypasses the hierarchical setup and messes up the IRQ chaining. In preparation for removal of the static setup of IRQ resources from the DT core code, use platform_get_irq_optional(). There are no non-DT users for this driver, so the interrupt range (irq_res->start to irq_res->end) is no longer required, and with DT we can be sure there will be a single IRQ resource for each index. Signed-off-by: Lad Prabhakar Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20220404155557.27316-2-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Vinod Koul --- drivers/dma/nbpfaxi.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 9c52c57919c6..a7063e9cd551 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1294,7 +1294,7 @@ static int nbpf_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; struct nbpf_device *nbpf; struct dma_device *dma_dev; - struct resource *iomem, *irq_res; + struct resource *iomem; const struct nbpf_config *cfg; int num_channels; int ret, irq, eirq, i; @@ -1335,13 +1335,11 @@ static int nbpf_probe(struct platform_device *pdev) nbpf->config = cfg; for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) { - irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i); - if (!irq_res) - break; - - for (irq = irq_res->start; irq <= irq_res->end; irq++, irqs++) irqbuf[irqs] = irq; + irq = platform_get_irq_optional(pdev, i); + if (irq < 0 && irq != -ENXIO) + return irq; + if (irq > 0) + irqbuf[irqs++] = irq; } /* -- cgit From bb40bb695ec8b90bee4bfd7db52ba92c4a0b7586 Mon Sep 17 00:00:00 2001 From: Lad Prabhakar Date: Mon, 4 Apr 2022 16:55:56 +0100 Subject: dmaengine: mediatek: mtk-hsdma: Use platform_get_irq() to get the interrupt platform_get_resource(pdev, IORESOURCE_IRQ, ..) relies on static allocation of IRQ resources in the DT core code; this causes an issue when using hierarchical interrupt domains with the "interrupts" property in the node, as this bypasses the hierarchical setup and messes up the IRQ chaining. In preparation for removal of the static setup of IRQ resources from the DT core code, use platform_get_irq().
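Both conversions follow the same shape; here is a simplified sketch of the optional variant used by nbpfaxi above (the function and array names are illustrative, and unlike the driver it stops at the first missing index):

	#include <linux/platform_device.h>

	static int example_collect_irqs(struct platform_device *pdev, int *irqs, int max)
	{
		int i, n = 0;

		for (i = 0; n < max; i++) {
			int irq = platform_get_irq_optional(pdev, i);

			if (irq == -ENXIO)
				break;		/* no IRQ at this index: stop collecting */
			if (irq < 0)
				return irq;	/* a real error, e.g. -EPROBE_DEFER */
			if (irq > 0)
				irqs[n++] = irq;
		}

		return n;	/* number of IRQs found */
	}

Unlike the old resource-based loop, each index yields exactly one Linux IRQ number, so no start..end range walk is needed.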
Signed-off-by: Lad Prabhakar Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20220404155557.27316-3-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Vinod Koul --- drivers/dma/mediatek/mtk-hsdma.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index 6ad8afbb95f2..c0fffde7fe08 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c @@ -923,13 +923,10 @@ static int mtk_hsdma_probe(struct platform_device *pdev) return PTR_ERR(hsdma->clk); } - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) { - dev_err(&pdev->dev, "No irq resource for %s\n", - dev_name(&pdev->dev)); - return -EINVAL; - } - hsdma->irq = res->start; + err = platform_get_irq(pdev, 0); + if (err < 0) + return err; + hsdma->irq = err; refcount_set(&hsdma->pc_refcnt, 0); spin_lock_init(&hsdma->lock); -- cgit From 80380f89d0f56e0efa43e24c11f989e89b4af952 Mon Sep 17 00:00:00 2001 From: Lad Prabhakar Date: Mon, 4 Apr 2022 16:55:57 +0100 Subject: dmaengine: mediatek-cqdma: Use platform_get_irq() to get the interrupt platform_get_resource(pdev, IORESOURCE_IRQ, ..) relies on static allocation of IRQ resources in the DT core code; this causes an issue when using hierarchical interrupt domains with the "interrupts" property in the node, as this bypasses the hierarchical setup and messes up the IRQ chaining. In preparation for removal of the static setup of IRQ resources from the DT core code, use platform_get_irq(). Signed-off-by: Lad Prabhakar Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20220404155557.27316-4-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Vinod Koul --- drivers/dma/mediatek/mtk-cqdma.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c index 41ef9f15d3d5..f8847c48ba03 100644 --- a/drivers/dma/mediatek/mtk-cqdma.c +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -751,7 +751,6 @@ static int mtk_cqdma_probe(struct platform_device *pdev) struct mtk_cqdma_device *cqdma; struct mtk_cqdma_vchan *vc; struct dma_device *dd; - struct resource *res; int err; u32 i; @@ -824,13 +823,10 @@ static int mtk_cqdma_probe(struct platform_device *pdev) return PTR_ERR(cqdma->pc[i]->base); /* allocate IRQ resource */ - res = platform_get_resource(pdev, IORESOURCE_IRQ, i); - if (!res) { - dev_err(&pdev->dev, "No irq resource for %s\n", - dev_name(&pdev->dev)); - return -EINVAL; - } - cqdma->pc[i]->irq = res->start; + err = platform_get_irq(pdev, i); + if (err < 0) + return err; + cqdma->pc[i]->irq = err; err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq, mtk_cqdma_irq, 0, dev_name(&pdev->dev), -- cgit From 3157dd0a366183adaea2f4d8721961637d562fee Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Thu, 7 Apr 2022 11:28:28 -0700 Subject: dmaengine: idxd: don't load pasid config until needed The driver currently programs the system pasid to the WQ preemptively when system pasid is enabled. Given that a dwq will reprogram the pasid, possibly with a different pasid, the programming is not necessary. The pasid_en bit can be set for an swq, as it does not need pasid programming but does need the pasid_en bit. Remove system pasid programming on device config write. Add pasid programming for the kernel wq type on wq driver enable. The char dev driver already reprograms the dwq on the ->open() call, so there's no change.
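The reworked helpers in the patch below share one underlying pattern: a read-modify-write of a single 32-bit WQCFG word under the device lock. A rough, self-contained sketch of that pattern (names are illustrative, not the driver's API):

	#include <linux/io.h>
	#include <linux/spinlock.h>

	static void example_update_wqcfg_word(void __iomem *reg, u32 set_bits,
					      spinlock_t *lock)
	{
		u32 val;

		spin_lock(lock);
		val = ioread32(reg);	/* read the live WQCFG word */
		val |= set_bits;	/* e.g. the pasid_en bit */
		iowrite32(val, reg);	/* write it back under the lock */
		spin_unlock(lock);
	}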
Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/164935607115.1660372.6734518676950372366.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 66 ++++++++++++++++++++++++++++++++++---------- drivers/dma/idxd/registers.h | 1 + 2 files changed, 53 insertions(+), 14 deletions(-) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 3061fe857d69..2903f8bb30e1 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -299,24 +299,46 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd) } } -int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid) +static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv) { struct idxd_device *idxd = wq->idxd; - int rc; union wqcfg wqcfg; unsigned int offset; - rc = idxd_wq_disable(wq, false); - if (rc < 0) - return rc; + offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX); + spin_lock(&idxd->dev_lock); + wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset); + wqcfg.priv = priv; + wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX]; + iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset); + spin_unlock(&idxd->dev_lock); +} + +static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid) +{ + struct idxd_device *idxd = wq->idxd; + union wqcfg wqcfg; + unsigned int offset; offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX); spin_lock(&idxd->dev_lock); wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset); wqcfg.pasid_en = 1; wqcfg.pasid = pasid; + wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX]; iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset); spin_unlock(&idxd->dev_lock); +} + +int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid) +{ + int rc; + + rc = idxd_wq_disable(wq, false); + if (rc < 0) + return rc; + + __idxd_wq_set_pasid_locked(wq, pasid); rc = idxd_wq_enable(wq); if (rc < 0) @@ -797,7 +819,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq) */ for (i = 0; i < WQCFG_STRIDES(idxd); i++) { wq_offset = WQCFG_OFFSET(idxd, wq->id, i); - wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset); + wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset); } if (wq->size == 0 && wq->type != IDXD_WQT_NONE) @@ -813,14 +835,8 @@ static int idxd_wq_config_write(struct idxd_wq *wq) if (wq_dedicated(wq)) wq->wqcfg->mode = 1; - if (device_pasid_enabled(idxd)) { - wq->wqcfg->pasid_en = 1; - if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq)) - wq->wqcfg->pasid = idxd->pasid; - } - /* - * Here the priv bit is set depending on the WQ type. priv = 1 if the + * The WQ priv bit is set depending on the WQ type. priv = 1 if the * WQ type is kernel to indicate privileged access. This setting only * matters for dedicated WQ. According to the DSA spec: * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the @@ -830,7 +846,6 @@ static int idxd_wq_config_write(struct idxd_wq *wq) * In the case of a dedicated kernel WQ that is not able to support * the PASID cap, then the configuration will be rejected. */ - wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL); if (wq_dedicated(wq) && wq->wqcfg->pasid_en && !idxd_device_pasid_priv_enabled(idxd) && wq->type == IDXD_WQT_KERNEL) { @@ -1263,6 +1278,29 @@ int __drv_enable_wq(struct idxd_wq *wq) } } + /* + * In the event that the WQ is configurable for pasid and priv bits. + * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit. + * However, for non-kernel wq, the driver should only set the pasid_en bit for + * shared wq. 
A dedicated wq that is not 'kernel' type will configure pasid and + * pasid_en later on, so there is no need to set them up here. + */ + if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { + int priv = 0; + + if (device_pasid_enabled(idxd)) { + if (is_idxd_wq_kernel(wq) || wq_shared(wq)) { + u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0; + + __idxd_wq_set_pasid_locked(wq, pasid); + } + } + + if (is_idxd_wq_kernel(wq)) + priv = 1; + __idxd_wq_set_priv_locked(wq, priv); + } + rc = 0; spin_lock(&idxd->dev_lock); if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index aa642aecdc0b..02449aa9c454 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -353,6 +353,7 @@ union wqcfg { } __packed; #define WQCFG_PASID_IDX 2 +#define WQCFG_PRIVL_IDX 2 #define WQCFG_OCCUP_IDX 6 #define WQCFG_OCCUP_MASK 0xffff -- cgit From 9060a7a46a9463ec519a9436fa2b84f5407b138e Mon Sep 17 00:00:00 2001 From: jianchunfu Date: Sun, 3 Apr 2022 20:31:20 +0800 Subject: dmaengine: ep93xx: Remove redundant word in comment Remove the second 'to', which is repeated. Signed-off-by: jianchunfu Link: https://lore.kernel.org/r/20220403123120.7794-1-jianchunfu@cmss.chinamobile.com Signed-off-by: Vinod Koul --- drivers/dma/ep93xx_dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 98f9ee70362e..971ff5f9ae84 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -132,7 +132,7 @@ struct ep93xx_dma_desc { /** * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel * @chan: dmaengine API channel - * @edma: pointer to to the engine device + * @edma: pointer to the engine device * @regs: memory mapped registers * @irq: interrupt number of the channel * @clk: clock used by this channel -- cgit From 105989311442e4d74a0226e555503dd57aa1e896 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Sat, 2 Apr 2022 11:54:25 +0200 Subject: dmaengine: bestcomm: Prepare cleanup of powerpc's asm/prom.h powerpc's asm/prom.h brings some headers that it doesn't need itself. In order to clean it up, first add the missing headers in users of asm/prom.h. Signed-off-by: Christophe Leroy Link: https://lore.kernel.org/r/f98acba303489bdf003e7256460696225b00702e.1648833428.git.christophe.leroy@csgroup.eu Signed-off-by: Vinod Koul --- drivers/dma/bestcomm/bestcomm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c index 8c42e5ca00a9..1822a7034630 100644 --- a/drivers/dma/bestcomm/bestcomm.c +++ b/drivers/dma/bestcomm/bestcomm.c @@ -17,7 +17,9 @@ #include #include #include +#include #include +#include #include #include #include -- cgit From ec834f1cc7cf471b090bb0a6ee077a7bb90a3549 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Wed, 30 Mar 2022 12:36:45 +0200 Subject: dmaengine: stm32-mdma: check the channel availability (secure or not) STM32_MDMA_CCR bit[8] is used to enable Secure Mode (SM). If this bit is set, it means that all the channel registers are write-protected, so the channel is not available for Linux use. Add an stm32_mdma_filter_fn() filter callback and give it to __dma_request_channel() (instead of dma_get_any_slave_channel()), to exclude the channel if it is marked Secure.
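For reference, a filter callback gates channel allocation at request time; a minimal sketch of the consumer-side pattern (example_chan_is_reserved() is a hypothetical stand-in for the CCR secure-mode test the patch performs):

	#include <linux/dmaengine.h>

	static bool example_filter(struct dma_chan *chan, void *fn_param)
	{
		return !example_chan_is_reserved(chan);	/* false skips this channel */
	}

	/* ...then, at request time: */
	dma_cap_mask_t mask;
	struct dma_chan *c;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	c = dma_request_channel(mask, example_filter, NULL);

dma_request_channel() tries each candidate channel and only returns one for which the filter returned true.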
Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20220330103645.99969-1-amelie.delaunay@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-mdma.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 6f57ff0e7b37..95e5831e490a 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -73,6 +73,7 @@ #define STM32_MDMA_CCR_WEX BIT(14) #define STM32_MDMA_CCR_HEX BIT(13) #define STM32_MDMA_CCR_BEX BIT(12) +#define STM32_MDMA_CCR_SM BIT(8) #define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6) #define STM32_MDMA_CCR_PL(n) FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n)) #define STM32_MDMA_CCR_TCIE BIT(5) @@ -248,6 +249,7 @@ struct stm32_mdma_device { u32 nr_channels; u32 nr_requests; u32 nr_ahb_addr_masks; + u32 chan_reserved; struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS]; u32 ahb_addr_masks[]; }; @@ -1456,10 +1458,23 @@ static void stm32_mdma_free_chan_resources(struct dma_chan *c) chan->desc_pool = NULL; } +static bool stm32_mdma_filter_fn(struct dma_chan *c, void *fn_param) +{ + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + + /* Check if chan is marked Secure */ + if (dmadev->chan_reserved & BIT(chan->id)) + return false; + + return true; +} + static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct stm32_mdma_device *dmadev = ofdma->of_dma_data; + dma_cap_mask_t mask = dmadev->ddev.cap_mask; struct stm32_mdma_chan *chan; struct dma_chan *c; struct stm32_mdma_chan_config config; @@ -1485,7 +1500,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec, return NULL; } - c = dma_get_any_slave_channel(&dmadev->ddev); + c = __dma_request_channel(&mask, stm32_mdma_filter_fn, &config, ofdma->of_node); if (!c) { dev_err(mdma2dev(dmadev), "No more channels available\n"); return NULL; @@ -1615,6 +1630,10 @@ static int stm32_mdma_probe(struct platform_device *pdev) for (i = 0; i < dmadev->nr_channels; i++) { chan = &dmadev->chan[i]; chan->id = i; + + if (stm32_mdma_read(dmadev, STM32_MDMA_CCR(i)) & STM32_MDMA_CCR_SM) + dmadev->chan_reserved |= BIT(i); + chan->vchan.desc_free = stm32_mdma_desc_free; vchan_init(&chan->vchan, dd); } -- cgit From 1f854536a8336c61c89ab040bbc874c75325d37c Mon Sep 17 00:00:00 2001 From: Paul Kocialkowski Date: Thu, 31 Mar 2022 15:41:14 +0200 Subject: dmaengine: Clarify cyclic transfer residue documentation The current documentation for the residue reported in a cyclic transfer case mentions that the reported residue should be relative to the current period only. However the definition of DMA_RESIDUE_GRANULARITY_SEGMENT specifies that the residue should be updated after each period for a cyclic transfer, which is in direct contradiction. Moreover the pcm_dmaengine common code uses the residue relative to the whole cyclic buffer size, not one period. Correct the residue-related documentation to reflect this. 
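As a concrete illustration of the corrected semantics (all names here are made up): for a cyclic ring of buf_len bytes on which bytes_done bytes have been transferred in total,

	size_t residue = buf_len - (bytes_done % buf_len);	/* whole-buffer residue */
	size_t hw_pos  = buf_len - residue;			/* roughly what pcm_dmaengine derives */

i.e. the residue counts down across the entire cyclic buffer rather than within the current period.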
Signed-off-by: Paul Kocialkowski Link: https://lore.kernel.org/r/20220331134114.703782-1-paul.kocialkowski@bootlin.com Signed-off-by: Vinod Koul --- Documentation/driver-api/dmaengine/provider.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst index 0072c9c7efd3..d3fa80e333b1 100644 --- a/Documentation/driver-api/dmaengine/provider.rst +++ b/Documentation/driver-api/dmaengine/provider.rst @@ -457,7 +457,7 @@ supported. - Should use dma_set_residue to report it - In the case of a cyclic transfer, it should only take into - account the current period. + account the total size of the cyclic buffer. - Should return DMA_OUT_OF_ORDER if the device does not support in order completion and is completing the operation out of order. -- cgit From 81f5eb2b11ba748ee7e393e2faf32af773ae332d Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 21 Mar 2022 13:40:51 -0700 Subject: dmaengine: idxd: remove trailing white space on input str for wq name Add string processing with strim() in order to remove trailing whitespace that may be input by the user for wq->name. Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/164789525123.2799661.13795829125221129132.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/sysfs.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 7e19ab92b61a..7e628e31ce24 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -832,6 +832,7 @@ static ssize_t wq_name_store(struct device *dev, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); + char *input, *pos; if (wq->state != IDXD_WQ_DISABLED) return -EPERM; @@ -846,9 +847,14 @@ static ssize_t wq_name_store(struct device *dev, if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd)) return -EOPNOTSUPP; + input = kstrndup(buf, count, GFP_KERNEL); + if (!input) + return -ENOMEM; + + pos = strim(input); memset(wq->name, 0, WQ_NAME_SIZE + 1); - strncpy(wq->name, buf, WQ_NAME_SIZE); - strreplace(wq->name, '\n', '\0'); + sprintf(wq->name, "%s", pos); + kfree(input); return count; } -- cgit From 745bd600941375ed6d2b4139f3b90619ac0fcd12 Mon Sep 17 00:00:00 2001 From: Olivier Dautricourt Date: Thu, 17 Mar 2022 18:56:55 +0100 Subject: MAINTAINERS: update my email address This email should now be used to contact me. Signed-off-by: Olivier Dautricourt Link: https://lore.kernel.org/r/85c4174fa162bd946ccf3e08dcfc9b83cfe69b5c.1647539776.git.olivier.dautricourt@orolia.com Signed-off-by: Vinod Koul --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index fd768d43e048..2cdb36afceb8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -819,7 +819,7 @@ S: Maintained F: drivers/mailbox/mailbox-altera.c ALTERA MSGDMA IP CORE DRIVER -M: Olivier Dautricourt +M: Olivier Dautricourt R: Stefan Roese L: dmaengine@vger.kernel.org S: Odd Fixes -- cgit From cc4abaa67e051ddc1bb4e00186870adbd780d1cd Mon Sep 17 00:00:00 2001 From: Olivier Dautricourt Date: Thu, 17 Mar 2022 18:56:56 +0100 Subject: dt-bindings: altr,msgdma: update my email address This email should now be used to contact me.
Signed-off-by: Olivier Dautricourt Acked-by: Rob Herring Link: https://lore.kernel.org/r/dc3decf1dae172c688017bd3ada2ad2b7d060c1e.1647539776.git.olivier.dautricourt@orolia.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/altr,msgdma.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/dma/altr,msgdma.yaml b/Documentation/devicetree/bindings/dma/altr,msgdma.yaml index b193ee2db4a7..b53ac7631a76 100644 --- a/Documentation/devicetree/bindings/dma/altr,msgdma.yaml +++ b/Documentation/devicetree/bindings/dma/altr,msgdma.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Altera mSGDMA IP core maintainers: - - Olivier Dautricourt + - Olivier Dautricourt description: | Altera / Intel modular Scatter-Gather Direct Memory Access (mSGDMA) -- cgit From 729106266a50400181c3baa508c79bead46eb8e0 Mon Sep 17 00:00:00 2001 From: Akhil R Date: Fri, 25 Feb 2022 18:50:43 +0530 Subject: dt-bindings: dmaengine: Add doc for tegra gpcdma Add DT binding document for Nvidia Tegra GPCDMA controller. Signed-off-by: Rajesh Gumasta Signed-off-by: Akhil R Reviewed-by: Jon Hunter Reviewed-by: Rob Herring Acked-by: Thierry Reding Link: https://lore.kernel.org/r/20220225132044.14478-2-akhilrajeev@nvidia.com Signed-off-by: Vinod Koul --- .../bindings/dma/nvidia,tegra186-gpc-dma.yaml | 110 +++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml b/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml new file mode 100644 index 000000000000..9dd1476d1849 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml @@ -0,0 +1,110 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/dma/nvidia,tegra186-gpc-dma.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NVIDIA Tegra GPC DMA Controller Device Tree Bindings + +description: | + The Tegra General Purpose Central (GPC) DMA controller is used for faster + data transfers between memory to memory, memory to device and device to + memory. + +maintainers: + - Jon Hunter + - Rajesh Gumasta + +allOf: + - $ref: "dma-controller.yaml#" + +properties: + compatible: + oneOf: + - const: nvidia,tegra186-gpcdma + - items: + - const: nvidia,tegra194-gpcdma + - const: nvidia,tegra186-gpcdma + + "#dma-cells": + const: 1 + + reg: + maxItems: 1 + + interrupts: + description: + Should contain all of the per-channel DMA interrupts in + ascending order with respect to the DMA channel index. + minItems: 1 + maxItems: 31 + + resets: + maxItems: 1 + + reset-names: + const: gpcdma + + iommus: + maxItems: 1 + + dma-coherent: true + +required: + - compatible + - reg + - interrupts + - resets + - reset-names + - "#dma-cells" + - iommus + +additionalProperties: false + +examples: + - | + #include + #include + #include + + dma-controller@2600000 { + compatible = "nvidia,tegra186-gpcdma"; + reg = <0x2600000 0x210000>; + resets = <&bpmp TEGRA186_RESET_GPCDMA>; + reset-names = "gpcdma"; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + #dma-cells = <1>; + iommus = <&smmu TEGRA186_SID_GPCDMA_0>; + dma-coherent; + }; +... 
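On the consumer side, a client node would reference a controller described by this binding through "dmas"/"dma-names", and the client driver would then obtain a channel via the standard dmaengine API. A minimal hedged sketch ("rx" is an illustrative channel name, not one defined by this binding):

	#include <linux/dmaengine.h>

	static int example_request_rx(struct device *dev, struct dma_chan **out)
	{
		struct dma_chan *chan = dma_request_chan(dev, "rx");

		if (IS_ERR(chan))
			return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

		*out = chan;
		return 0;
	}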
-- cgit From ee17028009d49fffed8cc963455d33b1fd3f1d08 Mon Sep 17 00:00:00 2001 From: Akhil R Date: Fri, 25 Feb 2022 18:50:44 +0530 Subject: dmaengine: tegra: Add tegra gpcdma driver Adding GPC DMA controller driver for Tegra. The driver supports dma transfers between memory to memory, IO peripheral to memory and memory to IO peripheral. Co-developed-by: Pavan Kunapuli Signed-off-by: Pavan Kunapuli Co-developed-by: Rajesh Gumasta Signed-off-by: Rajesh Gumasta Signed-off-by: Akhil R Reviewed-by: Jon Hunter Reviewed-by: Dmitry Osipenko Acked-by: Thierry Reding Link: https://lore.kernel.org/r/20220225132044.14478-3-akhilrajeev@nvidia.com Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 11 + drivers/dma/Makefile | 1 + drivers/dma/tegra186-gpc-dma.c | 1507 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1519 insertions(+) create mode 100644 drivers/dma/tegra186-gpc-dma.c diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d5de3f77d3aa..cc1464e4acde 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -629,6 +629,17 @@ config TXX9_DMAC Support the TXx9 SoC internal DMA controller. This can be integrated in chips such as the Toshiba TX4927/38/39. +config TEGRA186_GPC_DMA + tristate "NVIDIA Tegra GPC DMA support" + depends on (ARCH_TEGRA || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT + select DMA_ENGINE + help + Support for the NVIDIA Tegra General Purpose Central DMA controller. + The DMA controller has multiple DMA channels which can be configured + for different peripherals like UART, SPI, etc which are on APB bus. + This DMA controller transfers data from memory to peripheral FIFO + or vice versa. It also supports memory to memory data transfer. + config TEGRA20_APB_DMA tristate "NVIDIA Tegra20 APB DMA support" depends on ARCH_TEGRA || COMPILE_TEST diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 616d926cf2a5..2f1b87ffd7ab 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -72,6 +72,7 @@ obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o obj-$(CONFIG_SPRD_DMA) += sprd-dma.o obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o +obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o obj-$(CONFIG_TIMB_DMA) += timb_dma.o diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c new file mode 100644 index 000000000000..f12327732041 --- /dev/null +++ b/drivers/dma/tegra186-gpc-dma.c @@ -0,0 +1,1507 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * DMA driver for NVIDIA Tegra GPC DMA controller. + * + * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "virt-dma.h" + +/* CSR register */ +#define TEGRA_GPCDMA_CHAN_CSR 0x00 +#define TEGRA_GPCDMA_CSR_ENB BIT(31) +#define TEGRA_GPCDMA_CSR_IE_EOC BIT(30) +#define TEGRA_GPCDMA_CSR_ONCE BIT(27) + +#define TEGRA_GPCDMA_CSR_FC_MODE GENMASK(25, 24) +#define TEGRA_GPCDMA_CSR_FC_MODE_NO_MMIO \ + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 0) +#define TEGRA_GPCDMA_CSR_FC_MODE_ONE_MMIO \ + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 1) +#define TEGRA_GPCDMA_CSR_FC_MODE_TWO_MMIO \ + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 2) +#define TEGRA_GPCDMA_CSR_FC_MODE_FOUR_MMIO \ + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 3) + +#define TEGRA_GPCDMA_CSR_DMA GENMASK(23, 21) +#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_NO_FC \ + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 0) +#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC \ + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 1) +#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_NO_FC \ + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 2) +#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC \ + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 3) +#define TEGRA_GPCDMA_CSR_DMA_MEM2MEM \ + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 4) +#define TEGRA_GPCDMA_CSR_DMA_FIXED_PAT \ + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 6) + +#define TEGRA_GPCDMA_CSR_REQ_SEL_MASK GENMASK(20, 16) +#define TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED \ + FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, 4) +#define TEGRA_GPCDMA_CSR_IRQ_MASK BIT(15) +#define TEGRA_GPCDMA_CSR_WEIGHT GENMASK(13, 10) + +/* STATUS register */ +#define TEGRA_GPCDMA_CHAN_STATUS 0x004 +#define TEGRA_GPCDMA_STATUS_BUSY BIT(31) +#define TEGRA_GPCDMA_STATUS_ISE_EOC BIT(30) +#define TEGRA_GPCDMA_STATUS_PING_PONG BIT(28) +#define TEGRA_GPCDMA_STATUS_DMA_ACTIVITY BIT(27) +#define TEGRA_GPCDMA_STATUS_CHANNEL_PAUSE BIT(26) +#define TEGRA_GPCDMA_STATUS_CHANNEL_RX BIT(25) +#define TEGRA_GPCDMA_STATUS_CHANNEL_TX BIT(24) +#define TEGRA_GPCDMA_STATUS_IRQ_INTR_STA BIT(23) +#define TEGRA_GPCDMA_STATUS_IRQ_STA BIT(21) +#define TEGRA_GPCDMA_STATUS_IRQ_TRIG_STA BIT(20) + +#define TEGRA_GPCDMA_CHAN_CSRE 0x008 +#define TEGRA_GPCDMA_CHAN_CSRE_PAUSE BIT(31) + +/* Source address */ +#define TEGRA_GPCDMA_CHAN_SRC_PTR 0x00C + +/* Destination address */ +#define TEGRA_GPCDMA_CHAN_DST_PTR 0x010 + +/* High address pointer */ +#define TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR 0x014 +#define TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR GENMASK(7, 0) +#define TEGRA_GPCDMA_HIGH_ADDR_DST_PTR GENMASK(23, 16) + +/* MC sequence register */ +#define TEGRA_GPCDMA_CHAN_MCSEQ 0x18 +#define TEGRA_GPCDMA_MCSEQ_DATA_SWAP BIT(31) +#define TEGRA_GPCDMA_MCSEQ_REQ_COUNT GENMASK(30, 25) +#define TEGRA_GPCDMA_MCSEQ_BURST GENMASK(24, 23) +#define TEGRA_GPCDMA_MCSEQ_BURST_2 \ + FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 0) +#define TEGRA_GPCDMA_MCSEQ_BURST_16 \ + FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 3) +#define TEGRA_GPCDMA_MCSEQ_WRAP1 GENMASK(22, 20) +#define TEGRA_GPCDMA_MCSEQ_WRAP0 GENMASK(19, 17) +#define TEGRA_GPCDMA_MCSEQ_WRAP_NONE 0 + +#define TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK GENMASK(13, 7) +#define TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK GENMASK(6, 0) + +/* MMIO sequence register */ +#define TEGRA_GPCDMA_CHAN_MMIOSEQ 0x01c +#define TEGRA_GPCDMA_MMIOSEQ_DBL_BUF BIT(31) +#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH GENMASK(30, 28) +#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8 \ + FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 0) +#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16 \ + FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 1) +#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32 \ + 
FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 2) +#define TEGRA_GPCDMA_MMIOSEQ_DATA_SWAP BIT(27) +#define TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT 23 +#define TEGRA_GPCDMA_MMIOSEQ_BURST_MIN 2U +#define TEGRA_GPCDMA_MMIOSEQ_BURST_MAX 32U +#define TEGRA_GPCDMA_MMIOSEQ_BURST(bs) \ + (GENMASK((fls(bs) - 2), 0) << TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT) +#define TEGRA_GPCDMA_MMIOSEQ_MASTER_ID GENMASK(22, 19) +#define TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD GENMASK(18, 16) +#define TEGRA_GPCDMA_MMIOSEQ_MMIO_PROT GENMASK(8, 7) + +/* Channel WCOUNT */ +#define TEGRA_GPCDMA_CHAN_WCOUNT 0x20 + +/* Transfer count */ +#define TEGRA_GPCDMA_CHAN_XFER_COUNT 0x24 + +/* DMA byte count status */ +#define TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS 0x28 + +/* Error Status Register */ +#define TEGRA_GPCDMA_CHAN_ERR_STATUS 0x30 +#define TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT 8 +#define TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK 0xF +#define TEGRA_GPCDMA_CHAN_ERR_TYPE(err) ( \ + ((err) >> TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT) & \ + TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK) +#define TEGRA_DMA_BM_FIFO_FULL_ERR 0xF +#define TEGRA_DMA_PERIPH_FIFO_FULL_ERR 0xE +#define TEGRA_DMA_PERIPH_ID_ERR 0xD +#define TEGRA_DMA_STREAM_ID_ERR 0xC +#define TEGRA_DMA_MC_SLAVE_ERR 0xB +#define TEGRA_DMA_MMIO_SLAVE_ERR 0xA + +/* Fixed Pattern */ +#define TEGRA_GPCDMA_CHAN_FIXED_PATTERN 0x34 + +#define TEGRA_GPCDMA_CHAN_TZ 0x38 +#define TEGRA_GPCDMA_CHAN_TZ_MMIO_PROT_1 BIT(0) +#define TEGRA_GPCDMA_CHAN_TZ_MC_PROT_1 BIT(1) + +#define TEGRA_GPCDMA_CHAN_SPARE 0x3c +#define TEGRA_GPCDMA_CHAN_SPARE_EN_LEGACY_FC BIT(16) + +/* + * If any burst is in flight and DMA paused then this is the time to complete + * on-flight burst and update DMA status register. + */ +#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 20 +#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 100 + +/* Channel base address offset from GPCDMA base address */ +#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET 0x20000 + +struct tegra_dma; +struct tegra_dma_channel; + +/* + * tegra_dma_chip_data Tegra chip specific DMA data + * @nr_channels: Number of channels available in the controller. + * @channel_reg_size: Channel register size. + * @max_dma_count: Maximum DMA transfer count supported by DMA controller. + * @hw_support_pause: DMA HW engine support pause of the channel. + */ +struct tegra_dma_chip_data { + bool hw_support_pause; + unsigned int nr_channels; + unsigned int channel_reg_size; + unsigned int max_dma_count; + int (*terminate)(struct tegra_dma_channel *tdc); +}; + +/* DMA channel registers */ +struct tegra_dma_channel_regs { + u32 csr; + u32 src_ptr; + u32 dst_ptr; + u32 high_addr_ptr; + u32 mc_seq; + u32 mmio_seq; + u32 wcount; + u32 fixed_pattern; +}; + +/* + * tegra_dma_sg_req: DMA request details to configure hardware. This + * contains the details for one transfer to configure DMA hw. + * The client's request for data transfer can be broken into multiple + * sub-transfer as per requester details and hw support. This sub transfer + * get added as an array in Tegra DMA desc which manages the transfer details. + */ +struct tegra_dma_sg_req { + unsigned int len; + struct tegra_dma_channel_regs ch_regs; +}; + +/* + * tegra_dma_desc: Tegra DMA descriptors which uses virt_dma_desc to + * manage client request and keep track of transfer status, callbacks + * and request counts etc. 
+ */ +struct tegra_dma_desc { + bool cyclic; + unsigned int bytes_req; + unsigned int bytes_xfer; + unsigned int sg_idx; + unsigned int sg_count; + struct virt_dma_desc vd; + struct tegra_dma_channel *tdc; + struct tegra_dma_sg_req sg_req[]; +}; + +/* + * tegra_dma_channel: Channel specific information + */ +struct tegra_dma_channel { + bool config_init; + char name[30]; + enum dma_transfer_direction sid_dir; + int id; + int irq; + int slave_id; + struct tegra_dma *tdma; + struct virt_dma_chan vc; + struct tegra_dma_desc *dma_desc; + struct dma_slave_config dma_sconfig; + unsigned int stream_id; + unsigned long chan_base_offset; +}; + +/* + * tegra_dma: Tegra DMA specific information + */ +struct tegra_dma { + const struct tegra_dma_chip_data *chip_data; + unsigned long sid_m2d_reserved; + unsigned long sid_d2m_reserved; + void __iomem *base_addr; + struct device *dev; + struct dma_device dma_dev; + struct reset_control *rst; + struct tegra_dma_channel channels[]; +}; + +static inline void tdc_write(struct tegra_dma_channel *tdc, + u32 reg, u32 val) +{ + writel_relaxed(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg); +} + +static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg) +{ + return readl_relaxed(tdc->tdma->base_addr + tdc->chan_base_offset + reg); +} + +static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc) +{ + return container_of(dc, struct tegra_dma_channel, vc.chan); +} + +static inline struct tegra_dma_desc *vd_to_tegra_dma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct tegra_dma_desc, vd); +} + +static inline struct device *tdc2dev(struct tegra_dma_channel *tdc) +{ + return tdc->vc.chan.device->dev; +} + +static void tegra_dma_dump_chan_regs(struct tegra_dma_channel *tdc) +{ + dev_dbg(tdc2dev(tdc), "DMA Channel %d name %s register dump:\n", + tdc->id, tdc->name); + dev_dbg(tdc2dev(tdc), "CSR %x STA %x CSRE %x SRC %x DST %x\n", + tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_DST_PTR) + ); + dev_dbg(tdc2dev(tdc), "MCSEQ %x IOSEQ %x WCNT %x XFER %x BSTA %x\n", + tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_WCOUNT), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS) + ); + dev_dbg(tdc2dev(tdc), "DMA ERR_STA %x\n", + tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS)); +} + +static int tegra_dma_sid_reserve(struct tegra_dma_channel *tdc, + enum dma_transfer_direction direction) +{ + struct tegra_dma *tdma = tdc->tdma; + int sid = tdc->slave_id; + + if (!is_slave_direction(direction)) + return 0; + + switch (direction) { + case DMA_MEM_TO_DEV: + if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) { + dev_err(tdma->dev, "slave id already in use\n"); + return -EINVAL; + } + break; + case DMA_DEV_TO_MEM: + if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) { + dev_err(tdma->dev, "slave id already in use\n"); + return -EINVAL; + } + break; + default: + break; + } + + tdc->sid_dir = direction; + + return 0; +} + +static void tegra_dma_sid_free(struct tegra_dma_channel *tdc) +{ + struct tegra_dma *tdma = tdc->tdma; + int sid = tdc->slave_id; + + switch (tdc->sid_dir) { + case DMA_MEM_TO_DEV: + clear_bit(sid, &tdma->sid_m2d_reserved); + break; + case DMA_DEV_TO_MEM: + clear_bit(sid, &tdma->sid_d2m_reserved); + break; + default: + break; + } + + tdc->sid_dir = 
DMA_TRANS_NONE; +} + +static void tegra_dma_desc_free(struct virt_dma_desc *vd) +{ + kfree(container_of(vd, struct tegra_dma_desc, vd)); +} + +static int tegra_dma_slave_config(struct dma_chan *dc, + struct dma_slave_config *sconfig) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + + memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); + tdc->config_init = true; + + return 0; +} + +static int tegra_dma_pause(struct tegra_dma_channel *tdc) +{ + int ret; + u32 val; + + val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE); + val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE; + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val); + + /* Wait until busy bit is de-asserted */ + ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr + + tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS, + val, + !(val & TEGRA_GPCDMA_STATUS_BUSY), + TEGRA_GPCDMA_BURST_COMPLETE_TIME, + TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT); + + if (ret) { + dev_err(tdc2dev(tdc), "DMA pause timed out\n"); + tegra_dma_dump_chan_regs(tdc); + } + + return ret; +} + +static int tegra_dma_device_pause(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + unsigned long flags; + int ret; + + if (!tdc->tdma->chip_data->hw_support_pause) + return -ENOSYS; + + spin_lock_irqsave(&tdc->vc.lock, flags); + ret = tegra_dma_pause(tdc); + spin_unlock_irqrestore(&tdc->vc.lock, flags); + + return ret; +} + +static void tegra_dma_resume(struct tegra_dma_channel *tdc) +{ + u32 val; + + val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE); + val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE; + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val); +} + +static int tegra_dma_device_resume(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + unsigned long flags; + + if (!tdc->tdma->chip_data->hw_support_pause) + return -ENOSYS; + + spin_lock_irqsave(&tdc->vc.lock, flags); + tegra_dma_resume(tdc); + spin_unlock_irqrestore(&tdc->vc.lock, flags); + + return 0; +} + +static void tegra_dma_disable(struct tegra_dma_channel *tdc) +{ + u32 csr, status; + + csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR); + + /* Disable interrupts */ + csr &= ~TEGRA_GPCDMA_CSR_IE_EOC; + + /* Disable DMA */ + csr &= ~TEGRA_GPCDMA_CSR_ENB; + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr); + + /* Clear interrupt status if it is there */ + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS); + if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) { + dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status); + } +} + +static void tegra_dma_configure_next_sg(struct tegra_dma_channel *tdc) +{ + struct tegra_dma_desc *dma_desc = tdc->dma_desc; + struct tegra_dma_channel_regs *ch_regs; + int ret; + u32 val; + + dma_desc->sg_idx++; + + /* Reset the sg index for cyclic transfers */ + if (dma_desc->sg_idx == dma_desc->sg_count) + dma_desc->sg_idx = 0; + + /* Configure next transfer immediately after DMA is busy */ + ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr + + tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS, + val, + (val & TEGRA_GPCDMA_STATUS_BUSY), 0, + TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT); + if (ret) + return; + + ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs; + + tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr); + + /* Start DMA */ + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, + ch_regs->csr | 
TEGRA_GPCDMA_CSR_ENB); +} + +static void tegra_dma_start(struct tegra_dma_channel *tdc) +{ + struct tegra_dma_desc *dma_desc = tdc->dma_desc; + struct tegra_dma_channel_regs *ch_regs; + struct virt_dma_desc *vdesc; + + if (!dma_desc) { + vdesc = vchan_next_desc(&tdc->vc); + if (!vdesc) + return; + + dma_desc = vd_to_tegra_dma_desc(vdesc); + list_del(&vdesc->node); + dma_desc->tdc = tdc; + tdc->dma_desc = dma_desc; + + tegra_dma_resume(tdc); + } + + ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs; + + tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr); + + /* Start DMA */ + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, + ch_regs->csr | TEGRA_GPCDMA_CSR_ENB); +} + +static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc) +{ + vchan_cookie_complete(&tdc->dma_desc->vd); + + tegra_dma_sid_free(tdc); + tdc->dma_desc = NULL; +} + +static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc, + unsigned int err_status) +{ + switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) { + case TEGRA_DMA_BM_FIFO_FULL_ERR: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d bm fifo full\n", tdc->id); + break; + + case TEGRA_DMA_PERIPH_FIFO_FULL_ERR: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d peripheral fifo full\n", tdc->id); + break; + + case TEGRA_DMA_PERIPH_ID_ERR: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d illegal peripheral id\n", tdc->id); + break; + + case TEGRA_DMA_STREAM_ID_ERR: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d illegal stream id\n", tdc->id); + break; + + case TEGRA_DMA_MC_SLAVE_ERR: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d mc slave error\n", tdc->id); + break; + + case TEGRA_DMA_MMIO_SLAVE_ERR: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d mmio slave error\n", tdc->id); + break; + + default: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d security violation %x\n", tdc->id, + err_status); + } +} + +static irqreturn_t tegra_dma_isr(int irq, void *dev_id) +{ + struct tegra_dma_channel *tdc = dev_id; + struct tegra_dma_desc *dma_desc = tdc->dma_desc; + struct tegra_dma_sg_req *sg_req; + u32 status; + + /* Check channel error status register */ + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS); + if (status) { + tegra_dma_chan_decode_error(tdc, status); + tegra_dma_dump_chan_regs(tdc); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF); + } + + spin_lock(&tdc->vc.lock); + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS); + if (!(status & TEGRA_GPCDMA_STATUS_ISE_EOC)) + goto irq_done; + + tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, + TEGRA_GPCDMA_STATUS_ISE_EOC); + + if (!dma_desc) + goto irq_done; + + sg_req = dma_desc->sg_req; + dma_desc->bytes_xfer += sg_req[dma_desc->sg_idx].len; + + if (dma_desc->cyclic) { + vchan_cyclic_callback(&dma_desc->vd); + tegra_dma_configure_next_sg(tdc); + } else { + dma_desc->sg_idx++; + if (dma_desc->sg_idx == dma_desc->sg_count) + tegra_dma_xfer_complete(tdc); + else + tegra_dma_start(tdc); + } + +irq_done: + spin_unlock(&tdc->vc.lock); + return IRQ_HANDLED; +} + +static void tegra_dma_issue_pending(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = 
to_tegra_dma_chan(dc); + unsigned long flags; + + if (tdc->dma_desc) + return; + + spin_lock_irqsave(&tdc->vc.lock, flags); + if (vchan_issue_pending(&tdc->vc)) + tegra_dma_start(tdc); + + /* + * For cyclic DMA transfers, program the second + * transfer parameters as soon as the first DMA + * transfer is started inorder for the DMA + * controller to trigger the second transfer + * with the correct parameters. + */ + if (tdc->dma_desc && tdc->dma_desc->cyclic) + tegra_dma_configure_next_sg(tdc); + + spin_unlock_irqrestore(&tdc->vc.lock, flags); +} + +static int tegra_dma_stop_client(struct tegra_dma_channel *tdc) +{ + int ret; + u32 status, csr; + + /* + * Change the client associated with the DMA channel + * to stop DMA engine from starting any more bursts for + * the given client and wait for in flight bursts to complete + */ + csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR); + csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK); + csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED; + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr); + + /* Wait for in flight data transfer to finish */ + udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME); + + /* If TX/RX path is still active wait till it becomes + * inactive + */ + + ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr + + tdc->chan_base_offset + + TEGRA_GPCDMA_CHAN_STATUS, + status, + !(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX | + TEGRA_GPCDMA_STATUS_CHANNEL_RX)), + 5, + TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT); + if (ret) { + dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n"); + tegra_dma_dump_chan_regs(tdc); + } + + return ret; +} + +static int tegra_dma_terminate_all(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + unsigned long flags; + LIST_HEAD(head); + int err; + + spin_lock_irqsave(&tdc->vc.lock, flags); + + if (tdc->dma_desc) { + err = tdc->tdma->chip_data->terminate(tdc); + if (err) { + spin_unlock_irqrestore(&tdc->vc.lock, flags); + return err; + } + + tegra_dma_disable(tdc); + tdc->dma_desc = NULL; + } + + tegra_dma_sid_free(tdc); + vchan_get_all_descriptors(&tdc->vc, &head); + spin_unlock_irqrestore(&tdc->vc.lock, flags); + + vchan_dma_desc_free_list(&tdc->vc, &head); + + return 0; +} + +static int tegra_dma_get_residual(struct tegra_dma_channel *tdc) +{ + struct tegra_dma_desc *dma_desc = tdc->dma_desc; + struct tegra_dma_sg_req *sg_req = dma_desc->sg_req; + unsigned int bytes_xfer, residual; + u32 wcount = 0, status; + + wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT); + + /* + * Set wcount = 0 if EOC bit is set. The transfer would have + * already completed and the CHAN_XFER_COUNT could have updated + * for the next transfer, specifically in case of cyclic transfers. 
+ */ + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS); + if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) + wcount = 0; + + bytes_xfer = dma_desc->bytes_xfer + + sg_req[dma_desc->sg_idx].len - (wcount * 4); + + residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req); + + return residual; +} + +static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma_desc *dma_desc; + struct virt_dma_desc *vd; + unsigned int residual; + unsigned long flags; + enum dma_status ret; + + ret = dma_cookie_status(dc, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + spin_lock_irqsave(&tdc->vc.lock, flags); + vd = vchan_find_desc(&tdc->vc, cookie); + if (vd) { + dma_desc = vd_to_tegra_dma_desc(vd); + residual = dma_desc->bytes_req; + dma_set_residue(txstate, residual); + } else if (tdc->dma_desc && tdc->dma_desc->vd.tx.cookie == cookie) { + residual = tegra_dma_get_residual(tdc); + dma_set_residue(txstate, residual); + } else { + dev_err(tdc2dev(tdc), "cookie %d is not found\n", cookie); + } + spin_unlock_irqrestore(&tdc->vc.lock, flags); + + return ret; +} + +static inline int get_bus_width(struct tegra_dma_channel *tdc, + enum dma_slave_buswidth slave_bw) +{ + switch (slave_bw) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32; + default: + dev_err(tdc2dev(tdc), "given slave bus width is not supported\n"); + return -EINVAL; + } +} + +static unsigned int get_burst_size(struct tegra_dma_channel *tdc, + u32 burst_size, enum dma_slave_buswidth slave_bw, + int len) +{ + unsigned int burst_mmio_width, burst_byte; + + /* + * burst_size from client is in terms of the bus_width. + * convert that into words. + * If burst_size is not specified from client, then use + * len to calculate the optimum burst size + */ + burst_byte = burst_size ? 
burst_size * slave_bw : len; + burst_mmio_width = burst_byte / 4; + + if (burst_mmio_width < TEGRA_GPCDMA_MMIOSEQ_BURST_MIN) + return 0; + + burst_mmio_width = min(burst_mmio_width, TEGRA_GPCDMA_MMIOSEQ_BURST_MAX); + + return TEGRA_GPCDMA_MMIOSEQ_BURST(burst_mmio_width); +} + +static int get_transfer_param(struct tegra_dma_channel *tdc, + enum dma_transfer_direction direction, + u32 *apb_addr, + u32 *mmio_seq, + u32 *csr, + unsigned int *burst_size, + enum dma_slave_buswidth *slave_bw) +{ + switch (direction) { + case DMA_MEM_TO_DEV: + *apb_addr = tdc->dma_sconfig.dst_addr; + *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width); + *burst_size = tdc->dma_sconfig.dst_maxburst; + *slave_bw = tdc->dma_sconfig.dst_addr_width; + *csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC; + return 0; + case DMA_DEV_TO_MEM: + *apb_addr = tdc->dma_sconfig.src_addr; + *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width); + *burst_size = tdc->dma_sconfig.src_maxburst; + *slave_bw = tdc->dma_sconfig.src_addr_width; + *csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC; + return 0; + case DMA_MEM_TO_MEM: + *burst_size = tdc->dma_sconfig.src_addr_width; + *csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM; + return 0; + default: + dev_err(tdc2dev(tdc), "DMA direction is not supported\n"); + } + + return -EINVAL; +} + +static struct dma_async_tx_descriptor * +tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value, + size_t len, unsigned long flags) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count; + struct tegra_dma_sg_req *sg_req; + struct tegra_dma_desc *dma_desc; + u32 csr, mc_seq; + + if ((len & 3) || (dest & 3) || len > max_dma_count) { + dev_err(tdc2dev(tdc), + "DMA length/memory address is not supported\n"); + return NULL; + } + + /* Set DMA mode to fixed pattern */ + csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT; + /* Enable once or continuous mode */ + csr |= TEGRA_GPCDMA_CSR_ONCE; + /* Enable IRQ mask */ + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; + /* Enable the DMA interrupt */ + if (flags & DMA_PREP_INTERRUPT) + csr |= TEGRA_GPCDMA_CSR_IE_EOC; + /* Configure default priority weight for the channel */ + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); + + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); + /* retain stream-id and clean rest */ + mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK; + + /* Set the address wrapping */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + + /* Program outstanding MC requests */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); + /* Set burst size */ + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; + + dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT); + if (!dma_desc) + return NULL; + + dma_desc->bytes_req = len; + dma_desc->sg_count = 1; + sg_req = dma_desc->sg_req; + + sg_req[0].ch_regs.src_ptr = 0; + sg_req[0].ch_regs.dst_ptr = dest; + sg_req[0].ch_regs.high_addr_ptr = + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32)); + sg_req[0].ch_regs.fixed_pattern = value; + /* Word count reg takes value as (N +1) words */ + sg_req[0].ch_regs.wcount = ((len - 4) >> 2); + sg_req[0].ch_regs.csr = csr; + sg_req[0].ch_regs.mmio_seq = 0; + sg_req[0].ch_regs.mc_seq = mc_seq; + sg_req[0].len = len; + + dma_desc->cyclic = false; + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); +} + +static struct dma_async_tx_descriptor * +tegra_dma_prep_dma_memcpy(struct dma_chan *dc, dma_addr_t 
dest, + dma_addr_t src, size_t len, unsigned long flags) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma_sg_req *sg_req; + struct tegra_dma_desc *dma_desc; + unsigned int max_dma_count; + u32 csr, mc_seq; + + max_dma_count = tdc->tdma->chip_data->max_dma_count; + if ((len & 3) || (src & 3) || (dest & 3) || len > max_dma_count) { + dev_err(tdc2dev(tdc), + "DMA length/memory address is not supported\n"); + return NULL; + } + + /* Set DMA mode to memory to memory transfer */ + csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM; + /* Enable once or continuous mode */ + csr |= TEGRA_GPCDMA_CSR_ONCE; + /* Enable IRQ mask */ + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; + /* Enable the DMA interrupt */ + if (flags & DMA_PREP_INTERRUPT) + csr |= TEGRA_GPCDMA_CSR_IE_EOC; + /* Configure default priority weight for the channel */ + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); + + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); + /* retain stream-id and clean rest */ + mc_seq &= (TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK) | + (TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK); + + /* Set the address wrapping */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + + /* Program outstanding MC requests */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); + /* Set burst size */ + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; + + dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT); + if (!dma_desc) + return NULL; + + dma_desc->bytes_req = len; + dma_desc->sg_count = 1; + sg_req = dma_desc->sg_req; + + sg_req[0].ch_regs.src_ptr = src; + sg_req[0].ch_regs.dst_ptr = dest; + sg_req[0].ch_regs.high_addr_ptr = + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32)); + sg_req[0].ch_regs.high_addr_ptr |= + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32)); + /* Word count reg takes value as (N +1) words */ + sg_req[0].ch_regs.wcount = ((len - 4) >> 2); + sg_req[0].ch_regs.csr = csr; + sg_req[0].ch_regs.mmio_seq = 0; + sg_req[0].ch_regs.mc_seq = mc_seq; + sg_req[0].len = len; + + dma_desc->cyclic = false; + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); +} + +static struct dma_async_tx_descriptor * +tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count; + u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0; + enum dma_slave_buswidth slave_bw; + struct tegra_dma_sg_req *sg_req; + struct tegra_dma_desc *dma_desc; + struct scatterlist *sg; + u32 burst_size; + unsigned int i; + int ret; + + if (!tdc->config_init) { + dev_err(tdc2dev(tdc), "DMA channel is not configured\n"); + return NULL; + } + if (sg_len < 1) { + dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len); + return NULL; + } + + ret = tegra_dma_sid_reserve(tdc, direction); + if (ret) + return NULL; + + ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr, + &burst_size, &slave_bw); + if (ret < 0) + return NULL; + + /* Enable once or continuous mode */ + csr |= TEGRA_GPCDMA_CSR_ONCE; + /* Program the slave id in requestor select */ + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id); + /* Enable IRQ mask */ + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; + /* Configure default priority weight for the channel*/ + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); + + /* Enable the DMA 
interrupt */ + if (flags & DMA_PREP_INTERRUPT) + csr |= TEGRA_GPCDMA_CSR_IE_EOC; + + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); + /* retain stream-id and clean rest */ + mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK; + + /* Set the address wrapping on both MC and MMIO side */ + + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1); + + /* Program 2 MC outstanding requests by default. */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); + + /* Setting MC burst size depending on MMIO burst size */ + if (burst_size == 64) + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; + else + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2; + + dma_desc = kzalloc(struct_size(dma_desc, sg_req, sg_len), GFP_NOWAIT); + if (!dma_desc) + return NULL; + + dma_desc->sg_count = sg_len; + sg_req = dma_desc->sg_req; + + /* Make transfer requests */ + for_each_sg(sgl, sg, sg_len, i) { + u32 len; + dma_addr_t mem; + + mem = sg_dma_address(sg); + len = sg_dma_len(sg); + + if ((len & 3) || (mem & 3) || len > max_dma_count) { + dev_err(tdc2dev(tdc), + "DMA length/memory address is not supported\n"); + kfree(dma_desc); + return NULL; + } + + mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len); + dma_desc->bytes_req += len; + + if (direction == DMA_MEM_TO_DEV) { + sg_req[i].ch_regs.src_ptr = mem; + sg_req[i].ch_regs.dst_ptr = apb_ptr; + sg_req[i].ch_regs.high_addr_ptr = + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32)); + } else if (direction == DMA_DEV_TO_MEM) { + sg_req[i].ch_regs.src_ptr = apb_ptr; + sg_req[i].ch_regs.dst_ptr = mem; + sg_req[i].ch_regs.high_addr_ptr = + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32)); + } + + /* + * Word count register takes input in words. Writing a value + * of N into word count register means a req of (N+1) words. + */ + sg_req[i].ch_regs.wcount = ((len - 4) >> 2); + sg_req[i].ch_regs.csr = csr; + sg_req[i].ch_regs.mmio_seq = mmio_seq; + sg_req[i].ch_regs.mc_seq = mc_seq; + sg_req[i].len = len; + } + + dma_desc->cyclic = false; + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); +} + +static struct dma_async_tx_descriptor * +tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma_desc *dma_desc; + struct tegra_dma_sg_req *sg_req; + enum dma_slave_buswidth slave_bw; + u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size; + unsigned int max_dma_count, len, period_count, i; + dma_addr_t mem = buf_addr; + int ret; + + if (!buf_len || !period_len) { + dev_err(tdc2dev(tdc), "Invalid buffer/period len\n"); + return NULL; + } + + if (!tdc->config_init) { + dev_err(tdc2dev(tdc), "DMA slave is not configured\n"); + return NULL; + } + + ret = tegra_dma_sid_reserve(tdc, direction); + if (ret) + return NULL; + + /* + * We only support cycle transfer when buf_len is multiple of + * period_len. 
+ */ + if (buf_len % period_len) { + dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n"); + return NULL; + } + + len = period_len; + max_dma_count = tdc->tdma->chip_data->max_dma_count; + if ((len & 3) || (buf_addr & 3) || len > max_dma_count) { + dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n"); + return NULL; + } + + ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr, + &burst_size, &slave_bw); + if (ret < 0) + return NULL; + + /* Enable once or continuous mode */ + csr &= ~TEGRA_GPCDMA_CSR_ONCE; + /* Program the slave id in requestor select */ + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id); + /* Enable IRQ mask */ + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; + /* Configure default priority weight for the channel*/ + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); + + /* Enable the DMA interrupt */ + if (flags & DMA_PREP_INTERRUPT) + csr |= TEGRA_GPCDMA_CSR_IE_EOC; + + mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1); + + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); + /* retain stream-id and clean rest */ + mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK; + + /* Set the address wrapping on both MC and MMIO side */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + + /* Program 2 MC outstanding requests by default. */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); + /* Setting MC burst size depending on MMIO burst size */ + if (burst_size == 64) + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; + else + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2; + + period_count = buf_len / period_len; + dma_desc = kzalloc(struct_size(dma_desc, sg_req, period_count), + GFP_NOWAIT); + if (!dma_desc) + return NULL; + + dma_desc->bytes_req = buf_len; + dma_desc->sg_count = period_count; + sg_req = dma_desc->sg_req; + + /* Split transfer equal to period size */ + for (i = 0; i < period_count; i++) { + mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len); + if (direction == DMA_MEM_TO_DEV) { + sg_req[i].ch_regs.src_ptr = mem; + sg_req[i].ch_regs.dst_ptr = apb_ptr; + sg_req[i].ch_regs.high_addr_ptr = + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32)); + } else if (direction == DMA_DEV_TO_MEM) { + sg_req[i].ch_regs.src_ptr = apb_ptr; + sg_req[i].ch_regs.dst_ptr = mem; + sg_req[i].ch_regs.high_addr_ptr = + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32)); + } + /* + * Word count register takes input in words. Writing a value + * of N into word count register means a req of (N+1) words. 
+ */ + sg_req[i].ch_regs.wcount = ((len - 4) >> 2); + sg_req[i].ch_regs.csr = csr; + sg_req[i].ch_regs.mmio_seq = mmio_seq; + sg_req[i].ch_regs.mc_seq = mc_seq; + sg_req[i].len = len; + + mem += len; + } + + dma_desc->cyclic = true; + + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); +} + +static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + int ret; + + ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc); + if (ret) { + dev_err(tdc2dev(tdc), "request_irq failed for %s\n", tdc->name); + return ret; + } + + dma_cookie_init(&tdc->vc.chan); + tdc->config_init = false; + return 0; +} + +static void tegra_dma_chan_synchronize(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + + synchronize_irq(tdc->irq); + vchan_synchronize(&tdc->vc); +} + +static void tegra_dma_free_chan_resources(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + + dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); + + tegra_dma_terminate_all(dc); + synchronize_irq(tdc->irq); + + tasklet_kill(&tdc->vc.task); + tdc->config_init = false; + tdc->slave_id = -1; + tdc->sid_dir = DMA_TRANS_NONE; + free_irq(tdc->irq, tdc); + + vchan_free_chan_resources(&tdc->vc); +} + +static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct tegra_dma *tdma = ofdma->of_dma_data; + struct tegra_dma_channel *tdc; + struct dma_chan *chan; + + chan = dma_get_any_slave_channel(&tdma->dma_dev); + if (!chan) + return NULL; + + tdc = to_tegra_dma_chan(chan); + tdc->slave_id = dma_spec->args[0]; + + return chan; +} + +static const struct tegra_dma_chip_data tegra186_dma_chip_data = { + .nr_channels = 31, + .channel_reg_size = SZ_64K, + .max_dma_count = SZ_1G, + .hw_support_pause = false, + .terminate = tegra_dma_stop_client, +}; + +static const struct tegra_dma_chip_data tegra194_dma_chip_data = { + .nr_channels = 31, + .channel_reg_size = SZ_64K, + .max_dma_count = SZ_1G, + .hw_support_pause = true, + .terminate = tegra_dma_pause, +}; + +static const struct of_device_id tegra_dma_of_match[] = { + { + .compatible = "nvidia,tegra186-gpcdma", + .data = &tegra186_dma_chip_data, + }, { + .compatible = "nvidia,tegra194-gpcdma", + .data = &tegra194_dma_chip_data, + }, { + }, +}; +MODULE_DEVICE_TABLE(of, tegra_dma_of_match); + +static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id) +{ + unsigned int reg_val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); + + reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK); + reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK); + + reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id); + reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id); + + tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val); + return 0; +} + +static int tegra_dma_probe(struct platform_device *pdev) +{ + const struct tegra_dma_chip_data *cdata = NULL; + struct iommu_fwspec *iommu_spec; + unsigned int stream_id, i; + struct tegra_dma *tdma; + struct resource *res; + int ret; + + cdata = of_device_get_match_data(&pdev->dev); + + tdma = devm_kzalloc(&pdev->dev, + struct_size(tdma, channels, cdata->nr_channels), + GFP_KERNEL); + if (!tdma) + return -ENOMEM; + + tdma->dev = &pdev->dev; + tdma->chip_data = cdata; + platform_set_drvdata(pdev, tdma); + + tdma->base_addr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(tdma->base_addr)) + return PTR_ERR(tdma->base_addr); + + tdma->rst = 
devm_reset_control_get_exclusive(&pdev->dev, "gpcdma"); + if (IS_ERR(tdma->rst)) { + return dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst), + "Missing controller reset\n"); + } + reset_control_reset(tdma->rst); + + tdma->dma_dev.dev = &pdev->dev; + + iommu_spec = dev_iommu_fwspec_get(&pdev->dev); + if (!iommu_spec) { + dev_err(&pdev->dev, "Missing iommu stream-id\n"); + return -EINVAL; + } + stream_id = iommu_spec->ids[0] & 0xffff; + + INIT_LIST_HEAD(&tdma->dma_dev.channels); + for (i = 0; i < cdata->nr_channels; i++) { + struct tegra_dma_channel *tdc = &tdma->channels[i]; + + tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET + + i * cdata->channel_reg_size; + res = platform_get_resource(pdev, IORESOURCE_IRQ, i); + if (!res) { + dev_err(&pdev->dev, "No irq resource for chan %d\n", i); + return -EINVAL; + } + tdc->irq = res->start; + snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i); + + tdc->tdma = tdma; + tdc->id = i; + tdc->slave_id = -1; + + vchan_init(&tdc->vc, &tdma->dma_dev); + tdc->vc.desc_free = tegra_dma_desc_free; + + /* program stream-id for this channel */ + tegra_dma_program_sid(tdc, stream_id); + tdc->stream_id = stream_id; + } + + dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask); + dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); + dma_cap_set(DMA_MEMCPY, tdma->dma_dev.cap_mask); + dma_cap_set(DMA_MEMSET, tdma->dma_dev.cap_mask); + dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); + + /* + * Only word aligned transfers are supported. Set the copy + * alignment shift. + */ + tdma->dma_dev.copy_align = 2; + tdma->dma_dev.fill_align = 2; + tdma->dma_dev.device_alloc_chan_resources = + tegra_dma_alloc_chan_resources; + tdma->dma_dev.device_free_chan_resources = + tegra_dma_free_chan_resources; + tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; + tdma->dma_dev.device_prep_dma_memcpy = tegra_dma_prep_dma_memcpy; + tdma->dma_dev.device_prep_dma_memset = tegra_dma_prep_dma_memset; + tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; + tdma->dma_dev.device_config = tegra_dma_slave_config; + tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all; + tdma->dma_dev.device_tx_status = tegra_dma_tx_status; + tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; + tdma->dma_dev.device_pause = tegra_dma_device_pause; + tdma->dma_dev.device_resume = tegra_dma_device_resume; + tdma->dma_dev.device_synchronize = tegra_dma_chan_synchronize; + tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + + ret = dma_async_device_register(&tdma->dma_dev); + if (ret < 0) { + dev_err_probe(&pdev->dev, ret, + "GPC DMA driver registration failed\n"); + return ret; + } + + ret = of_dma_controller_register(pdev->dev.of_node, + tegra_dma_of_xlate, tdma); + if (ret < 0) { + dev_err_probe(&pdev->dev, ret, + "GPC DMA OF registration failed\n"); + + dma_async_device_unregister(&tdma->dma_dev); + return ret; + } + + dev_info(&pdev->dev, "GPC DMA driver register %d channels\n", + cdata->nr_channels); + + return 0; +} + +static int tegra_dma_remove(struct platform_device *pdev) +{ + struct tegra_dma *tdma = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&tdma->dma_dev); + + return 0; +} + +static int __maybe_unused tegra_dma_pm_suspend(struct device *dev) +{ + struct tegra_dma *tdma = dev_get_drvdata(dev); + unsigned int i; + + for (i = 0; i < tdma->chip_data->nr_channels; i++) { + struct tegra_dma_channel *tdc = &tdma->channels[i]; + + if (tdc->dma_desc) { + dev_err(tdma->dev, "channel %u 
busy\n", i); + return -EBUSY; + } + } + + return 0; +} + +static int __maybe_unused tegra_dma_pm_resume(struct device *dev) +{ + struct tegra_dma *tdma = dev_get_drvdata(dev); + unsigned int i; + + reset_control_reset(tdma->rst); + + for (i = 0; i < tdma->chip_data->nr_channels; i++) { + struct tegra_dma_channel *tdc = &tdma->channels[i]; + + tegra_dma_program_sid(tdc, tdc->stream_id); + } + + return 0; +} + +static const struct dev_pm_ops tegra_dma_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) +}; + +static struct platform_driver tegra_dma_driver = { + .driver = { + .name = "tegra-gpcdma", + .pm = &tegra_dma_dev_pm_ops, + .of_match_table = tegra_dma_of_match, + }, + .probe = tegra_dma_probe, + .remove = tegra_dma_remove, +}; + +module_platform_driver(tegra_dma_driver); + +MODULE_DESCRIPTION("NVIDIA Tegra GPC DMA Controller driver"); +MODULE_AUTHOR("Pavan Kunapuli "); +MODULE_AUTHOR("Rajesh Gumasta "); +MODULE_LICENSE("GPL"); -- cgit From 2d7991fe867974a8e5065ee9691451a406b9320d Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Fri, 11 Mar 2022 16:23:22 -0700 Subject: dmaengine: idxd: update IAA definitions for user header Add additional structure definitions for Intel In-memory Analytics Accelerator (IAA/IAX). See specification (1) for more details. 1: https://cdrdv2.intel.com/v1/dl/getContent/721858 Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/164704100212.1373038.18362680016033557757.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- include/uapi/linux/idxd.h | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index a8f0ff75c430..bce7c43657d5 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -53,6 +53,11 @@ enum idxd_scmd_stat { /* IAX */ #define IDXD_OP_FLAG_RD_SRC2_AECS 0x010000 +#define IDXD_OP_FLAG_RD_SRC2_2ND 0x020000 +#define IDXD_OP_FLAG_WR_SRC2_AECS_COMP 0x040000 +#define IDXD_OP_FLAG_WR_SRC2_AECS_OVFL 0x080000 +#define IDXD_OP_FLAG_SRC2_STS 0x100000 +#define IDXD_OP_FLAG_CRC_RFC3720 0x200000 /* Opcode */ enum dsa_opcode { @@ -81,6 +86,18 @@ enum iax_opcode { IAX_OPCODE_MEMMOVE, IAX_OPCODE_DECOMPRESS = 0x42, IAX_OPCODE_COMPRESS, + IAX_OPCODE_CRC64, + IAX_OPCODE_ZERO_DECOMP_32 = 0x48, + IAX_OPCODE_ZERO_DECOMP_16, + IAX_OPCODE_DECOMP_32 = 0x4c, + IAX_OPCODE_DECOMP_16, + IAX_OPCODE_SCAN = 0x50, + IAX_OPCODE_SET_MEMBER, + IAX_OPCODE_EXTRACT, + IAX_OPCODE_SELECT, + IAX_OPCODE_RLE_BURST, + IAX_OPCDE_FIND_UNIQUE, + IAX_OPCODE_EXPAND, }; /* Completion record status */ @@ -120,6 +137,7 @@ enum iax_completion_status { IAX_COMP_NONE = 0, IAX_COMP_SUCCESS, IAX_COMP_PAGE_FAULT_IR = 0x04, + IAX_COMP_ANALYTICS_ERROR = 0x0a, IAX_COMP_OUTBUF_OVERFLOW, IAX_COMP_BAD_OPCODE = 0x10, IAX_COMP_INVALID_FLAGS, @@ -140,7 +158,10 @@ enum iax_completion_status { IAX_COMP_WATCHDOG, IAX_COMP_INVALID_COMP_FLAG = 0x30, IAX_COMP_INVALID_FILTER_FLAG, - IAX_COMP_INVALID_NUM_ELEMS = 0x33, + IAX_COMP_INVALID_INPUT_SIZE, + IAX_COMP_INVALID_NUM_ELEMS, + IAX_COMP_INVALID_SRC1_WIDTH, + IAX_COMP_INVALID_INVERT_OUT, }; #define DSA_COMP_STATUS_MASK 0x7f @@ -319,8 +340,12 @@ struct iax_completion_record { uint32_t output_size; uint8_t output_bits; uint8_t rsvd3; - uint16_t rsvd4; - uint64_t rsvd5[4]; + uint16_t xor_csum; + uint32_t crc; + uint32_t min; + uint32_t max; + uint32_t sum; + uint64_t rsvd4[2]; } __attribute__((packed)); struct iax_raw_completion_record { -- cgit From 52126d4c03798cc55aa927fea4c776ab26b5a5f0 Mon Sep 17 00:00:00 
2001
From: Christophe JAILLET
Date: Sun, 6 Feb 2022 10:47:45 +0100
Subject: dmaengine: Remove a useless mutex

According to lib/idr.c: "The IDA handles its own locking. It is safe to
call any of the IDA functions without synchronisation in your code."
So the 'chan_mutex' mutex can just be removed; it exists only to
protect some ida_alloc()/ida_free() calls.

Signed-off-by: Christophe JAILLET
Link: https://lore.kernel.org/r/7180452c1d77b039e27b6f9418e0e7d9dd33c431.1644140845.git.christophe.jaillet@wanadoo.fr
Signed-off-by: Vinod Koul
---
 drivers/dma/dmaengine.c   | 7 -------
 include/linux/dmaengine.h | 1 -
 2 files changed, 8 deletions(-)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2cfa8458b51b..e80feeea0e01 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1053,9 +1053,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
	 * When the chan_id is a negative value, we are dynamically adding
	 * the channel. Otherwise we are static enumerating.
	 */
-	mutex_lock(&device->chan_mutex);
	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
-	mutex_unlock(&device->chan_mutex);
	if (chan->chan_id < 0) {
		pr_err("%s: unable to alloc ida for chan: %d\n",
		       __func__, chan->chan_id);
@@ -1078,9 +1076,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
	return 0;

 err_out_ida:
-	mutex_lock(&device->chan_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
-	mutex_unlock(&device->chan_mutex);
 err_free_dev:
	kfree(chan->dev);
 err_free_local:
@@ -1113,9 +1109,7 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
	device->chancnt--;
	chan->dev->chan = NULL;
	mutex_unlock(&dma_list_mutex);
-	mutex_lock(&device->chan_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
-	mutex_unlock(&device->chan_mutex);
	device_unregister(&chan->dev->device);
	free_percpu(chan->local);
 }
@@ -1250,7 +1244,6 @@ int dma_async_device_register(struct dma_device *device)
	if (rc != 0)
		return rc;

-	mutex_init(&device->chan_mutex);
	ida_init(&device->chan_ida);

	/* represent channels in sysfs. Probably want devs too */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 842d4f7ca752..6db9e03afd0b 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -870,7 +870,6 @@ struct dma_device {
	struct device *dev;
	struct module *owner;
	struct ida chan_ida;
-	struct mutex chan_mutex;	/* to protect chan_ida */

	u32 src_addr_widths;
	u32 dst_addr_widths;
-- cgit

From e235fe3bcf831dabd67bc2d9b75cad53311a16ec Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Wed, 6 Apr 2022 18:55:06 +0530
Subject: dmaengine: qcom: gpi: set chain and link flag for duplex

Newer platforms appear to have stricter requirements for TRE flags,
which caused transactions to time out. This was traced to the missing
chain and link flags for duplex SPI transactions. So add these two
flags.
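To make the flag selection easier to follow, here is a condensed sketch
of the logic this change arrives at, using the same gpi.c names as the
hunk below (the patch itself keeps the if/else chain shown in the diff):

	switch (spi->cmd) {
	case SPI_RX:	/* RX only: raise an interrupt at end of block */
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
		break;
	case SPI_TX:	/* TX only: chain to the following TRE */
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
		break;
	default:	/* SPI_DUPLEX: both chain and link must be set */
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
		tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
		break;
	}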
Signed-off-by: Vinod Koul
Reviewed-by: Bjorn Andersson
Tested-by: Bjorn Andersson
Link: https://lore.kernel.org/r/20220406132508.1029348-1-vkoul@kernel.org
Signed-off-by: Vinod Koul
---
 drivers/dma/qcom/gpi.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 94f3648f7483..3429ceccd13b 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1754,10 +1754,14 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
		tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN);

		tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
-		if (spi->cmd == SPI_RX)
+		if (spi->cmd == SPI_RX) {
			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
-		else
+		} else if (spi->cmd == SPI_TX) {
+			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+		} else { /* SPI_DUPLEX */
			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
+		}
	}

	/* create the dma tre */
-- cgit

From dd45d96bd9d3a052f1497f3d813a76d8308af25e Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Wed, 6 Apr 2022 18:55:07 +0530
Subject: dt-bindings: dmaengine: qcom: gpi: add compatible for sm8350/sm8450

Add the compatibles for the newer Qualcomm SoCs with GPI DMA, i.e.
SM8350 and SM8450.

Signed-off-by: Vinod Koul
Reviewed-by: Bjorn Andersson
Acked-by: Krzysztof Kozlowski
Link: https://lore.kernel.org/r/20220406132508.1029348-2-vkoul@kernel.org
Signed-off-by: Vinod Koul
---
 Documentation/devicetree/bindings/dma/qcom,gpi.yaml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
index e614fe3187bb..67f046a4a442 100644
--- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
+++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
@@ -22,6 +22,8 @@ properties:
      - qcom,sdm845-gpi-dma
      - qcom,sm8150-gpi-dma
      - qcom,sm8250-gpi-dma
+      - qcom,sm8350-gpi-dma
+      - qcom,sm8450-gpi-dma

  reg:
    maxItems: 1
-- cgit

From 6316572cf30283d784df0ecf3d517daf020b4507 Mon Sep 17 00:00:00 2001
From: Vinod Koul
Date: Wed, 6 Apr 2022 18:55:08 +0530
Subject: dmaengine: qcom: gpi: Add support for ee_offset

Controllers on newer SoCs like the SM8450 have their registers at an
offset. Add ee_offset to driver_data and add this compatible to the
driver.
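The mechanism used here is the common pattern of carrying a small
per-compatible constant in the of_device_id .data pointer and
recovering it with device_get_match_data(). A minimal sketch of the
pattern follows; the "vendor,..." compatibles and my_probe() are
hypothetical, not gpi.c code:

	#include <linux/of.h>
	#include <linux/platform_device.h>
	#include <linux/property.h>

	static const struct of_device_id my_of_match[] = {
		/* per-compatible EE register offset stored in .data */
		{ .compatible = "vendor,old-dma", .data = (void *)0x0 },
		{ .compatible = "vendor,new-dma", .data = (void *)0x10000 },
		{ },
	};

	static int my_probe(struct platform_device *pdev)
	{
		/* cast back through uintptr_t: .data carries an integer here */
		u32 ee_offset = (uintptr_t)device_get_match_data(&pdev->dev);

		/* the driver then rebases its EE register window by ee_offset */
		dev_info(&pdev->dev, "using ee_offset %#x\n", ee_offset);
		return 0;
	}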
Signed-off-by: Vinod Koul Reviewed-by: Bjorn Andersson Tested-by: Bjorn Andersson Link: https://lore.kernel.org/r/20220406132508.1029348-3-vkoul@kernel.org Signed-off-by: Vinod Koul --- drivers/dma/qcom/gpi.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c index 3429ceccd13b..0b402d923eae 100644 --- a/drivers/dma/qcom/gpi.c +++ b/drivers/dma/qcom/gpi.c @@ -2152,6 +2152,7 @@ static int gpi_probe(struct platform_device *pdev) { struct gpi_dev *gpi_dev; unsigned int i; + u32 ee_offset; int ret; gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL); @@ -2179,6 +2180,9 @@ static int gpi_probe(struct platform_device *pdev) return ret; } + ee_offset = (uintptr_t)device_get_match_data(gpi_dev->dev); + gpi_dev->ee_base = gpi_dev->ee_base - ee_offset; + gpi_dev->ev_factor = EV_FACTOR; ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64)); @@ -2282,9 +2286,10 @@ static int gpi_probe(struct platform_device *pdev) } static const struct of_device_id gpi_of_match[] = { - { .compatible = "qcom,sdm845-gpi-dma" }, - { .compatible = "qcom,sm8150-gpi-dma" }, - { .compatible = "qcom,sm8250-gpi-dma" }, + { .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 }, + { .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 }, + { .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 }, + { .compatible = "qcom,sm8450-gpi-dma", .data = (void *)0x10000 }, { }, }; MODULE_DEVICE_TABLE(of, gpi_of_match); -- cgit From d0a3ef604801e8e8564d15bdeb29497607a0d65a Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Tue, 12 Apr 2022 14:29:59 -0700 Subject: dmaengine: qcom: gpi: Add SM8350 support The Qualcomm SM8350 platform does, like the SM8450, provide a set of GPI controllers with an ee-offset of 0x10000. Add this to the driver. Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20220412212959.2385085-1-bjorn.andersson@linaro.org Signed-off-by: Vinod Koul --- drivers/dma/qcom/gpi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c index 0b402d923eae..d8fad6a77560 100644 --- a/drivers/dma/qcom/gpi.c +++ b/drivers/dma/qcom/gpi.c @@ -2289,6 +2289,7 @@ static const struct of_device_id gpi_of_match[] = { { .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 }, { .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 }, { .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 }, + { .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 }, { .compatible = "qcom,sm8450-gpi-dma", .data = (void *)0x10000 }, { }, }; -- cgit From 766b540df8a374f75d7ad7084da026439c107825 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Thu, 14 Apr 2022 17:54:21 +0530 Subject: dt-bindings: dmaengine: xilinx_dma: Add MCMDA channel ID index description MCDMA IP provides up to 16 multiple channels of data movement each on MM2S and S2MM paths. Inline with implementation, in the binding add description for the channel ID start index and mention that it's fixed irrespective of the MCDMA IP configuration(number of read/write channels). 
Signed-off-by: Radhey Shyam Pandey Acked-by: Rob Herring Link: https://lore.kernel.org/r/1649939061-6675-1-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt index 325aca52cd43..d1700a5c36bf 100644 --- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt +++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt @@ -110,7 +110,11 @@ axi_vdma_0: axivdma@40030000 { Required properties: - dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs, where Channel ID is '0' for write/tx and '1' for read/rx - channel. + channel. For MCMDA, MM2S channel(write/tx) ID start from + '0' and is in [0-15] range. S2MM channel(read/rx) ID start + from '16' and is in [16-31] range. These channels ID are + fixed irrespective of IP configuration. + - dma-names: a list of DMA channel names, one per "dmas" entry Example: -- cgit From 1d05a0bdb420ddb3e2d7f39cdc24ff16d6902f55 Mon Sep 17 00:00:00 2001 From: Yunbo Yu Date: Mon, 18 Apr 2022 22:20:21 +0800 Subject: dmaengine: plx_dma: Move spin_lock_bh() to spin_lock() It is unnecessary to call spin_lock_bh() if you are already in a tasklet. Signed-off-by: Yunbo Yu Reviewed-by: Logan Gunthorpe Link: https://lore.kernel.org/r/20220418142021.1241558-1-yuyunbo519@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/plx_dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c index 1ffcb5ca9788..12725fa1655f 100644 --- a/drivers/dma/plx_dma.c +++ b/drivers/dma/plx_dma.c @@ -137,7 +137,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev) struct plx_dma_desc *desc; u32 flags; - spin_lock_bh(&plxdev->ring_lock); + spin_lock(&plxdev->ring_lock); while (plxdev->tail != plxdev->head) { desc = plx_dma_get_desc(plxdev, plxdev->tail); @@ -165,7 +165,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev) plxdev->tail++; } - spin_unlock_bh(&plxdev->ring_lock); + spin_unlock(&plxdev->ring_lock); } static void plx_dma_abort_desc(struct plx_dma_dev *plxdev) -- cgit From 578245307f4afefd8da2ca66979d40b9ef517d67 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Thu, 14 Apr 2022 12:12:16 +0530 Subject: dt-bindings: dmaengine: qcom: gpi: add compatible for sc7280 Document the compatible for GPI DMA controller on SC7280 SoC Signed-off-by: Vinod Koul Acked-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20220414064216.1182177-1-vkoul@kernel.org Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/qcom,gpi.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml index 67f046a4a442..8a790ffbdaac 100644 --- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml +++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml @@ -19,6 +19,7 @@ allOf: properties: compatible: enum: + - qcom,sc7280-gpi-dma - qcom,sdm845-gpi-dma - qcom,sm8150-gpi-dma - qcom,sm8250-gpi-dma -- cgit From d965068259d13fde49487b45064106d3d0c57a74 Mon Sep 17 00:00:00 2001 From: Ilya Novikov Date: Wed, 13 Apr 2022 14:37:33 +0300 Subject: dmaengine: PTDMA: support polled mode If the DMA_PREP_INTERRUPT flag is not provided, run in polled mode, which significantly improves IOPS: more than twice on chunks < 4K. 
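From a dmaengine client's point of view, polled mode is selected simply
by omitting DMA_PREP_INTERRUPT when preparing the descriptor and then
polling for completion; the driver's device_tx_status hook (pt_tx_status
in the diff below) re-checks the hardware on every poll. A hedged
sketch using only generic dmaengine client calls, not ptdma-internal
code, and assuming chan was obtained via dma_request_chan() with
dst/src already DMA-mapped:

	#include <linux/dmaengine.h>

	static int polled_memcpy(struct dma_chan *chan, dma_addr_t dst,
				 dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		enum dma_status status;
		dma_cookie_t cookie;

		/* no DMA_PREP_INTERRUPT: the driver may run this descriptor polled */
		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
		if (!tx)
			return -ENOMEM;

		cookie = dmaengine_submit(tx);
		if (dma_submit_error(cookie))
			return -EIO;

		dma_async_issue_pending(chan);

		/* busy-wait until the transfer leaves DMA_IN_PROGRESS */
		do {
			status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		} while (status == DMA_IN_PROGRESS);

		return status == DMA_COMPLETE ? 0 : -EIO;
	}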
Signed-off-by: Ilya Novikov Link: https://lore.kernel.org/r/20220413113733.59041-1-i.m.novikov@yadro.com Signed-off-by: Vinod Koul --- drivers/dma/ptdma/ptdma-dev.c | 36 +++++++++++++++++++----------------- drivers/dma/ptdma/ptdma-dmaengine.c | 16 +++++++++++++++- drivers/dma/ptdma/ptdma.h | 13 +++++++++++++ 3 files changed, 47 insertions(+), 18 deletions(-) diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c index daafea5bc35d..377da23012ac 100644 --- a/drivers/dma/ptdma/ptdma-dev.c +++ b/drivers/dma/ptdma/ptdma-dev.c @@ -100,6 +100,7 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q, struct pt_passthru_engine *pt_engine) { struct ptdma_desc desc; + struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q); cmd_q->cmd_error = 0; cmd_q->total_pt_ops++; @@ -111,17 +112,12 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q, desc.dst_lo = lower_32_bits(pt_engine->dst_dma); desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma); - return pt_core_execute_cmd(&desc, cmd_q); -} - -static inline void pt_core_disable_queue_interrupts(struct pt_device *pt) -{ - iowrite32(0, pt->cmd_q.reg_control + 0x000C); -} + if (cmd_q->int_en) + pt_core_enable_queue_interrupts(pt); + else + pt_core_disable_queue_interrupts(pt); -static inline void pt_core_enable_queue_interrupts(struct pt_device *pt) -{ - iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C); + return pt_core_execute_cmd(&desc, cmd_q); } static void pt_do_cmd_complete(unsigned long data) @@ -144,14 +140,10 @@ static void pt_do_cmd_complete(unsigned long data) cmd->pt_cmd_callback(cmd->data, cmd->ret); } -static irqreturn_t pt_core_irq_handler(int irq, void *data) +void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q) { - struct pt_device *pt = data; - struct pt_cmd_queue *cmd_q = &pt->cmd_q; u32 status; - pt_core_disable_queue_interrupts(pt); - pt->total_interrupts++; status = ioread32(cmd_q->reg_control + 0x0010); if (status) { cmd_q->int_status = status; @@ -162,11 +154,21 @@ static irqreturn_t pt_core_irq_handler(int irq, void *data) if ((status & INT_ERROR) && !cmd_q->cmd_error) cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); - /* Acknowledge the interrupt */ + /* Acknowledge the completion */ iowrite32(status, cmd_q->reg_control + 0x0010); - pt_core_enable_queue_interrupts(pt); pt_do_cmd_complete((ulong)&pt->tdata); } +} + +static irqreturn_t pt_core_irq_handler(int irq, void *data) +{ + struct pt_device *pt = data; + struct pt_cmd_queue *cmd_q = &pt->cmd_q; + + pt_core_disable_queue_interrupts(pt); + pt->total_interrupts++; + pt_check_status_trans(pt, cmd_q); + pt_core_enable_queue_interrupts(pt); return IRQ_HANDLED; } diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c index 91b93e8d9779..ea07cc42f4d0 100644 --- a/drivers/dma/ptdma/ptdma-dmaengine.c +++ b/drivers/dma/ptdma/ptdma-dmaengine.c @@ -171,6 +171,7 @@ static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan, vchan_tx_prep(&chan->vc, &desc->vd, flags); desc->pt = chan->pt; + desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT); desc->issued_to_hw = 0; desc->status = DMA_IN_PROGRESS; @@ -257,6 +258,17 @@ static void pt_issue_pending(struct dma_chan *dma_chan) pt_cmd_callback(desc, 0); } +enum dma_status +pt_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct pt_device *pt = to_pt_chan(c)->pt; + struct pt_cmd_queue *cmd_q = &pt->cmd_q; + + pt_check_status_trans(pt, cmd_q); + return dma_cookie_status(c, cookie, 
txstate); +} + static int pt_pause(struct dma_chan *dma_chan) { struct pt_dma_chan *chan = to_pt_chan(dma_chan); @@ -291,8 +303,10 @@ static int pt_terminate_all(struct dma_chan *dma_chan) { struct pt_dma_chan *chan = to_pt_chan(dma_chan); unsigned long flags; + struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q; LIST_HEAD(head); + iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010); spin_lock_irqsave(&chan->vc.lock, flags); vchan_get_all_descriptors(&chan->vc, &head); spin_unlock_irqrestore(&chan->vc.lock, flags); @@ -362,7 +376,7 @@ int pt_dmaengine_register(struct pt_device *pt) dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy; dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt; dma_dev->device_issue_pending = pt_issue_pending; - dma_dev->device_tx_status = dma_cookie_status; + dma_dev->device_tx_status = pt_tx_status; dma_dev->device_pause = pt_pause; dma_dev->device_resume = pt_resume; dma_dev->device_terminate_all = pt_terminate_all; diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h index afbf192c9230..d093c43b7d13 100644 --- a/drivers/dma/ptdma/ptdma.h +++ b/drivers/dma/ptdma/ptdma.h @@ -206,6 +206,9 @@ struct pt_cmd_queue { unsigned int active; unsigned int suspended; + /* Interrupt flag */ + bool int_en; + /* Register addresses for queue */ void __iomem *reg_control; u32 qcontrol; /* Cached control register */ @@ -318,7 +321,17 @@ void pt_core_destroy(struct pt_device *pt); int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q, struct pt_passthru_engine *pt_engine); +void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q); void pt_start_queue(struct pt_cmd_queue *cmd_q); void pt_stop_queue(struct pt_cmd_queue *cmd_q); +static inline void pt_core_disable_queue_interrupts(struct pt_device *pt) +{ + iowrite32(0, pt->cmd_q.reg_control + 0x000C); +} + +static inline void pt_core_enable_queue_interrupts(struct pt_device *pt) +{ + iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C); +} #endif -- cgit From 2128565a8d3055623f651712b7faa3d66ce4f02c Mon Sep 17 00:00:00 2001 From: Aidan MacDonald Date: Mon, 11 Apr 2022 16:36:18 +0100 Subject: dmaengine: jz4780: set DMA maximum segment size Set the maximum segment size, since the hardware can do transfers larger than the default 64 KiB returned by dma_get_max_seg_size(). The maximum segment size is limited by the 24-bit transfer count field in DMA descriptors. The number of bytes is equal to the transfer count times the transfer size unit, which is selected by the driver based on the DMA buffer address and length of the transfer. The size unit can be as small as 1 byte, so set the maximum segment size to 2^24-1 bytes to ensure the transfer count will not overflow regardless of the size unit selected by the driver. Signed-off-by: Aidan MacDonald Link: https://lore.kernel.org/r/20220411153618.49876-1-aidanmacdonald.0x0@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/dma-jz4780.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index fc513eb2b289..e2ec540e6519 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -911,6 +912,14 @@ static int jz4780_dma_probe(struct platform_device *pdev) dd = &jzdma->dma_device; + /* + * The real segment size limit is dependent on the size unit selected + * for the transfer. 
Because the size unit is selected automatically
+	 * and may be as small as 1 byte, use a safe limit of 2^24-1 bytes to
+	 * ensure the 24-bit transfer count in the descriptor cannot overflow.
+	 */
+	dma_set_max_seg_size(dev, 0xffffff);
+
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
-- cgit

From 4e5a4eb20393b851590b4465f1197a8041c2076b Mon Sep 17 00:00:00 2001
From: Dave Jiang
Date: Mon, 11 Apr 2022 15:09:38 -0700
Subject: dmaengine: idxd: set DMA_INTERRUPT cap bit

Even though the idxd driver has always supported interrupts, it never
actually set the DMA_INTERRUPT cap bit. Rectify this mistake so the
interrupt capability is advertised.

Reported-by: Ben Walker
Signed-off-by: Dave Jiang
Link: https://lore.kernel.org/r/164971497859.2201379.17925303210723708961.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul
---
 drivers/dma/idxd/dma.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index bfff59617d04..13e061944db9 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -193,6 +193,7 @@ int idxd_register_dma_device(struct idxd_device *idxd)
	INIT_LIST_HEAD(&dma->channels);
	dma->dev = dev;

+	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
	dma->device_release = idxd_dma_release;
-- cgit

From 23084545dbb0ac0d1f0acad915bdeed7bd5f48cd Mon Sep 17 00:00:00 2001
From: Dave Jiang
Date: Mon, 11 Apr 2022 15:11:16 -0700
Subject: dmaengine: idxd: set max_xfer and max_batch for RO device

Load the max_xfer_size and max_batch_size shadow variables from the
values read from the registers. This will allow a read-only device to
display the correct values for the sysfs attributes.

Signed-off-by: Dave Jiang
Link: https://lore.kernel.org/r/164971507673.2201761.11244446608988838897.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul
---
 drivers/dma/idxd/device.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 2903f8bb30e1..4f5c2367ec93 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -1031,6 +1031,9 @@ static int idxd_wq_load_config(struct idxd_wq *wq)

	wq->priority = wq->wqcfg->priority;

+	wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
+	wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift;
+
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
-- cgit

From 3dbc47a9629d3ab1bca89cc25f6bffcc47c71ed7 Mon Sep 17 00:00:00 2001
From: Haowen Bai
Date: Tue, 12 Apr 2022 09:16:20 +0800
Subject: dmaengine: pl08x: drop the useless function

pl08x_prep_dma_interrupt() only declares an unneeded 'retval' variable
and unconditionally returns NULL, so it does nothing useful. Rather
than just tidying the variable, the clearer fix is to drop the
function.
Signed-off-by: Haowen Bai Link: https://lore.kernel.org/r/1649726180-13133-1-git-send-email-baihaowen@meizu.com Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index a24882ba3764..a4a794e62ac2 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1535,14 +1535,6 @@ static void pl08x_free_chan_resources(struct dma_chan *chan) vchan_free_chan_resources(to_virt_chan(chan)); } -static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( - struct dma_chan *chan, unsigned long flags) -{ - struct dma_async_tx_descriptor *retval = NULL; - - return retval; -} - /* * Code accessing dma_async_is_complete() in a tight loop may give problems. * If slaves are relying on interrupts to signal completion this function @@ -2760,7 +2752,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->memcpy.dev = &adev->dev; pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; - pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; pl08x->memcpy.device_issue_pending = pl08x_issue_pending; pl08x->memcpy.device_config = pl08x_config; @@ -2787,8 +2778,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->slave.dev = &adev->dev; pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; - pl08x->slave.device_prep_dma_interrupt = - pl08x_prep_dma_interrupt; pl08x->slave.device_tx_status = pl08x_dma_tx_status; pl08x->slave.device_issue_pending = pl08x_issue_pending; pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; -- cgit From e335de6ba5b63872e03208c83a44d2dc155985b8 Mon Sep 17 00:00:00 2001 From: Haowen Bai Date: Tue, 12 Apr 2022 15:59:00 +0800 Subject: dmaengine: mediatek: mtk-hsdma: use NULL instead of using plain integer as pointer This fixes the following sparse warnings: drivers/dma/mediatek/mtk-hsdma.c:604:26: warning: Using plain integer as NULL pointer Signed-off-by: Haowen Bai Link: https://lore.kernel.org/r/1649750340-30777-1-git-send-email-baihaowen@meizu.com Signed-off-by: Vinod Koul --- drivers/dma/mediatek/mtk-hsdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index c0fffde7fe08..9ebd9231f62f 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c @@ -601,7 +601,7 @@ static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma) cb->flag = 0; } - cb->vd = 0; + cb->vd = NULL; /* * Recycle the RXD with the helper WRITE_ONCE that can ensure -- cgit From a8facc7b988599f83a680d2d61f4607cda495175 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 12 Apr 2022 11:06:32 -0700 Subject: dmaengine: add verification of DMA_INTERRUPT capability for dmatest Looks like I forgot to add DMA_INTERRUPT cap setting to the idxd driver and dmatest is still working regardless of this mistake. Add an explicit check of DMA_INTERRUPT capability for dmatest to make sure the DMA device being used actually supports interrupt before the test is launched and also that the driver is programmed correctly. 
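The check reduces to consulting the device's capability mask before
asking for interrupt-driven completion. A minimal illustration of the
pattern, in generic dmaengine terms rather than the dmatest diff below,
with pick_prep_flags() being a hypothetical helper:

	#include <linux/dmaengine.h>

	/* choose prep flags based on what the channel's device advertises */
	static unsigned long pick_prep_flags(struct dma_chan *chan, bool polled)
	{
		if (polled)
			return DMA_CTRL_ACK;

		if (dma_has_cap(DMA_INTERRUPT, chan->device->cap_mask))
			return DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

		/* caller must treat 0 as "interrupt mode unavailable" and bail out */
		return 0;
	}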
Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/164978679251.2361020.5856734256126725993.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index f696246f57fd..0a2168a4ccb0 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -675,10 +675,16 @@ static int dmatest_func(void *data) /* * src and dst buffers are freed by ourselves below */ - if (params->polled) + if (params->polled) { flags = DMA_CTRL_ACK; - else - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; + } else { + if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) { + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; + } else { + pr_err("Channel does not support interrupt!\n"); + goto err_pq_array; + } + } ktime = ktime_get(); while (!(kthread_should_stop() || @@ -906,6 +912,7 @@ error_unmap_continue: runtime = ktime_to_us(ktime); ret = 0; +err_pq_array: kfree(dma_pq); err_srcs_array: kfree(srcs); -- cgit From 96144c8fb3921aa8f02ecac2129d30fa36f93ccb Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Wed, 13 Apr 2022 16:38:42 +0800 Subject: dmaengine: tegra: Remove unused including Eliminate the follow versioncheck warning: ./drivers/dma/tegra186-gpc-dma.c: 21 linux/version.h not needed. Reported-by: Abaci Robot Signed-off-by: Jiapeng Chong Link: https://lore.kernel.org/r/20220413083842.69845-1-jiapeng.chong@linux.alibaba.com Signed-off-by: Vinod Koul --- drivers/dma/tegra186-gpc-dma.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c index f12327732041..97fe0e9e9b83 100644 --- a/drivers/dma/tegra186-gpc-dma.c +++ b/drivers/dma/tegra186-gpc-dma.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include "virt-dma.h" -- cgit From 439b5e765a00a546e8f6b6eedac69889d0b5a869 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Fri, 4 Mar 2022 14:02:57 -0700 Subject: dmaengine: idxd: move wq irq enabling to after device enable Move the calling of request_irq() and other related irq setup code until after the WQ is successfully enabled. This reduces the amount of setup/teardown if the wq is not configured correctly and cannot be enabled. 
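The underlying rule is ordinary probe/unwind symmetry: do the cheap,
easily-failed step (enabling the WQ) first, acquire the IRQ only once
that has succeeded, and release in reverse order on error. A schematic
sketch with hypothetical helpers, not the idxd functions in the diff
below:

	struct my_wq;	/* hypothetical */
	int my_enable_wq(struct my_wq *wq);
	int my_request_irq(struct my_wq *wq);
	int my_alloc_resources(struct my_wq *wq);
	void my_free_irq(struct my_wq *wq);
	void my_disable_wq(struct my_wq *wq);

	static int wq_probe_ordered(struct my_wq *wq)
	{
		int rc;

		rc = my_enable_wq(wq);		/* a failure here costs no teardown */
		if (rc)
			return rc;

		rc = my_request_irq(wq);	/* IRQ set up only after enable */
		if (rc)
			goto err_disable;

		rc = my_alloc_resources(wq);
		if (rc)
			goto err_free_irq;

		return 0;

	err_free_irq:
		my_free_irq(wq);
	err_disable:
		my_disable_wq(wq);
		return rc;
	}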
Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/164642777730.179702.1880317757087484299.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/dma.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index 13e061944db9..644114465b33 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -291,13 +291,6 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); wq->type = IDXD_WQT_KERNEL; - rc = idxd_wq_request_irq(wq); - if (rc < 0) { - idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR; - dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc); - goto err_irq; - } - rc = __drv_enable_wq(wq); if (rc < 0) { dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc); @@ -305,6 +298,13 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) goto err; } + rc = idxd_wq_request_irq(wq); + if (rc < 0) { + idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR; + dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc); + goto err_irq; + } + rc = idxd_wq_alloc_resources(wq); if (rc < 0) { idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR; @@ -336,10 +336,10 @@ err_dma: err_ref: idxd_wq_free_resources(wq); err_res_alloc: - __drv_disable_wq(wq); -err: idxd_wq_free_irq(wq); err_irq: + __drv_disable_wq(wq); +err: wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); return rc; -- cgit From fc44ff0ae9f2aa20b64c4e63ab4614156df80240 Mon Sep 17 00:00:00 2001 From: Ben Walker Date: Tue, 1 Mar 2022 11:25:48 -0700 Subject: dmaengine: Document dmaengine_prep_dma_memset Document this function to make clear the expected behavior of the 'value' parameter. It was intended to match the behavior of POSIX memset as laid out here: https://lore.kernel.org/dmaengine/YejrA5ZWZ3lTRO%2F1@matsya/ Signed-off-by: Ben Walker Link: https://lore.kernel.org/r/20220301182551.883474-2-benjamin.walker@intel.com Signed-off-by: Vinod Koul --- Documentation/driver-api/dmaengine/provider.rst | 6 ++++++ include/linux/dmaengine.h | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst index d3fa80e333b1..1e0f1f85d10e 100644 --- a/Documentation/driver-api/dmaengine/provider.rst +++ b/Documentation/driver-api/dmaengine/provider.rst @@ -206,6 +206,12 @@ Currently, the types available are: - The device is able to perform parity check using RAID6 P+Q algorithm against a memory buffer. +- DMA_MEMSET + + - The device is able to fill memory with the provided pattern + + - The pattern is treated as a single byte signed value. + - DMA_INTERRUPT - The device is able to trigger a dummy transfer that will diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 6db9e03afd0b..b46b88e6aa0d 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1030,6 +1030,14 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( return chan->device->device_prep_interleaved_dma(chan, xt, flags); } +/** + * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor. 
+ * @chan: The channel to be used for this descriptor + * @dest: Address of buffer to be set + * @value: Treated as a single byte value that fills the destination buffer + * @len: The total size of dest + * @flags: DMA engine flags + */ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset( struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) -- cgit From ceabe10cf5d642b6ea80e77193f9cd4446df26b5 Mon Sep 17 00:00:00 2001 From: Ben Walker Date: Tue, 1 Mar 2022 11:25:49 -0700 Subject: dmaengine: at_hdmac: In atc_prep_dma_memset, treat value as a single byte The value passed in to .prep_dma_memset is to be treated as a single byte repeating pattern. Signed-off-by: Ben Walker Cc: Ludovic Desroches Cc: Tudor Ambarus Link: https://lore.kernel.org/r/20220301182551.883474-3-benjamin.walker@intel.com Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 30ae36124b1d..5a50423b7378 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -942,6 +942,7 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, struct at_desc *desc; void __iomem *vaddr; dma_addr_t paddr; + char fill_pattern; dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__, &dest, value, len, flags); @@ -963,7 +964,14 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, __func__); return NULL; } - *(u32*)vaddr = value; + + /* Only the first byte of value is to be used according to dmaengine */ + fill_pattern = (char)value; + + *(u32*)vaddr = (fill_pattern << 24) | + (fill_pattern << 16) | + (fill_pattern << 8) | + fill_pattern; desc = atc_create_memset_desc(chan, paddr, dest, len); if (!desc) { -- cgit From 3e0c06964bfcc8271a07065709452fcbc44e70b0 Mon Sep 17 00:00:00 2001 From: Ben Walker Date: Tue, 1 Mar 2022 11:25:50 -0700 Subject: dmaengine: at_xdmac: In at_xdmac_prep_dma_memset, treat value as a single byte The value passed in to .prep_dma_memset is to be treated as a single byte repeating pattern. Signed-off-by: Ben Walker Cc: Ludovic Desroches Cc: Tudor Ambarus Link: https://lore.kernel.org/r/20220301182551.883474-4-benjamin.walker@intel.com Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 1476156af74b..1911861a39fa 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1202,6 +1202,7 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, unsigned long flags; size_t ublen; u32 dwidth; + char pattern; /* * WARNING: The channel configuration is set here since there is no * dmaengine_slave_config call in this case. 
Moreover we don't know the @@ -1244,10 +1245,16 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); + /* Only the first byte of value is to be used according to dmaengine */ + pattern = (char)value; + ublen = len >> dwidth; desc->lld.mbr_da = dst_addr; - desc->lld.mbr_ds = value; + desc->lld.mbr_ds = (pattern << 24) | + (pattern << 16) | + (pattern << 8) | + pattern; desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 | AT_XDMAC_MBR_UBC_NDEN | AT_XDMAC_MBR_UBC_NSEN -- cgit From 643a4a85b0bc7efeb5732fb4563c43c77ba0c6ac Mon Sep 17 00:00:00 2001 From: Ben Walker Date: Tue, 1 Mar 2022 11:25:51 -0700 Subject: dmaengine: hidma: In hidma_prep_dma_memset treat value as a single byte The value parameter is a single byte, so duplicate it to the 8 byte range that is used as the pattern. Signed-off-by: Ben Walker Cc: Sinan Kaya Link: https://lore.kernel.org/r/20220301182551.883474-5-benjamin.walker@intel.com Signed-off-by: Vinod Koul --- drivers/dma/qcom/hidma.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 51587cf8196b..210f1a9eb441 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -431,6 +431,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value, struct hidma_desc *mdesc = NULL; struct hidma_dev *mdma = mchan->dmadev; unsigned long irqflags; + u64 byte_pattern, fill_pattern; /* Get free descriptor */ spin_lock_irqsave(&mchan->lock, irqflags); @@ -443,9 +444,19 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value, if (!mdesc) return NULL; + byte_pattern = (char)value; + fill_pattern = (byte_pattern << 56) | + (byte_pattern << 48) | + (byte_pattern << 40) | + (byte_pattern << 32) | + (byte_pattern << 24) | + (byte_pattern << 16) | + (byte_pattern << 8) | + byte_pattern; + mdesc->desc.flags = flags; hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, - value, dest, len, flags, + fill_pattern, dest, len, flags, HIDMA_TRE_MEMSET); /* Place descriptor in prepared list */ -- cgit From e235fe3bcf831dabd67bc2d9b75cad53311a16ec Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Thu, 21 Apr 2022 10:54:07 +0530 Subject: dmaengine: ptdma: statify pt_tx_status LKP bot reports a new warning: Warning: drivers/dma/ptdma/ptdma-dmaengine.c:262:1: warning: no previous prototype for 'pt_tx_status' [-Wmissing-prototypes] pt_tx_status() should be static, so declare as such. Reported-by: kernel test robot Fixes: d965068259d1 ("dmaengine: PTDMA: support polled mode") Link: https://lore.kernel.org/r/20220421052407.745637-1-vkoul@kernel.org Signed-off-by: Vinod Koul --- drivers/dma/ptdma/ptdma-dmaengine.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c index ea07cc42f4d0..cc22d162ce25 100644 --- a/drivers/dma/ptdma/ptdma-dmaengine.c +++ b/drivers/dma/ptdma/ptdma-dmaengine.c @@ -258,7 +258,7 @@ static void pt_issue_pending(struct dma_chan *dma_chan) pt_cmd_callback(desc, 0); } -enum dma_status +static enum dma_status pt_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *txstate) { -- cgit From b21fe492a3a9831c315eb456cf5480c9490eaeef Mon Sep 17 00:00:00 2001 From: Jayesh Choudhary Date: Thu, 21 Apr 2022 12:23:23 +0530 Subject: dmaengine: ti: k3-psil-am62: Update PSIL thread for saul. Correct the RX PSIL thread for sa3ul. 
Signed-off-by: Jayesh Choudhary Fixes: 5ac6bfb587772 ("dmaengine: ti: k3-psil: Add AM62x PSIL and PDMA data") Link: https://lore.kernel.org/r/20220421065323.16378-1-j-choudhary@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/k3-psil-am62.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ti/k3-psil-am62.c b/drivers/dma/ti/k3-psil-am62.c index d431e2033237..2b6fd6e37c61 100644 --- a/drivers/dma/ti/k3-psil-am62.c +++ b/drivers/dma/ti/k3-psil-am62.c @@ -70,10 +70,10 @@ /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ static struct psil_ep am62_src_ep_map[] = { /* SAUL */ - PSIL_SAUL(0x7500, 20, 35, 8, 35, 0), - PSIL_SAUL(0x7501, 21, 35, 8, 36, 0), - PSIL_SAUL(0x7502, 22, 43, 8, 43, 0), - PSIL_SAUL(0x7503, 23, 43, 8, 44, 0), + PSIL_SAUL(0x7504, 20, 35, 8, 35, 0), + PSIL_SAUL(0x7505, 21, 35, 8, 36, 0), + PSIL_SAUL(0x7506, 22, 43, 8, 43, 0), + PSIL_SAUL(0x7507, 23, 43, 8, 44, 0), /* PDMA_MAIN0 - SPI0-3 */ PSIL_PDMA_XY_PKT(0x4302), PSIL_PDMA_XY_PKT(0x4303), -- cgit From 63c14ae6c161dec8ff3be49277edc75a769e054a Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Apr 2022 09:43:36 -0700 Subject: dmaengine: idxd: refactor wq driver enable/disable operations Move the core driver operations from wq driver to the drv_enable_wq() and drv_disable_wq() functions. The move should reduce the wq driver's knowledge of the core driver operations and prevent code confusion for future wq drivers. Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/165047301643.3841827.11222723219862233060.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/cdev.c | 6 ++--- drivers/dma/idxd/device.c | 58 ++++++++++++++++++++++++++++++----------------- drivers/dma/idxd/dma.c | 38 +++---------------------------- drivers/dma/idxd/idxd.h | 2 -- 4 files changed, 43 insertions(+), 61 deletions(-) diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index b9b2b4a4124e..b670b75885ad 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -314,7 +314,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); wq->type = IDXD_WQT_USER; - rc = __drv_enable_wq(wq); + rc = drv_enable_wq(wq); if (rc < 0) goto err; @@ -329,7 +329,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) return 0; err_cdev: - __drv_disable_wq(wq); + drv_disable_wq(wq); err: wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); @@ -342,7 +342,7 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); idxd_wq_del_cdev(wq); - __drv_disable_wq(wq); + drv_disable_wq(wq); wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); } diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 4f5c2367ec93..22ad9ee383e2 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -1196,6 +1196,9 @@ int idxd_wq_request_irq(struct idxd_wq *wq) struct idxd_irq_entry *ie; int rc; + if (wq->type != IDXD_WQT_KERNEL) + return 0; + ie = &wq->ie; ie->vector = pci_irq_vector(pdev, ie->id); ie->pasid = device_pasid_enabled(idxd) ? 
idxd->pasid : INVALID_IOASID; @@ -1227,7 +1230,7 @@ err_irq: return rc; } -int __drv_enable_wq(struct idxd_wq *wq) +int drv_enable_wq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; @@ -1328,8 +1331,36 @@ int __drv_enable_wq(struct idxd_wq *wq) } wq->client_count = 0; + + rc = idxd_wq_request_irq(wq); + if (rc < 0) { + idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR; + dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc); + goto err_irq; + } + + rc = idxd_wq_alloc_resources(wq); + if (rc < 0) { + idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR; + dev_dbg(dev, "WQ resource alloc failed\n"); + goto err_res_alloc; + } + + rc = idxd_wq_init_percpu_ref(wq); + if (rc < 0) { + idxd->cmd_status = IDXD_SCMD_PERCPU_ERR; + dev_dbg(dev, "percpu_ref setup failed\n"); + goto err_ref; + } + return 0; +err_ref: + idxd_wq_free_resources(wq); +err_res_alloc: + idxd_wq_free_irq(wq); +err_irq: + idxd_wq_unmap_portal(wq); err_map_portal: rc = idxd_wq_disable(wq, false); if (rc < 0) @@ -1338,17 +1369,7 @@ err: return rc; } -int drv_enable_wq(struct idxd_wq *wq) -{ - int rc; - - mutex_lock(&wq->wq_lock); - rc = __drv_enable_wq(wq); - mutex_unlock(&wq->wq_lock); - return rc; -} - -void __drv_disable_wq(struct idxd_wq *wq) +void drv_disable_wq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; @@ -1359,21 +1380,16 @@ void __drv_disable_wq(struct idxd_wq *wq) dev_warn(dev, "Clients has claim on wq %d: %d\n", wq->id, idxd_wq_refcount(wq)); + idxd_wq_free_resources(wq); idxd_wq_unmap_portal(wq); - idxd_wq_drain(wq); idxd_wq_reset(wq); - + percpu_ref_exit(&wq->wq_active); + idxd_wq_free_irq(wq); + wq->type = IDXD_WQT_NONE; wq->client_count = 0; } -void drv_disable_wq(struct idxd_wq *wq) -{ - mutex_lock(&wq->wq_lock); - __drv_disable_wq(wq); - mutex_unlock(&wq->wq_lock); -} - int idxd_device_drv_probe(struct idxd_dev *idxd_dev) { struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev); diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index 644114465b33..950f06c8aad5 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -291,34 +291,13 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); wq->type = IDXD_WQT_KERNEL; - rc = __drv_enable_wq(wq); + rc = drv_enable_wq(wq); if (rc < 0) { dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc); rc = -ENXIO; goto err; } - rc = idxd_wq_request_irq(wq); - if (rc < 0) { - idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR; - dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc); - goto err_irq; - } - - rc = idxd_wq_alloc_resources(wq); - if (rc < 0) { - idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR; - dev_dbg(dev, "WQ resource alloc failed\n"); - goto err_res_alloc; - } - - rc = idxd_wq_init_percpu_ref(wq); - if (rc < 0) { - idxd->cmd_status = IDXD_SCMD_PERCPU_ERR; - dev_dbg(dev, "percpu_ref setup failed\n"); - goto err_ref; - } - rc = idxd_register_dma_channel(wq); if (rc < 0) { idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR; @@ -331,14 +310,7 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) return 0; err_dma: - __idxd_wq_quiesce(wq); - percpu_ref_exit(&wq->wq_active); -err_ref: - idxd_wq_free_resources(wq); -err_res_alloc: - idxd_wq_free_irq(wq); -err_irq: - __drv_disable_wq(wq); + drv_disable_wq(wq); err: wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); @@ -352,11 +324,7 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); __idxd_wq_quiesce(wq); idxd_unregister_dma_channel(wq); - 
idxd_wq_free_resources(wq); - __drv_disable_wq(wq); - percpu_ref_exit(&wq->wq_active); - idxd_wq_free_irq(wq); - wq->type = IDXD_WQT_NONE; + drv_disable_wq(wq); mutex_unlock(&wq->wq_lock); } diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index da72eb15f610..8e03fb548d13 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -559,9 +559,7 @@ void idxd_unregister_idxd_drv(void); int idxd_device_drv_probe(struct idxd_dev *idxd_dev); void idxd_device_drv_remove(struct idxd_dev *idxd_dev); int drv_enable_wq(struct idxd_wq *wq); -int __drv_enable_wq(struct idxd_wq *wq); void drv_disable_wq(struct idxd_wq *wq); -void __drv_disable_wq(struct idxd_wq *wq); int idxd_device_init_reset(struct idxd_device *idxd); int idxd_device_enable(struct idxd_device *idxd); int idxd_device_disable(struct idxd_device *idxd); -- cgit From 99faef48e7a3f878848a2d711af710e36fadbd6e Mon Sep 17 00:00:00 2001 From: Yunbo Yu Date: Wed, 20 Apr 2022 20:27:54 +0800 Subject: dmaengine: mv_xor_v2: Move spin_lock_bh() to spin_lock() It is unnecessary to call spin_lock_bh() when you are already in a tasklet. Signed-off-by: Yunbo Yu Link: https://lore.kernel.org/r/20220420122754.148359-1-yuyunbo519@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/mv_xor_v2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index 9c8b4084ba2f..f10b29034da1 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -591,14 +591,14 @@ static void mv_xor_v2_tasklet(struct tasklet_struct *t) dma_run_dependencies(&next_pending_sw_desc->async_tx); /* Lock the channel */ - spin_lock_bh(&xor_dev->lock); + spin_lock(&xor_dev->lock); /* add the SW descriptor to the free descriptors list */ list_add(&next_pending_sw_desc->free_list, &xor_dev->free_sw_desc); /* Release the channel */ - spin_unlock_bh(&xor_dev->lock); + spin_unlock(&xor_dev->lock); /* increment the next descriptor */ pending_ptr++; -- cgit From 461cd3709f266d179064b0a63d8949fa2e75ff72 Mon Sep 17 00:00:00 2001 From: Akhil R Date: Thu, 5 May 2022 14:44:40 +0530 Subject: dmaengine: tegra: Use platform_get_irq() to get IRQ resource Use platform_get_irq() instead of platform_get_resource() for the IRQ resource to fix the probe failure. platform_get_resource() fails to fetch the IRQ resource as it might not be ready at that time. platform_get_irq() is also the recommended way to get an interrupt, as it directly gives the IRQ number and no conversion from a resource is required.
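A minimal sketch of the recommended pattern described above, using hypothetical foo_* names rather than the tegra code itself:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);	/* returns the virq directly */

	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER; just propagate it */

	return devm_request_irq(&pdev->dev, irq, foo_isr, 0,
				dev_name(&pdev->dev), pdev);
}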
Fixes: ee17028009d4 ("dmaengine: tegra: Add tegra gpcdma driver") Reported-by: Jonathan Hunter Signed-off-by: Akhil R Acked-by: Thierry Reding Reviewed-by: Jon Hunter Link: https://lore.kernel.org/r/20220505091440.12981-1-akhilrajeev@nvidia.com Signed-off-by: Vinod Koul --- drivers/dma/tegra186-gpc-dma.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c index 97fe0e9e9b83..3951db527dec 100644 --- a/drivers/dma/tegra186-gpc-dma.c +++ b/drivers/dma/tegra186-gpc-dma.c @@ -1328,7 +1328,6 @@ static int tegra_dma_probe(struct platform_device *pdev) struct iommu_fwspec *iommu_spec; unsigned int stream_id, i; struct tegra_dma *tdma; - struct resource *res; int ret; cdata = of_device_get_match_data(&pdev->dev); @@ -1367,16 +1366,13 @@ static int tegra_dma_probe(struct platform_device *pdev) for (i = 0; i < cdata->nr_channels; i++) { struct tegra_dma_channel *tdc = &tdma->channels[i]; + tdc->irq = platform_get_irq(pdev, i); + if (tdc->irq < 0) + return tdc->irq; + tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET + i * cdata->channel_reg_size; - res = platform_get_resource(pdev, IORESOURCE_IRQ, i); - if (!res) { - dev_err(&pdev->dev, "No irq resource for chan %d\n", i); - return -EINVAL; - } - tdc->irq = res->start; snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i); - tdc->tdma = tdma; tdc->id = i; tdc->slave_id = -1; -- cgit From aab08c1aac01097815fbcf10fce7021d2396a31f Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 21 Apr 2022 08:13:38 +0200 Subject: dmaengine: idxd: Fix the error handling path in idxd_cdev_register() If a call to alloc_chrdev_region() fails, the already allocated resources are leaking. Add the needed error handling path to fix the leak. Fixes: 42d279f9137a ("dmaengine: idxd: add char driver to expose submission portal to userland") Signed-off-by: Christophe JAILLET Acked-by: Dave Jiang Link: https://lore.kernel.org/r/1b5033dcc87b5f2a953c413f0306e883e6114542.1650521591.git.christophe.jaillet@wanadoo.fr Signed-off-by: Vinod Koul --- drivers/dma/idxd/cdev.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index b670b75885ad..bd44293804d1 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -369,10 +369,16 @@ int idxd_cdev_register(void) rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK, ictx[i].name); if (rc) - return rc; + goto err_free_chrdev_region; } return 0; + +err_free_chrdev_region: + for (i--; i >= 0; i--) + unregister_chrdev_region(ictx[i].devt, MINORMASK); + + return rc; } void idxd_cdev_remove(void) -- cgit From b965182aee6e391084addcd25be3134d82fddb22 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 2 May 2022 15:34:56 +0200 Subject: dt-bindings: renesas,rcar-dmac: R-Car V3U is R-Car Gen4 Despite the name, R-Car V3U is the first member of the R-Car Gen4 family. Hence move its compatible value to the R-Car Gen4 section. 
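The idxd_cdev_register() fix above is an instance of the classic partial-unwind idiom; a generic sketch of it, assuming hypothetical devts[] and names[] arrays of size N:

	int i, rc;

	for (i = 0; i < N; i++) {
		rc = alloc_chrdev_region(&devts[i], 0, MINORMASK, names[i]);
		if (rc)
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* undo only the allocations that succeeded: i - 1 down to 0 */
	for (i--; i >= 0; i--)
		unregister_chrdev_region(devts[i], MINORMASK);
	return rc;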
Signed-off-by: Geert Uytterhoeven Acked-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/e6e4cf701f3a43b061b9c3f7f0adc4d6addd4722.1651497024.git.geert+renesas@glider.be Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml index 7c6badf39921..7202cd68e759 100644 --- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml +++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml @@ -42,11 +42,10 @@ properties: - const: renesas,rcar-dmac - items: - - const: renesas,dmac-r8a779a0 # R-Car V3U - - - items: - - const: renesas,dmac-r8a779f0 # R-Car S4-8 - - const: renesas,rcar-gen4-dmac + - enum: + - renesas,dmac-r8a779a0 # R-Car V3U + - renesas,dmac-r8a779f0 # R-Car S4-8 + - const: renesas,rcar-gen4-dmac # R-Car Gen4 reg: true @@ -121,7 +120,6 @@ if: compatible: contains: enum: - - renesas,dmac-r8a779a0 - renesas,rcar-gen4-dmac then: properties: -- cgit From 42a1b73852c4a176d233a192422b5e1d0ba67cbf Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 11 May 2022 17:11:57 -0700 Subject: dmaengine: idxd: Separate user and kernel pasid enabling The idxd driver always gated the pasid enabling under a single knob and this assumption is incorrect. The pasid used for kernel operation can be independently toggled and has no dependency on the user pasid (and vice versa). Split the two so they are independent "enabled" flags. Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/165231431746.986466.5666862038354800551.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/cdev.c | 4 ++-- drivers/dma/idxd/device.c | 6 +++--- drivers/dma/idxd/idxd.h | 16 ++++++++++++++-- drivers/dma/idxd/init.c | 30 +++++++++++++++--------------- drivers/dma/idxd/sysfs.c | 2 +- 5 files changed, 35 insertions(+), 23 deletions(-) diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index bd44293804d1..c2808fd081d6 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -99,7 +99,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) ctx->wq = wq; filp->private_data = ctx; - if (device_pasid_enabled(idxd)) { + if (device_user_pasid_enabled(idxd)) { sva = iommu_sva_bind_device(dev, current->mm, NULL); if (IS_ERR(sva)) { rc = PTR_ERR(sva); @@ -152,7 +152,7 @@ static int idxd_cdev_release(struct inode *node, struct file *filep) if (wq_shared(wq)) { idxd_device_drain_pasid(idxd, ctx->pasid); } else { - if (device_pasid_enabled(idxd)) { + if (device_user_pasid_enabled(idxd)) { /* The wq disable in the disable pasid function will drain the wq */ rc = idxd_wq_disable_pasid(wq); if (rc < 0) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 22ad9ee383e2..49ee36038cca 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -966,7 +966,7 @@ static int idxd_wqs_setup(struct idxd_device *idxd) if (!wq->group) continue; - if (wq_shared(wq) && !device_swq_supported(idxd)) { + if (wq_shared(wq) && !wq_shared_supported(wq)) { idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT; dev_warn(dev, "No shared wq support but configured.\n"); return -EINVAL; @@ -1264,7 +1264,7 @@ int drv_enable_wq(struct idxd_wq *wq) /* Shared WQ checks */ if (wq_shared(wq)) { - if (!device_swq_supported(idxd)) { + if (!wq_shared_supported(wq)) { idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM; dev_dbg(dev, "PASID not 
enabled and shared wq.\n"); goto err; @@ -1294,7 +1294,7 @@ int drv_enable_wq(struct idxd_wq *wq) if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { int priv = 0; - if (device_pasid_enabled(idxd)) { + if (wq_pasid_enabled(wq)) { if (is_idxd_wq_kernel(wq) || wq_shared(wq)) { u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0; diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 8e03fb548d13..77d241a92bd1 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -239,6 +239,7 @@ enum idxd_device_flag { IDXD_FLAG_CONFIGURABLE = 0, IDXD_FLAG_CMD_RUNNING, IDXD_FLAG_PASID_ENABLED, + IDXD_FLAG_USER_PASID_ENABLED, }; struct idxd_dma_dev { @@ -469,9 +470,20 @@ static inline bool device_pasid_enabled(struct idxd_device *idxd) return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); } -static inline bool device_swq_supported(struct idxd_device *idxd) +static inline bool device_user_pasid_enabled(struct idxd_device *idxd) { - return (support_enqcmd && device_pasid_enabled(idxd)); + return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); +} + +static inline bool wq_pasid_enabled(struct idxd_wq *wq) +{ + return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) || + (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd)); +} + +static inline bool wq_shared_supported(struct idxd_wq *wq) +{ + return (support_enqcmd && wq_pasid_enabled(wq)); } enum idxd_portal_prot { diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 993a5dcca24f..355fb3ef4cbf 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -512,18 +512,15 @@ static int idxd_probe(struct idxd_device *idxd) dev_dbg(dev, "IDXD reset complete\n"); if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) { - rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA); - if (rc == 0) { - rc = idxd_enable_system_pasid(idxd); - if (rc < 0) { - iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); - dev_warn(dev, "Failed to enable PASID. 
No SVA support: %d\n", rc); - } else { - set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); - } - } else { - dev_warn(dev, "Unable to turn on SVA feature.\n"); - } + if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) + dev_warn(dev, "Unable to turn on user SVA feature.\n"); + else + set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); + + if (idxd_enable_system_pasid(idxd)) + dev_warn(dev, "No in-kernel DMA with PASID.\n"); + else + set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); } else if (!sva) { dev_warn(dev, "User forced SVA off via module param.\n"); } @@ -561,7 +558,8 @@ static int idxd_probe(struct idxd_device *idxd) err: if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); - iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); + if (device_user_pasid_enabled(idxd)) + iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); return rc; } @@ -574,7 +572,8 @@ static void idxd_cleanup(struct idxd_device *idxd) idxd_cleanup_internals(idxd); if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); - iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); + if (device_user_pasid_enabled(idxd)) + iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); } static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -691,7 +690,8 @@ static void idxd_remove(struct pci_dev *pdev) free_irq(irq_entry->vector, irq_entry); pci_free_irq_vectors(pdev); pci_iounmap(pdev, idxd->reg_base); - iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); + if (device_user_pasid_enabled(idxd)) + iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); pci_disable_device(pdev); destroy_workqueue(idxd->wq); perfmon_pmu_remove(idxd); diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 7e628e31ce24..d482e708f0fa 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -588,7 +588,7 @@ static ssize_t wq_mode_store(struct device *dev, if (sysfs_streq(buf, "dedicated")) { set_bit(WQ_FLAG_DEDICATED, &wq->flags); wq->threshold = 0; - } else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) { + } else if (sysfs_streq(buf, "shared")) { clear_bit(WQ_FLAG_DEDICATED, &wq->flags); } else { return -EINVAL; -- cgit From cf4ac3fef33883a14131d8925d7edfbdb7d69b68 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 11 May 2022 17:00:44 -0700 Subject: dmaengine: idxd: fix lockdep warning on device driver removal Jacob reported that with lockdep debug turned on, idxd_device_driver removal causes kernel splat from lock assert warning for idxd_device_wqs_clear_state(). Make sure idxd_device_wqs_clear_state() holds the wq lock for each wq when cleaning the wq state. Move the call outside of the device spinlock. 
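The locking rule behind this fix, sketched from the hunk that follows: wq->wq_lock is a mutex (a sleeping lock) and must not be taken while the dev_lock spinlock is held, so the per-wq cleanup runs first and only non-sleeping state changes stay under the spinlock:

	idxd_device_wqs_clear_state(idxd);	/* takes each wq's mutex */
	spin_lock(&idxd->dev_lock);		/* no sleeping below this */
	idxd_groups_clear_state(idxd);
	idxd_engines_clear_state(idxd);
	idxd->state = IDXD_DEV_DISABLED;
	spin_unlock(&idxd->dev_lock);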
Reported-by: Jacob Pan Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/165231364426.986304.9294302800482492780.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 49ee36038cca..001a82040e98 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -578,19 +578,15 @@ int idxd_device_disable(struct idxd_device *idxd) return -ENXIO; } - spin_lock(&idxd->dev_lock); idxd_device_clear_state(idxd); - idxd->state = IDXD_DEV_DISABLED; - spin_unlock(&idxd->dev_lock); return 0; } void idxd_device_reset(struct idxd_device *idxd) { idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL); - spin_lock(&idxd->dev_lock); idxd_device_clear_state(idxd); - idxd->state = IDXD_DEV_DISABLED; + spin_lock(&idxd->dev_lock); idxd_unmask_error_interrupts(idxd); spin_unlock(&idxd->dev_lock); } @@ -717,23 +713,27 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd) { int i; - lockdep_assert_held(&idxd->dev_lock); for (i = 0; i < idxd->max_wqs; i++) { struct idxd_wq *wq = idxd->wqs[i]; if (wq->state == IDXD_WQ_ENABLED) { + mutex_lock(&wq->wq_lock); idxd_wq_disable_cleanup(wq); idxd_wq_device_reset_cleanup(wq); wq->state = IDXD_WQ_DISABLED; + mutex_unlock(&wq->wq_lock); } } } void idxd_device_clear_state(struct idxd_device *idxd) { + idxd_device_wqs_clear_state(idxd); + spin_lock(&idxd->dev_lock); idxd_groups_clear_state(idxd); idxd_engines_clear_state(idxd); - idxd_device_wqs_clear_state(idxd); + idxd->state = IDXD_DEV_DISABLED; + spin_unlock(&idxd->dev_lock); } static void idxd_group_config_write(struct idxd_group *group) -- cgit From 9120c879d28873829dfa2f511c68162d52540e6a Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 11 May 2022 17:01:13 -0700 Subject: dmaengine: idxd: free irq before wq type is reset Call idxd_wq_free_irq() in the drv_disable_wq() function before idxd_wq_reset() is called. Otherwise the wq type is reset and the irq does not get freed. Fixes: 63c14ae6c161 ("dmaengine: idxd: refactor wq driver enable/disable operations") Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/165231367316.986407.11001767338124941736.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 001a82040e98..c7412f59ffb1 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -1383,9 +1383,9 @@ void drv_disable_wq(struct idxd_wq *wq) idxd_wq_free_resources(wq); idxd_wq_unmap_portal(wq); idxd_wq_drain(wq); + idxd_wq_free_irq(wq); idxd_wq_reset(wq); percpu_ref_exit(&wq->wq_active); - idxd_wq_free_irq(wq); wq->type = IDXD_WQT_NONE; wq->client_count = 0; } -- cgit From 4734afb0d5ed3e56494ca6f28e51bafafef4c6aa Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 11 May 2022 17:00:57 -0700 Subject: dmaengine: idxd: remove redundant idxd_wq_disable_cleanup() call idxd_device_wqs_clear_state() already calls idxd_wq_disable_cleanup(). There is no need for idxd_wq_device_reset_cleanup() to call it again. Remove the redundant call from idxd_wq_device_reset_cleanup().
Fixes: 0dcfe41e9a4c ("dmanegine: idxd: cleanup all device related bits after disabling device") Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/165231365717.986350.2441351765955825964.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index c7412f59ffb1..dee5e4f8f426 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -395,7 +395,6 @@ static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq) { lockdep_assert_held(&wq->wq_lock); - idxd_wq_disable_cleanup(wq); wq->size = 0; wq->group = NULL; } -- cgit From f9a9f43a62a04ec3183fb0da9226c7706eed0115 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Tue, 10 May 2022 12:42:40 +0530 Subject: dmaengine: zynqmp_dma: In struct zynqmp_dma_chan fix desc_size data type In the zynqmp_dma_alloc/free_chan_resources functions there is a potential overflow in the following expressions: dma_alloc_coherent(chan->dev, (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), &chan->desc_pool_p, GFP_KERNEL); dma_free_coherent(chan->dev, (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), chan->desc_pool_v, chan->desc_pool_p); The operands desc_size and ZYNQMP_DMA_NUM_DESCS were 32 bit. Though this overflow condition has not been observed, it is a potential problem with 32-bit multiplication. Fix it by changing the desc_size data type to size_t. In addition to the Coverity fix, also reuse the ZYNQMP_DMA_DESC_SIZE macro in the dma_alloc_coherent() argument. Addresses-Coverity: Event overflow_before_widen. Signed-off-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/1652166762-18317-2-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/zynqmp_dma.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 7aa63b652027..3ffa7f37c701 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -229,7 +229,7 @@ struct zynqmp_dma_chan { bool is_dmacoherent; struct tasklet_struct tasklet; bool idle; - u32 desc_size; + size_t desc_size; bool err; u32 bus_width; u32 src_burst_len; @@ -486,7 +486,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) } chan->desc_pool_v = dma_alloc_coherent(chan->dev, - (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), + (2 * ZYNQMP_DMA_DESC_SIZE(chan) * + ZYNQMP_DMA_NUM_DESCS), &chan->desc_pool_p, GFP_KERNEL); if (!chan->desc_pool_v) return -ENOMEM; -- cgit From 9126518e0439bc4b47b15d0da42f0cdba7a74b3a Mon Sep 17 00:00:00 2001 From: Shravya Kumbham Date: Tue, 10 May 2022 12:42:41 +0530 Subject: dmaengine: zynqmp_dma: check dma_async_device_register return value Add a check of the dma_async_device_register() return value and implement its error handling. Addresses-Coverity: Event check_return.
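The desc_size change above deserves a standalone illustration: with 32-bit operands, the multiplication wraps before the result is widened to the 64-bit size parameter, so at least one operand must be widened first. A generic sketch (not the zynqmp code), assuming a 64-bit size_t:

	u32 desc_size = SZ_256M;	/* 32-bit operands */
	u32 nr_descs = 64;

	/* wrong: 2 * desc_size * nr_descs wraps as a 32-bit multiply,
	 * and only the truncated result is widened to size_t */
	size_t bad = 2 * desc_size * nr_descs;

	/* right: widen one operand so the multiply happens in size_t */
	size_t good = 2 * (size_t)desc_size * nr_descs;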
Signed-off-by: Shravya Kumbham Signed-off-by: Harini Katakam Signed-off-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/1652166762-18317-3-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/zynqmp_dma.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 3ffa7f37c701..915dbe6275d4 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -1094,7 +1094,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev) p->dst_addr_widths = BIT(zdev->chan->bus_width / 8); p->src_addr_widths = BIT(zdev->chan->bus_width / 8); - dma_async_device_register(&zdev->common); + ret = dma_async_device_register(&zdev->common); + if (ret) { + dev_err(zdev->dev, "failed to register the dma device\n"); + goto free_chan_resources; + } ret = of_dma_controller_register(pdev->dev.of_node, of_zynqmp_dma_xlate, zdev); -- cgit From 517a710ac8fe9cbc48d0ec3ca81377f6ef3b86b7 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Tue, 10 May 2022 12:42:42 +0530 Subject: dmaengine: zynqmp_dma: use pm_runtime_resume_and_get() instead of pm_runtime_get_sync() pm_runtime_resume_and_get() automatically handle dev->power.usage_count decrement on errors, so prefer using it and also implement it's error handling. Signed-off-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/1652166762-18317-4-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/zynqmp_dma.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 915dbe6275d4..dc299ab36818 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -1078,7 +1078,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT); pm_runtime_use_autosuspend(zdev->dev); pm_runtime_enable(zdev->dev); - pm_runtime_get_sync(zdev->dev); + ret = pm_runtime_resume_and_get(zdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "device wakeup failed.\n"); + pm_runtime_disable(zdev->dev); + } if (!pm_runtime_enabled(zdev->dev)) { ret = zynqmp_dma_runtime_resume(zdev->dev); if (ret) -- cgit From 8e6226f0f1a321de5f9ffdcb3fe920f94b45d38b Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Fri, 6 May 2022 15:23:52 -0700 Subject: dmaengine: idxd: make idxd_register/unregister_dma_channel() static Since idxd_register/unregister_dma_channel() are only called locally, make them static. 
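The pm_runtime conversion above reflects a general contract worth spelling out: pm_runtime_get_sync() bumps the usage count even when it fails, so callers must drop it themselves, while pm_runtime_resume_and_get() drops it again on error. A hedged sketch of a balanced caller:

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already rebalanced for us */

	/* ... talk to the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);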
Reported-by: kernel test robot Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/165187583222.3287435.12882651040433040246.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/dma.c | 4 ++-- drivers/dma/idxd/idxd.h | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index 950f06c8aad5..d3476a532d3b 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -228,7 +228,7 @@ void idxd_unregister_dma_device(struct idxd_device *idxd) dma_async_device_unregister(&idxd->idxd_dma->dma); } -int idxd_register_dma_channel(struct idxd_wq *wq) +static int idxd_register_dma_channel(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct dma_device *dma = &idxd->idxd_dma->dma; @@ -265,7 +265,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq) return 0; } -void idxd_unregister_dma_channel(struct idxd_wq *wq) +static void idxd_unregister_dma_channel(struct idxd_wq *wq) { struct idxd_dma_chan *idxd_chan = wq->idxd_chan; struct dma_chan *chan = &idxd_chan->chan; diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 77d241a92bd1..fed0dfc1eaa8 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -612,8 +612,6 @@ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc); /* dmaengine */ int idxd_register_dma_device(struct idxd_device *idxd); void idxd_unregister_dma_device(struct idxd_device *idxd); -int idxd_register_dma_channel(struct idxd_wq *wq); -void idxd_unregister_dma_channel(struct idxd_wq *wq); void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res); void idxd_dma_complete_txd(struct idxd_desc *desc, enum idxd_complete_type comp_type, bool free_desc); -- cgit From d0ad42388a396813771e9407614f40d128ad62db Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Thu, 5 May 2022 08:05:07 -0700 Subject: dmaengine: idxd: skip irq free when wq type is not kernel Skip freeing the wq irq resources when the wq type is not kernel, since the driver skips the irq allocation during wq enable. Add a wq type check in idxd_wq_free_irq() to mirror idxd_wq_request_irq(). Fixes: 63c14ae6c161 ("dmaengine: idxd: refactor wq driver enable/disable operations") Reported-by: Tony Zu Tested-by: Tony Zu Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/165176310726.2112428.7474366910758522079.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index dee5e4f8f426..8b1f8591ae83 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -1176,6 +1176,9 @@ void idxd_wq_free_irq(struct idxd_wq *wq) struct idxd_device *idxd = wq->idxd; struct idxd_irq_entry *ie = &wq->ie; + if (wq->type != IDXD_WQT_KERNEL) + return; + synchronize_irq(ie->vector); free_irq(ie->vector, ie); idxd_flush_pending_descs(ie); -- cgit From 54326f37ec134c138dad4ae33bea048b0b186dd3 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 3 May 2022 08:51:45 +0200 Subject: dt-bindings: dmaengine: sprd: deprecate '#dma-channels' The generic property, used in most of the drivers and defined in the generic dma-common DT bindings, is 'dma-channels'.
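The driver-side counterpart of such a deprecation, used by several conversions later in this series, is a new-then-deprecated property lookup; a generic sketch with a hypothetical helper name:

static int foo_read_dma_channels(struct device *dev, u32 *val)
{
	int ret;

	/* Prefer the generic property... */
	ret = device_property_read_u32(dev, "dma-channels", val);
	if (ret)
		/* ...and fall back to the deprecated spelling. */
		ret = device_property_read_u32(dev, "#dma-channels", val);

	return ret;
}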
Signed-off-by: Krzysztof Kozlowski Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/20220503065147.51728-2-krzysztof.kozlowski@linaro.org Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/sprd-dma.txt | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/sprd-dma.txt b/Documentation/devicetree/bindings/dma/sprd-dma.txt index adccea9941f1..c7e9b5fd50e7 100644 --- a/Documentation/devicetree/bindings/dma/sprd-dma.txt +++ b/Documentation/devicetree/bindings/dma/sprd-dma.txt @@ -8,10 +8,13 @@ Required properties: - interrupts: Should contain one interrupt shared by all channel. - #dma-cells: must be <1>. Used to represent the number of integer cells in the dmas property of client device. -- #dma-channels : Number of DMA channels supported. Should be 32. +- dma-channels : Number of DMA channels supported. Should be 32. - clock-names: Should contain the clock of the DMA controller. - clocks: Should contain a clock specifier for each entry in clock-names. +Deprecated properties: +- #dma-channels : Number of DMA channels supported. Should be 32. + Example: Controller: @@ -20,7 +23,7 @@ apdma: dma-controller@20100000 { reg = <0x20100000 0x4000>; interrupts = ; #dma-cells = <1>; - #dma-channels = <32>; + dma-channels = <32>; clock-names = "enable"; clocks = <&clk_ap_ahb_gates 5>; }; -- cgit From d84c3ad998795503379ef8458ffffb5c06d18cc5 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 3 May 2022 08:51:46 +0200 Subject: dmaengine: sprd: deprecate '#dma-channels' The generic property, used in most of the drivers and defined in generic dma-common DT bindings, is 'dma-channels'. Switch to new property while keeping backward compatibility. Signed-off-by: Krzysztof Kozlowski Reviewed-by: Baolin Wang Link: https://lore.kernel.org/r/20220503065147.51728-3-krzysztof.kozlowski@linaro.org Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 7f158ef5672d..2138b80435ab 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -1117,7 +1117,11 @@ static int sprd_dma_probe(struct platform_device *pdev) u32 chn_count; int ret, i; - ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count); + /* Parse new and deprecated dma-channels properties */ + ret = device_property_read_u32(&pdev->dev, "dma-channels", &chn_count); + if (ret) + ret = device_property_read_u32(&pdev->dev, "#dma-channels", + &chn_count); if (ret) { dev_err(&pdev->dev, "get dma channels count failed\n"); return ret; -- cgit From 2112b8f4fb5cc35d1c384324763765953186b81f Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 26 Apr 2022 15:32:06 -0700 Subject: dmaengine: idxd: add missing callback function to support DMA_INTERRUPT When setting DMA_INTERRUPT capability, a callback function dma->device_prep_dma_interrupt() is needed to support this capability. Without setting the callback, dma_async_device_register() will fail dma capability check. 
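In sketch form, the pairing rule this fix restores: every advertised capability needs its matching prep callback before registration:

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
	/* dma_async_device_register() checks each set capability for a
	 * matching callback and rejects the device otherwise. */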
Fixes: 4e5a4eb20393 ("dmaengine: idxd: set DMA_INTERRUPT cap bit") Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/165101232637.3951447.15765792791591763119.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/dma.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index d3476a532d3b..e0874cb4721c 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -87,6 +87,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq, hw->completion_addr = compl; } +static struct dma_async_tx_descriptor * +idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags) +{ + struct idxd_wq *wq = to_idxd_wq(c); + u32 desc_flags; + struct idxd_desc *desc; + + if (wq->state != IDXD_WQ_ENABLED) + return NULL; + + op_flag_setup(flags, &desc_flags); + desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + if (IS_ERR(desc)) + return NULL; + + idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP, + 0, 0, 0, desc->compl_dma, desc_flags); + desc->txd.flags = flags; + return &desc->txd; +} + static struct dma_async_tx_descriptor * idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) @@ -198,6 +219,7 @@ int idxd_register_dma_device(struct idxd_device *idxd) dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask); dma->device_release = idxd_dma_release; + dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt; if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { dma_cap_set(DMA_MEMCPY, dma->cap_mask); dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy; -- cgit From 411dccf9d271e49f37471c73ebedb18719d6b608 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Mon, 16 May 2022 11:54:12 +0000 Subject: dmaengine: idxd: Remove unnecessary synchronize_irq() before free_irq() Calling synchronize_irq() right before free_irq() is quite useless. On one hand the IRQ can easily fire again before free_irq() is entered, on the other hand free_irq() itself calls synchronize_irq() internally (in a race condition free way), before any state associated with the IRQ is freed. Signed-off-by: Minghao Chi Link: https://lore.kernel.org/r/20220516115412.1651772-1-chi.minghao@zte.com.cn Acked-by: Dave Jiang Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 8b1f8591ae83..1143886f4a80 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -1179,7 +1179,6 @@ void idxd_wq_free_irq(struct idxd_wq *wq) if (wq->type != IDXD_WQT_KERNEL) return; - synchronize_irq(ie->vector); free_irq(ie->vector, ie); idxd_flush_pending_descs(ie); if (idxd->request_int_handles) -- cgit From 6cd4154a266584ca3908305ce64e14a5f0f3e81f Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Wed, 27 Apr 2022 11:56:45 +0200 Subject: dt-bindings: dmaengine: Introduce RZN1 dmamux bindings The Renesas RZN1 DMA IP is based on a DW core, with eg. an additional dmamux register located in the system control area which can take up to 32 requests (16 per DMA controller). Each DMA channel can be wired to two different peripherals. 
Signed-off-by: Miquel Raynal Reviewed-by: Geert Uytterhoeven Reviewed-by: Rob Herring Acked-by: Vinod Koul Link: https://lore.kernel.org/r/20220427095653.91804-2-miquel.raynal@bootlin.com Signed-off-by: Vinod Koul --- .../bindings/dma/renesas,rzn1-dmamux.yaml | 51 ++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 52 insertions(+) create mode 100644 Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml diff --git a/Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml b/Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml new file mode 100644 index 000000000000..d83013b0dd74 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml @@ -0,0 +1,51 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/dma/renesas,rzn1-dmamux.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Renesas RZ/N1 DMA mux + +maintainers: + - Miquel Raynal + +allOf: + - $ref: "dma-router.yaml#" + +properties: + compatible: + const: renesas,rzn1-dmamux + + reg: + maxItems: 1 + description: DMA mux first register offset within the system control parent. + + '#dma-cells': + const: 6 + description: + The first four cells are dedicated to the master DMA controller. The fifth + cell gives the DMA mux bit index that must be set starting from 0. The + sixth cell gives the binary value that must be written there, ie. 0 or 1. + + dma-masters: + minItems: 1 + maxItems: 2 + + dma-requests: + const: 32 + +required: + - reg + - dma-requests + +additionalProperties: false + +examples: + - | + dma-router@a0 { + compatible = "renesas,rzn1-dmamux"; + reg = <0xa0 4>; + #dma-cells = <6>; + dma-masters = <&dma0 &dma1>; + dma-requests = <32>; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 2cdb36afceb8..e7c0b08aae8c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -19040,6 +19040,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER M: Viresh Kumar R: Andy Shevchenko S: Maintained +F: Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml F: Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml F: drivers/dma/dw/ F: include/dt-bindings/dma/dw-dmac.h -- cgit From ad73c629b591c73baac67cb388ea342d076f6b1b Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Wed, 27 Apr 2022 11:56:46 +0200 Subject: dt-bindings: clock: r9a06g032-sysctrl: Reference the DMAMUX subnode This system controller contains several registers that have nothing to do with the clock handling, like the DMA mux register. Describe this part of the system controller as a subnode. 
Signed-off-by: Miquel Raynal Reviewed-by: Rob Herring Acked-by: Stephen Boyd Reviewed-by: Geert Uytterhoeven Acked-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20220427095653.91804-3-miquel.raynal@bootlin.com Signed-off-by: Vinod Koul --- .../devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml b/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml index 25dbb0fac065..95bf485c6cec 100644 --- a/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml +++ b/Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml @@ -39,6 +39,17 @@ properties: '#power-domain-cells': const: 0 + '#address-cells': + const: 1 + + '#size-cells': + const: 1 + +patternProperties: + "^dma-router@[a-f0-9]+$": + type: object + $ref: "../dma/renesas,rzn1-dmamux.yaml#" + required: - compatible - reg -- cgit From 7ac92262e1fb574ee4da2944b83e412aa0ae2ab4 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Wed, 27 Apr 2022 11:56:47 +0200 Subject: dt-bindings: dmaengine: Introduce RZN1 DMA compatible Just like for the NAND controller that is also on this SoC, let's provide a SoC generic and a more specific couple of compatibles for the DMA controller. Signed-off-by: Miquel Raynal Reviewed-by: Geert Uytterhoeven Acked-by: Rob Herring Acked-by: Vinod Koul Link: https://lore.kernel.org/r/20220427095653.91804-4-miquel.raynal@bootlin.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml b/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml index 6b35089ac017..c13649bf7f19 100644 --- a/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml +++ b/Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml @@ -15,7 +15,13 @@ allOf: properties: compatible: - const: snps,dma-spear1340 + oneOf: + - const: snps,dma-spear1340 + - items: + - enum: + - renesas,r9a06g032-dma + - const: renesas,rzn1-dma + "#dma-cells": minimum: 3 -- cgit From 885525c1e7e27ea6207d648a8db20dfbbd9e4238 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Wed, 27 Apr 2022 11:56:48 +0200 Subject: clk: renesas: r9a06g032: Export function to set dmamux The dmamux register is located within the system controller. Without syscon, we need an extra helper in order to give write access to this register to a dmamux driver. 
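A hedged usage sketch of the exported helper (the mask/value semantics come from the implementation in the diff below; the caller here is hypothetical):

	/* Drive DMAMUX bit 5 high; what that selects is board wiring. */
	ret = r9a06g032_sysctrl_set_dmamux(BIT(5), BIT(5));
	if (ret)	/* -EPROBE_DEFER until the clock driver has probed */
		return ret;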
Signed-off-by: Miquel Raynal Acked-by: Stephen Boyd Reviewed-by: Geert Uytterhoeven Acked-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20220427095653.91804-5-miquel.raynal@bootlin.com Signed-off-by: Vinod Koul --- drivers/clk/renesas/r9a06g032-clocks.c | 35 ++++++++++++++++++++++++++- include/linux/soc/renesas/r9a06g032-sysctrl.h | 11 +++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 include/linux/soc/renesas/r9a06g032-sysctrl.h diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index c99942f0e4d4..052d99059981 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -20,9 +20,12 @@ #include #include #include +#include #include #include +#define R9A06G032_SYSCTRL_DMAMUX 0xA0 + struct r9a06g032_gate { u16 gate, reset, ready, midle, scon, mirack, mistat; @@ -315,6 +318,30 @@ struct r9a06g032_priv { void __iomem *reg; }; +static struct r9a06g032_priv *sysctrl_priv; + +/* Exported helper to access the DMAMUX register */ +int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val) +{ + unsigned long flags; + u32 dmamux; + + if (!sysctrl_priv) + return -EPROBE_DEFER; + + spin_lock_irqsave(&sysctrl_priv->lock, flags); + + dmamux = readl(sysctrl_priv->reg + R9A06G032_SYSCTRL_DMAMUX); + dmamux &= ~mask; + dmamux |= val & mask; + writel(dmamux, sysctrl_priv->reg + R9A06G032_SYSCTRL_DMAMUX); + + spin_unlock_irqrestore(&sysctrl_priv->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(r9a06g032_sysctrl_set_dmamux); + /* register/bit pairs are encoded as an uint16_t */ static void clk_rdesc_set(struct r9a06g032_priv *clocks, @@ -963,7 +990,13 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev) if (error) return error; - return r9a06g032_add_clk_domain(dev); + error = r9a06g032_add_clk_domain(dev); + if (error) + return error; + + sysctrl_priv = clocks; + + return 0; } static const struct of_device_id r9a06g032_match[] = { diff --git a/include/linux/soc/renesas/r9a06g032-sysctrl.h b/include/linux/soc/renesas/r9a06g032-sysctrl.h new file mode 100644 index 000000000000..066dfb15cbdd --- /dev/null +++ b/include/linux/soc/renesas/r9a06g032-sysctrl.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ +#define __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ + +#ifdef CONFIG_CLK_R9A06G032 +int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val); +#else +static inline int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val) { return -ENODEV; } +#endif + +#endif /* __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ */ -- cgit From 134d9c52fca26d2d199516e915da00f0cc6adc73 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Wed, 27 Apr 2022 11:56:49 +0200 Subject: dmaengine: dw: dmamux: Introduce RZN1 DMA router support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Renesas RZN1 DMA IP is based on a DW core, with eg. an additional dmamux register located in the system control area which can take up to 32 requests (16 per DMA controller). Each DMA channel can be wired to two different peripherals. We need two additional information from the 'dmas' property: the channel (bit in the dmamux register) that must be accessed and the value of the mux for this channel. 
Signed-off-by: Miquel Raynal Reviewed-by: Andy Shevchenko Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20220427095653.91804-6-miquel.raynal@bootlin.com Signed-off-by: Vinod Koul --- drivers/dma/dw/Kconfig | 9 +++ drivers/dma/dw/Makefile | 2 + drivers/dma/dw/rzn1-dmamux.c | 155 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 166 insertions(+) create mode 100644 drivers/dma/dw/rzn1-dmamux.c diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index db25f9b7778c..a9828ddd6d06 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig @@ -16,6 +16,15 @@ config DW_DMAC Support the Synopsys DesignWare AHB DMA controller. This can be integrated in chips such as the Intel Cherrytrail. +config RZN1_DMAMUX + tristate "Renesas RZ/N1 DMAMUX driver" + depends on DW_DMAC + depends on ARCH_RZN1 || COMPILE_TEST + help + Support the Renesas RZ/N1 DMAMUX which is located in front of + the Synopsys DesignWare AHB DMA controller located on Renesas + SoCs. + config DW_DMAC_PCI tristate "Synopsys DesignWare AHB DMA PCI driver" depends on PCI diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile index a6f358ad8591..e1796015f213 100644 --- a/drivers/dma/dw/Makefile +++ b/drivers/dma/dw/Makefile @@ -9,3 +9,5 @@ dw_dmac-$(CONFIG_OF) += of.o obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o dw_dmac_pci-y := pci.o + +obj-$(CONFIG_RZN1_DMAMUX) += rzn1-dmamux.o diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c new file mode 100644 index 000000000000..11d254e450b0 --- /dev/null +++ b/drivers/dma/dw/rzn1-dmamux.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 Schneider-Electric + * Author: Miquel Raynal + */ +#include +#include +#include +#include +#include +#include + +#define RNZ1_DMAMUX_NCELLS 6 +#define RZN1_DMAMUX_MAX_LINES 64 +#define RZN1_DMAMUX_LINES_PER_CTLR 16 + +struct rzn1_dmamux_data { + struct dma_router dmarouter; + DECLARE_BITMAP(used_chans, 2 * RZN1_DMAMUX_LINES_PER_CTLR); +}; + +struct rzn1_dmamux_map { + unsigned int req_idx; +}; + +static void rzn1_dmamux_free(struct device *dev, void *route_data) +{ + struct rzn1_dmamux_data *dmamux = dev_get_drvdata(dev); + struct rzn1_dmamux_map *map = route_data; + + dev_dbg(dev, "Unmapping DMAMUX request %u\n", map->req_idx); + + clear_bit(map->req_idx, dmamux->used_chans); + + kfree(map); +} + +static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); + struct rzn1_dmamux_data *dmamux = platform_get_drvdata(pdev); + struct rzn1_dmamux_map *map; + unsigned int dmac_idx, chan, val; + u32 mask; + int ret; + + if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS) + return ERR_PTR(-EINVAL); + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) + return ERR_PTR(-ENOMEM); + + chan = dma_spec->args[0]; + map->req_idx = dma_spec->args[4]; + val = dma_spec->args[5]; + dma_spec->args_count -= 2; + + if (chan >= RZN1_DMAMUX_LINES_PER_CTLR) { + dev_err(&pdev->dev, "Invalid DMA request line: %u\n", chan); + ret = -EINVAL; + goto free_map; + } + + if (map->req_idx >= RZN1_DMAMUX_MAX_LINES || + (map->req_idx % RZN1_DMAMUX_LINES_PER_CTLR) != chan) { + dev_err(&pdev->dev, "Invalid MUX request line: %u\n", map->req_idx); + ret = -EINVAL; + goto free_map; + } + + dmac_idx = map->req_idx >= RZN1_DMAMUX_LINES_PER_CTLR ? 
1 : 0; dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", dmac_idx); if (!dma_spec->np) { dev_err(&pdev->dev, "Can't get DMA master\n"); ret = -EINVAL; goto free_map; } dev_dbg(&pdev->dev, "Mapping DMAMUX request %u to DMAC%u request %u\n", map->req_idx, dmac_idx, chan); if (test_and_set_bit(map->req_idx, dmamux->used_chans)) { ret = -EBUSY; goto free_map; } mask = BIT(map->req_idx); ret = r9a06g032_sysctrl_set_dmamux(mask, val ? mask : 0); if (ret) goto clear_bitmap; return map; clear_bitmap: clear_bit(map->req_idx, dmamux->used_chans); free_map: kfree(map); return ERR_PTR(ret); } static const struct of_device_id rzn1_dmac_match[] = { { .compatible = "renesas,rzn1-dma" }, {} }; static int rzn1_dmamux_probe(struct platform_device *pdev) { struct device_node *mux_node = pdev->dev.of_node; const struct of_device_id *match; struct device_node *dmac_node; struct rzn1_dmamux_data *dmamux; dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL); if (!dmamux) return -ENOMEM; dmac_node = of_parse_phandle(mux_node, "dma-masters", 0); if (!dmac_node) return dev_err_probe(&pdev->dev, -ENODEV, "Can't get DMA master node\n"); match = of_match_node(rzn1_dmac_match, dmac_node); of_node_put(dmac_node); if (!match) return dev_err_probe(&pdev->dev, -EINVAL, "DMA master is not supported\n"); dmamux->dmarouter.dev = &pdev->dev; dmamux->dmarouter.route_free = rzn1_dmamux_free; platform_set_drvdata(pdev, dmamux); return of_dma_router_register(mux_node, rzn1_dmamux_route_allocate, &dmamux->dmarouter); } static const struct of_device_id rzn1_dmamux_match[] = { { .compatible = "renesas,rzn1-dmamux" }, {} }; static struct platform_driver rzn1_dmamux_driver = { .driver = { .name = "renesas,rzn1-dmamux", .of_match_table = rzn1_dmamux_match, }, .probe = rzn1_dmamux_probe, }; module_platform_driver(rzn1_dmamux_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Miquel Raynal Date: Wed, 27 Apr 2022 11:56:50 +0200 Subject: clk: renesas: r9a06g032: Probe possible children The clock controller device on r9a06g032 takes the whole memory range that is described as being a system controller. This range contains many different (unrelated?) registers besides the ones belonging to the clock controller, and some of them may need to be accessed by other drivers. For instance, the dmamux registers are there. The dmamux "device" will be described as a child node of the clock/system controller node, which means we need the top device driver (the clock controller driver in this case) to populate its children manually. In case of an error when populating the children, we deliberately do not fail the probe, to keep the clk driver up and running.
Signed-off-by: Miquel Raynal Acked-by: Stephen Boyd Reviewed-by: Geert Uytterhoeven Acked-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20220427095653.91804-7-miquel.raynal@bootlin.com Signed-off-by: Vinod Koul --- drivers/clk/renesas/r9a06g032-clocks.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index 052d99059981..69c0eb0eabd1 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -996,6 +997,10 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev) sysctrl_priv = clocks; + error = of_platform_populate(np, NULL, NULL, dev); + if (error) + dev_err(dev, "Failed to populate children (%d)\n", error); + return 0; } -- cgit From d5a8fe0fee54d830c47959f625ffc41d080ee526 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Wed, 27 Apr 2022 11:56:51 +0200 Subject: dmaengine: dw: Add RZN1 compatible The Renesas RZN1 DMA IP is very close to the original DW DMA IP, a DMA router has been introduced to handle the wiring options that have been added. Signed-off-by: Miquel Raynal Reviewed-by: Geert Uytterhoeven Acked-by: Andy Shevchenko Acked-By: Vinod Koul Link: https://lore.kernel.org/r/20220427095653.91804-8-miquel.raynal@bootlin.com Signed-off-by: Vinod Koul --- drivers/dma/dw/platform.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 246118955877..47f2292dba98 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -137,6 +137,7 @@ static void dw_shutdown(struct platform_device *pdev) #ifdef CONFIG_OF static const struct of_device_id dw_dma_of_id_table[] = { { .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata }, + { .compatible = "renesas,rzn1-dma", .data = &dw_dma_chip_pdata }, {} }; MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); -- cgit From 2cdd3ca67aeabf4d6274af2d1c7e22f17a33dd64 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 5 May 2022 17:32:36 +0800 Subject: dmaengine: tegra: Fix build error without IOMMU_API MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/dma/tegra186-gpc-dma.c: In function ‘tegra_dma_probe’: drivers/dma/tegra186-gpc-dma.c:1364:24: error: ‘struct iommu_fwspec’ has no member named ‘ids’ stream_id = iommu_spec->ids[0] & 0xffff; ^~ Make TEGRA186_GPC_DMA depends on IOMMU_API to fix this. Fixes: ee17028009d4 ("dmaengine: tegra: Add tegra gpcdma driver") Signed-off-by: YueHaibing Link: https://lore.kernel.org/r/20220505093236.15076-1-yuehaibing@huawei.com Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index cc1464e4acde..857b2174a4cb 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -632,6 +632,7 @@ config TXX9_DMAC config TEGRA186_GPC_DMA tristate "NVIDIA Tegra GPC DMA support" depends on (ARCH_TEGRA || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT + depends on IOMMU_API select DMA_ENGINE help Support for the NVIDIA Tegra General Purpose Central DMA controller. 
-- cgit From bd1eca7b2c66c53873a0eab84f9f301aca41f33a Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 3 May 2022 08:54:04 +0200 Subject: dt-bindings: dmaengine: mmp: deprecate '#dma-channels' and '#dma-requests' The generic properties, used in most of the drivers and defined in generic dma-common DT bindings, are 'dma-channels' and 'dma-requests'. Signed-off-by: Krzysztof Kozlowski Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/20220503065407.52188-2-krzysztof.kozlowski@linaro.org Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/mmp-dma.txt | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/mmp-dma.txt b/Documentation/devicetree/bindings/dma/mmp-dma.txt index 8f7364a7b349..ec18bf0a802a 100644 --- a/Documentation/devicetree/bindings/dma/mmp-dma.txt +++ b/Documentation/devicetree/bindings/dma/mmp-dma.txt @@ -10,10 +10,12 @@ Required properties: or one irq for pdma device Optional properties: -- #dma-channels: Number of DMA channels supported by the controller (defaults +- dma-channels: Number of DMA channels supported by the controller (defaults to 32 when not specified) -- #dma-requests: Number of DMA requestor lines supported by the controller +- #dma-channels: deprecated +- dma-requests: Number of DMA requestor lines supported by the controller (defaults to 32 when not specified) +- #dma-requests: deprecated "marvell,pdma-1.0" Used platforms: pxa25x, pxa27x, pxa3xx, pxa93x, pxa168, pxa910, pxa688. @@ -33,7 +35,7 @@ pdma: dma-controller@d4000000 { reg = <0xd4000000 0x10000>; interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15>; interrupt-parent = <&intcmux32>; - #dma-channels = <16>; + dma-channels = <16>; }; /* @@ -45,7 +47,7 @@ pdma: dma-controller@d4000000 { compatible = "marvell,pdma-1.0"; reg = <0xd4000000 0x10000>; interrupts = <47>; - #dma-channels = <16>; + dma-channels = <16>; }; -- cgit From d9cb0a4c0be595b55e30cf8774d3e92ab38bca5b Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 3 May 2022 08:54:05 +0200 Subject: dmaengine: pxa: deprecate '#dma-channels' and '#dma-requests' The generic properties, used in most of the drivers and defined in generic dma-common DT bindings, are 'dma-channels' and 'dma-requests'. Switch to new properties while keeping backward compatibility. 
Signed-off-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20220503065407.52188-3-krzysztof.kozlowski@linaro.org Signed-off-by: Vinod Koul --- drivers/dma/pxa_dma.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 6078cc81892e..e7034f6f3994 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -1365,10 +1365,17 @@ static int pxad_probe(struct platform_device *op) of_id = of_match_device(pxad_dt_ids, &op->dev); if (of_id) { - of_property_read_u32(op->dev.of_node, "#dma-channels", - &dma_channels); - ret = of_property_read_u32(op->dev.of_node, "#dma-requests", + /* Parse new and deprecated dma-channels properties */ + if (of_property_read_u32(op->dev.of_node, "dma-channels", + &dma_channels)) + of_property_read_u32(op->dev.of_node, "#dma-channels", + &dma_channels); + /* Parse new and deprecated dma-requests properties */ + ret = of_property_read_u32(op->dev.of_node, "dma-requests", &nb_requestors); + if (ret) + ret = of_property_read_u32(op->dev.of_node, "#dma-requests", + &nb_requestors); if (ret) { dev_warn(pdev->slave.dev, "#dma-requests set to default 32 as missing in OF: %d", -- cgit From 607c04a0441ff477666dca4744e94f555aa8fd65 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 3 May 2022 08:54:06 +0200 Subject: dmaengine: mmp: deprecate '#dma-channels' The generic property, used in most of the drivers and defined in generic dma-common DT bindings, is 'dma-channels'. Switch to new property while keeping backward compatibility. Signed-off-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20220503065407.52188-4-krzysztof.kozlowski@linaro.org Signed-off-by: Vinod Koul --- drivers/dma/mmp_pdma.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 5a53d7fcef01..e8d71b35593e 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -1043,13 +1043,17 @@ static int mmp_pdma_probe(struct platform_device *op) return PTR_ERR(pdev->base); of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); - if (of_id) - of_property_read_u32(pdev->dev->of_node, "#dma-channels", - &dma_channels); - else if (pdata && pdata->dma_channels) + if (of_id) { + /* Parse new and deprecated dma-channels properties */ + if (of_property_read_u32(pdev->dev->of_node, "dma-channels", + &dma_channels)) + of_property_read_u32(pdev->dev->of_node, "#dma-channels", + &dma_channels); + } else if (pdata && pdata->dma_channels) { dma_channels = pdata->dma_channels; - else + } else { dma_channels = 32; /* default 32 channel */ + } pdev->dma_channels = dma_channels; for (i = 0; i < dma_channels; i++) { -- cgit From a725e582d720066e49034f8980af57a720d75127 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Wed, 27 Apr 2022 18:11:23 +0200 Subject: dmaengine: ti: deprecate '#dma-channels' The generic property, used in most of the drivers and defined in generic dma-common DT bindings, is 'dma-channels'. Switch to new property while keeping backward compatibility. 
Signed-off-by: Krzysztof Kozlowski Reviewed-by: Tony Lindgren Link: https://lore.kernel.org/r/20220427161126.647073-4-krzysztof.kozlowski@linaro.org Signed-off-by: Vinod Koul --- drivers/dma/ti/cppi41.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c index 062bd9bd4de0..695915dba707 100644 --- a/drivers/dma/ti/cppi41.c +++ b/drivers/dma/ti/cppi41.c @@ -1105,8 +1105,12 @@ static int cppi41_dma_probe(struct platform_device *pdev) cdd->qmgr_num_pend = glue_info->qmgr_num_pend; cdd->first_completion_queue = glue_info->first_completion_queue; + /* Parse new and deprecated dma-channels properties */ ret = of_property_read_u32(dev->of_node, - "#dma-channels", &cdd->n_chans); + "dma-channels", &cdd->n_chans); + if (ret) + ret = of_property_read_u32(dev->of_node, + "#dma-channels", &cdd->n_chans); if (ret) goto err_get_n_chans; -- cgit From 9d6a2d92e450926c483e45eaf426080a19219f4e Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Wed, 4 May 2022 17:53:20 +0200 Subject: dmaengine: stm32-mdma: remove GISR1 register GISR1 was described in a not up-to-date documentation when the stm32-mdma driver has been developed. This register has not been added in reference manual of STM32 SoC with MDMA, which have only 32 MDMA channels. So remove it from stm32-mdma driver. Fixes: a4ffb13c8946 ("dmaengine: Add STM32 MDMA driver") Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20220504155322.121431-2-amelie.delaunay@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-mdma.c | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 95e5831e490a..57f0eb8a18fc 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -34,7 +34,6 @@ #include "virt-dma.h" #define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */ -#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */ /* MDMA Channel x interrupt/status register */ #define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */ @@ -169,7 +168,7 @@ #define STM32_MDMA_MAX_BUF_LEN 128 #define STM32_MDMA_MAX_BLOCK_LEN 65536 -#define STM32_MDMA_MAX_CHANNELS 63 +#define STM32_MDMA_MAX_CHANNELS 32 #define STM32_MDMA_MAX_REQUESTS 256 #define STM32_MDMA_MAX_BURST 128 #define STM32_MDMA_VERY_HIGH_PRIORITY 0x3 @@ -1324,21 +1323,11 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) /* Find out which channel generates the interrupt */ status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0); - if (status) { - id = __ffs(status); - } else { - status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1); - if (!status) { - dev_dbg(mdma2dev(dmadev), "spurious it\n"); - return IRQ_NONE; - } - id = __ffs(status); - /* - * As GISR0 provides status for channel id from 0 to 31, - * so GISR1 provides status for channel id from 32 to 62 - */ - id += 32; + if (!status) { + dev_dbg(mdma2dev(dmadev), "spurious it\n"); + return IRQ_NONE; } + id = __ffs(status); chan = &dmadev->chan[id]; if (!chan) { -- cgit From da3b8ddb464bd49b6248d00ca888ad751c9e44fd Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Wed, 4 May 2022 17:53:21 +0200 Subject: dmaengine: stm32-mdma: fix chan initialization in stm32_mdma_irq_handler() The parameter to pass back to the handler function when irq has been requested is a struct stm32_mdma_device pointer, not a struct stm32_mdma_chan pointer. Even if chan is reinit later in the function, remove this wrong initialization. 
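The contract being restored: the dev_id pointer handed over at request time is exactly what the handler later receives as devid. A minimal sketch of that pairing (structure and handler names are illustrative, not from this driver):

    #include <linux/interrupt.h>

    static irqreturn_t my_dma_irq_handler(int irq, void *devid)
    {
            /* must match the dev_id passed to devm_request_irq() below */
            struct my_dma_device *dmadev = devid;

            /* ... look up the signalling channel via dmadev ... */
            return IRQ_HANDLED;
    }

    /* at probe time: */
    ret = devm_request_irq(&pdev->dev, irq, my_dma_irq_handler, 0,
                           dev_name(&pdev->dev), dmadev);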
Fixes: a4ffb13c8946 ("dmaengine: Add STM32 MDMA driver") Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20220504155322.121431-3-amelie.delaunay@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-mdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 57f0eb8a18fc..a5cbfbbb93d1 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -1318,7 +1318,7 @@ static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan) static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) { struct stm32_mdma_device *dmadev = devid; - struct stm32_mdma_chan *chan = devid; + struct stm32_mdma_chan *chan; u32 reg, id, ccr, ien, status; /* Find out which channel generates the interrupt */ -- cgit From 2763826966808800beeda5db406b3b704edc8137 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Wed, 4 May 2022 17:53:22 +0200 Subject: dmaengine: stm32-mdma: use dev_dbg on non-busy channel spurious it If interrupt occurs while !chan->busy, it means channel has been disabled between the raise of the interruption and the read of status and ien, so, spurious interrupt can be silently discarded. Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20220504155322.121431-4-amelie.delaunay@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-mdma.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index a5cbfbbb93d1..caf0cce8f528 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -1345,9 +1345,12 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) if (!(status & ien)) { spin_unlock(&chan->vchan.lock); - dev_warn(chan2dev(chan), - "spurious it (status=0x%04x, ien=0x%04x)\n", - status, ien); + if (chan->busy) + dev_warn(chan2dev(chan), + "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien); + else + dev_dbg(chan2dev(chan), + "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien); return IRQ_NONE; } -- cgit From 6c3c2066d6dc317fd05893d4ce07b2f3633e3e2a Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Wed, 27 Apr 2022 08:40:48 +0200 Subject: dt-bindings: dma: pl330: Add power-domains The pl330 DMA controller on Exynos SoC (e.g. dma-controller@3880000 in Exynos5420) belongs to power domain, so allow such property. Reported-by: Rob Herring Signed-off-by: Krzysztof Kozlowski Acked-by: Rob Herring Link: https://lore.kernel.org/r/20220427064048.86635-1-krzysztof.kozlowski@linaro.org Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/arm,pl330.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/devicetree/bindings/dma/arm,pl330.yaml b/Documentation/devicetree/bindings/dma/arm,pl330.yaml index decab185cf4d..2bec69b308f8 100644 --- a/Documentation/devicetree/bindings/dma/arm,pl330.yaml +++ b/Documentation/devicetree/bindings/dma/arm,pl330.yaml @@ -55,6 +55,9 @@ properties: dma-coherent: true + power-domains: + maxItems: 1 + resets: minItems: 1 maxItems: 2 -- cgit From c7399e6d3b18dc17974bc44e2e178664f1bd1bec Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Thu, 21 Apr 2022 17:47:33 +0530 Subject: dmaengine: qcom: gpi: Add support for sc7280 Add compatible and driver_data for GPI DMA engines found in Qualcomm SC7280. The driver_data contains ee_offset of 0x10000. 
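A sketch of how such a per-compatible constant is typically recovered at probe time; the 0x10000 value is from this patch, the surrounding names are illustrative:

    #include <linux/of_device.h>

    static int my_gpi_probe(struct platform_device *pdev)
    {
            /* .data of the matched of_device_id entry, e.g. (void *)0x10000 */
            unsigned long ee_offset =
                    (unsigned long)of_device_get_match_data(&pdev->dev);

            return my_setup_regs(pdev, ee_offset); /* hypothetical helper */
    }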
Signed-off-by: Vinod Koul Link: https://lore.kernel.org/r/20220421121733.1829350-1-vkoul@kernel.org Signed-off-by: Vinod Koul --- drivers/dma/qcom/gpi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c index d8fad6a77560..8f0c9c4e2efd 100644 --- a/drivers/dma/qcom/gpi.c +++ b/drivers/dma/qcom/gpi.c @@ -2286,6 +2286,7 @@ static int gpi_probe(struct platform_device *pdev) } static const struct of_device_id gpi_of_match[] = { + { .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 }, { .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 }, { .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 }, { .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 }, -- cgit From d0b360e3c164be7ccc8eb2bfc341287959e27040 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Wed, 4 May 2022 18:17:24 +0200 Subject: dmaengine: stm32-dmamux: avoid reset of dmamux if used by coprocessor One of the two DMA controllers managed by the DMAMUX can be used by the coprocessor. It is defined in the device tree with dma-masters. When the two DMA controllers are used by the main CPU, dma-masters = <&dma1, &dma2>; is specified in the device tree. When one of the controllers is used by coprocessor (so not managed by Linux), dma-masters = <&dma1>; is specified in the device tree. In this case, Linux driver must not reset the DMAMUX, because it could have been configured by the coprocessor to use the second DMA controller. count is the number of DMA controllers defined in dma-masters property. Reset only if resets property is found and valid in device tree, and if the two DMA controllers are under Linux control. Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20220504161724.123180-1-amelie.delaunay@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-dmamux.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index d5d55732adba..eee0c5aa5fb5 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -267,7 +267,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev) ret = PTR_ERR(rst); if (ret == -EPROBE_DEFER) goto err_clk; - } else { + } else if (count > 1) { /* Don't reset if there is only one dma-master */ reset_control_assert(rst); udelay(2); reset_control_deassert(rst); -- cgit From db60a63eb6850fce081c1a22e7318e5d37f4dcf5 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Thu, 5 May 2022 13:56:08 +0200 Subject: dmaengine: stm32-dma: introduce stm32_dma_sg_inc to manage chan->next_sg chan->next_sg is used to know which transfer will start after the ongoing one. It is incremented for each new transfer, either on transfer start for non-cyclic transfers, or on transfer complete interrupt for cyclic transfers. For cyclic transfer, when the last item is reached, chan->next_sg must be reinitialized to the first item. 
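Equivalently, for cyclic transfers the index is plain ring arithmetic; a one-line sketch of the invariant the new helper maintains:

    /* next_sg always stays in [0, num_sgs) for cyclic descriptors */
    chan->next_sg = (chan->next_sg + 1) % chan->desc->num_sgs;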
Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20220505115611.38845-2-amelie.delaunay@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index d2365fab1b7a..5afe4205f57b 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -535,6 +535,13 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan) dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr); } +static void stm32_dma_sg_inc(struct stm32_dma_chan *chan) +{ + chan->next_sg++; + if (chan->desc->cyclic && (chan->next_sg == chan->desc->num_sgs)) + chan->next_sg = 0; +} + static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan); static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) @@ -575,7 +582,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar); stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr); - chan->next_sg++; + stm32_dma_sg_inc(chan); /* Clear interrupt status if it is there */ status = stm32_dma_irq_status(chan); @@ -606,9 +613,6 @@ static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan) dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); if (dma_scr & STM32_DMA_SCR_DBM) { - if (chan->next_sg == chan->desc->num_sgs) - chan->next_sg = 0; - sg_req = &chan->desc->sg_req[chan->next_sg]; if (dma_scr & STM32_DMA_SCR_CT) { @@ -630,7 +634,7 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan) if (chan->desc) { if (chan->desc->cyclic) { vchan_cyclic_callback(&chan->desc->vdesc); - chan->next_sg++; + stm32_dma_sg_inc(chan); stm32_dma_configure_next_sg(chan); } else { chan->busy = false; -- cgit From ded6230691e00b0f31afc8aa18f26c57072ff58f Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Thu, 5 May 2022 13:56:09 +0200 Subject: dmaengine: stm32-dma: pass DMA_SxSCR value to stm32_dma_handle_chan_done() stm32_dma_handle_chan_done() is called on Transfer Complete interrupt. As DMA_SxSCR register is read in interrupt handler, pass the value as parameter of stm32_dma_handle_chan_done(). Also return directly if chan->desc is null to remove one ident level. Then, stm32_dma_configure_next_sg() is doing something only if Double-Buffer Mode (DBM) is enabled, so, check it is enabled prior calling stm32_dma_configure_next_sg(), to remove one ident level in stm32_dma_configure_next_sg(). 
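The underlying pattern: snapshot the volatile status register once in the interrupt handler and pass the value down, so every helper reasons about the same state. A minimal sketch, with register and helper names purely illustrative:

    #include <linux/io.h>

    static irqreturn_t my_chan_irq(int irq, void *devid)
    {
            struct my_chan *chan = devid;
            u32 scr;

            /* one read; callees all see this snapshot */
            scr = readl_relaxed(chan->base + MY_SCR(chan->id));

            if (scr & MY_SCR_TCIE)
                    my_handle_chan_done(chan, scr);

            return IRQ_HANDLED;
    }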
Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20220505115611.38845-3-amelie.delaunay@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 54 ++++++++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 5afe4205f57b..eecd13795943 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -612,38 +612,38 @@ static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan) id = chan->id; dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); - if (dma_scr & STM32_DMA_SCR_DBM) { - sg_req = &chan->desc->sg_req[chan->next_sg]; - - if (dma_scr & STM32_DMA_SCR_CT) { - dma_sm0ar = sg_req->chan_reg.dma_sm0ar; - stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar); - dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n", - stm32_dma_read(dmadev, STM32_DMA_SM0AR(id))); - } else { - dma_sm1ar = sg_req->chan_reg.dma_sm1ar; - stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar); - dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n", - stm32_dma_read(dmadev, STM32_DMA_SM1AR(id))); - } + sg_req = &chan->desc->sg_req[chan->next_sg]; + + if (dma_scr & STM32_DMA_SCR_CT) { + dma_sm0ar = sg_req->chan_reg.dma_sm0ar; + stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar); + dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n", + stm32_dma_read(dmadev, STM32_DMA_SM0AR(id))); + } else { + dma_sm1ar = sg_req->chan_reg.dma_sm1ar; + stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar); + dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n", + stm32_dma_read(dmadev, STM32_DMA_SM1AR(id))); } } -static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan) +static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr) { - if (chan->desc) { - if (chan->desc->cyclic) { - vchan_cyclic_callback(&chan->desc->vdesc); - stm32_dma_sg_inc(chan); + if (!chan->desc) + return; + + if (chan->desc->cyclic) { + vchan_cyclic_callback(&chan->desc->vdesc); + stm32_dma_sg_inc(chan); + if (scr & STM32_DMA_SCR_DBM) stm32_dma_configure_next_sg(chan); - } else { - chan->busy = false; - if (chan->next_sg == chan->desc->num_sgs) { - vchan_cookie_complete(&chan->desc->vdesc); - chan->desc = NULL; - } - stm32_dma_start_transfer(chan); + } else { + chan->busy = false; + if (chan->next_sg == chan->desc->num_sgs) { + vchan_cookie_complete(&chan->desc->vdesc); + chan->desc = NULL; } + stm32_dma_start_transfer(chan); } } @@ -680,7 +680,7 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid) if (status & STM32_DMA_TCI) { stm32_dma_irq_clear(chan, STM32_DMA_TCI); if (scr & STM32_DMA_SCR_TCIE) - stm32_dma_handle_chan_done(chan); + stm32_dma_handle_chan_done(chan, scr); status &= ~STM32_DMA_TCI; } -- cgit From baa1424314f8e4bb5b266aaf9cc7fb7a9e65901b Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Thu, 5 May 2022 13:56:10 +0200 Subject: dmaengine: stm32-dma: rename pm ops before dma pause/resume introduction dmaengine framework offers device_pause and device_resume ops to pause an on-going transfer and resume it later. To avoid any misunderstanding with system sleep pm ops, rename pm ops into stm32_dma_pm_suspend and stm32_dma_pm_resume. 
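The naming now mirrors the two distinct lifecycles involved. A hedged sketch with illustrative names: device_pause/device_resume act on a single transfer via dmaengine, while the dev_pm_ops callbacks act on the whole controller across system sleep:

    static const struct dev_pm_ops my_dma_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(my_dma_pm_suspend, my_dma_pm_resume)
            SET_RUNTIME_PM_OPS(my_dma_runtime_suspend,
                               my_dma_runtime_resume, NULL)
    };

    /* dmaengine callbacks, a different mechanism entirely */
    dd->device_pause  = my_dma_pause;
    dd->device_resume = my_dma_resume;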
Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20220505115611.38845-4-amelie.delaunay@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index eecd13795943..0b35c5178501 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -1486,7 +1486,7 @@ static int stm32_dma_runtime_resume(struct device *dev) #endif #ifdef CONFIG_PM_SLEEP -static int stm32_dma_suspend(struct device *dev) +static int stm32_dma_pm_suspend(struct device *dev) { struct stm32_dma_device *dmadev = dev_get_drvdata(dev); int id, ret, scr; @@ -1510,14 +1510,14 @@ static int stm32_dma_suspend(struct device *dev) return 0; } -static int stm32_dma_resume(struct device *dev) +static int stm32_dma_pm_resume(struct device *dev) { return pm_runtime_force_resume(dev); } #endif static const struct dev_pm_ops stm32_dma_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume) + SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_pm_suspend, stm32_dma_pm_resume) SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend, stm32_dma_runtime_resume, NULL) }; -- cgit From 099a9a94be0e1c7fa45410deb2bff640320c3819 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Thu, 5 May 2022 13:56:11 +0200 Subject: dmaengine: stm32-dma: add device_pause/device_resume support At any time, a DMA transfer can be suspended to be restarted later before the end of the DMA transfer. In order to restart from the point where the transfer was stopped, DMA_SxNDTR has to be read after disabling the channel by clearing the EN bit in DMA_SxCR register, to know the number of data items already collected. Peripheral and/or memory addresses have to be updated in order to adjust the address pointers. SxNDTR register has to be updated with the remaining number of data items to be transferred (the value read when the channel was disabled). Then the channel can be re-enabled to resume the transfer from the point it was suspended. If the channel was configured in circular or double-buffer mode, the circular or double-buffer mode must be disabled before re-enabling the channel to be able to reconfigure SxNDTR register and re-activate circular or double-buffer mode on next Transfer Complete interrupt where channel will be disabled by HW. This is due to the fact that on resume, re-writing SxNDTR register value updates internal HW auto-reload data counter, and then it truncates all next transfers after a pause/resume sequence. 
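Condensed into a hedged sketch, the sequence reads as below; DMA_SxCR/SxNDTR/SxM0AR and the DMA_SCR_EN bit stand in for the driver's STM32_DMA_* macros, and the initial_* values come from the per-segment snapshot kept in driver state:

    #include <linux/io.h>

    /* pause: disable the stream, then latch how much is left */
    static u32 sketch_pause(void __iomem *base)
    {
            u32 scr = readl_relaxed(base + DMA_SxCR);

            writel_relaxed(scr & ~DMA_SCR_EN, base + DMA_SxCR);
            return readl_relaxed(base + DMA_SxNDTR); /* items remaining */
    }

    /* resume: adjust pointers by the data already moved, then restart */
    static void sketch_resume(void __iomem *base, u32 initial_ndtr,
                              u32 initial_m0ar, u32 remaining, u32 psize)
    {
            u32 done = (initial_ndtr - remaining) << psize;

            /* memory pointer moves past what was already transferred */
            writel_relaxed(initial_m0ar + done, base + DMA_SxM0AR);
            /* program the remaining count before re-enabling */
            writel_relaxed(remaining, base + DMA_SxNDTR);
            /* CIRC/DBM stay cleared until the next Transfer Complete,
             * otherwise the auto-reload value would be truncated */
            writel_relaxed(readl_relaxed(base + DMA_SxCR) | DMA_SCR_EN,
                           base + DMA_SxCR);
    }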
Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20220505115611.38845-5-amelie.delaunay@foss.st.com Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 247 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 234 insertions(+), 13 deletions(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 0b35c5178501..adb25a11c70f 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -208,6 +208,7 @@ struct stm32_dma_chan { u32 threshold; u32 mem_burst; u32 mem_width; + enum dma_status status; }; struct stm32_dma_device { @@ -485,6 +486,7 @@ static void stm32_dma_stop(struct stm32_dma_chan *chan) } chan->busy = false; + chan->status = DMA_COMPLETE; } static int stm32_dma_terminate_all(struct dma_chan *c) @@ -595,11 +597,11 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) stm32_dma_dump_reg(chan); /* Start DMA */ + chan->busy = true; + chan->status = DMA_IN_PROGRESS; reg->dma_scr |= STM32_DMA_SCR_EN; stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr); - chan->busy = true; - dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan); } @@ -627,6 +629,95 @@ static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan) } } +static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan) +{ + struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); + u32 dma_scr; + + /* + * Read and store current remaining data items and peripheral/memory addresses to be + * updated on resume + */ + dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); + /* + * Transfer can be paused while between a previous resume and reconfiguration on transfer + * complete. If transfer is cyclic and CIRC and DBM have been deactivated for resume, need + * to set it here in SCR backup to ensure a good reconfiguration on transfer complete. 
+ */ + if (chan->desc && chan->desc->cyclic) { + if (chan->desc->num_sgs == 1) + dma_scr |= STM32_DMA_SCR_CIRC; + else + dma_scr |= STM32_DMA_SCR_DBM; + } + chan->chan_reg.dma_scr = dma_scr; + + /* + * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt, otherwise + * on resume NDTR autoreload value will be wrong (lower than the initial period length) + */ + if (chan->desc && chan->desc->cyclic) { + dma_scr &= ~(STM32_DMA_SCR_DBM | STM32_DMA_SCR_CIRC); + stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr); + } + + chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id)); + + dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan); +} + +static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan) +{ + struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); + struct stm32_dma_sg_req *sg_req; + u32 dma_scr, status, id; + + id = chan->id; + dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); + + /* Clear interrupt status if it is there */ + status = stm32_dma_irq_status(chan); + if (status) + stm32_dma_irq_clear(chan, status); + + if (!chan->next_sg) + sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1]; + else + sg_req = &chan->desc->sg_req[chan->next_sg - 1]; + + /* Reconfigure NDTR with the initial value */ + stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), sg_req->chan_reg.dma_sndtr); + + /* Restore SPAR */ + stm32_dma_write(dmadev, STM32_DMA_SPAR(id), sg_req->chan_reg.dma_spar); + + /* Restore SM0AR/SM1AR whatever DBM/CT as they may have been modified */ + stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sg_req->chan_reg.dma_sm0ar); + stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sg_req->chan_reg.dma_sm1ar); + + /* Reactivate CIRC/DBM if needed */ + if (chan->chan_reg.dma_scr & STM32_DMA_SCR_DBM) { + dma_scr |= STM32_DMA_SCR_DBM; + /* Restore CT */ + if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CT) + dma_scr &= ~STM32_DMA_SCR_CT; + else + dma_scr |= STM32_DMA_SCR_CT; + } else if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CIRC) { + dma_scr |= STM32_DMA_SCR_CIRC; + } + stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr); + + stm32_dma_configure_next_sg(chan); + + stm32_dma_dump_reg(chan); + + dma_scr |= STM32_DMA_SCR_EN; + stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr); + + dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan); +} + static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr) { if (!chan->desc) @@ -635,10 +726,14 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr) if (chan->desc->cyclic) { vchan_cyclic_callback(&chan->desc->vdesc); stm32_dma_sg_inc(chan); - if (scr & STM32_DMA_SCR_DBM) + /* cyclic while CIRC/DBM disable => post resume reconfiguration needed */ + if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))) + stm32_dma_post_resume_reconfigure(chan); + else if (scr & STM32_DMA_SCR_DBM) stm32_dma_configure_next_sg(chan); } else { chan->busy = false; + chan->status = DMA_COMPLETE; if (chan->next_sg == chan->desc->num_sgs) { vchan_cookie_complete(&chan->desc->vdesc); chan->desc = NULL; @@ -679,8 +774,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid) if (status & STM32_DMA_TCI) { stm32_dma_irq_clear(chan, STM32_DMA_TCI); - if (scr & STM32_DMA_SCR_TCIE) - stm32_dma_handle_chan_done(chan, scr); + if (scr & STM32_DMA_SCR_TCIE) { + if (chan->status == DMA_PAUSED && !(scr & STM32_DMA_SCR_EN)) + stm32_dma_handle_chan_paused(chan); + else + stm32_dma_handle_chan_done(chan, scr); + } status &= 
~STM32_DMA_TCI; } @@ -715,6 +814,107 @@ static void stm32_dma_issue_pending(struct dma_chan *c) spin_unlock_irqrestore(&chan->vchan.lock, flags); } +static int stm32_dma_pause(struct dma_chan *c) +{ + struct stm32_dma_chan *chan = to_stm32_dma_chan(c); + unsigned long flags; + int ret; + + if (chan->status != DMA_IN_PROGRESS) + return -EPERM; + + spin_lock_irqsave(&chan->vchan.lock, flags); + ret = stm32_dma_disable_chan(chan); + /* + * A transfer complete flag is set to indicate the end of transfer due to the stream + * interruption, so wait for interrupt + */ + if (!ret) + chan->status = DMA_PAUSED; + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + return ret; +} + +static int stm32_dma_resume(struct dma_chan *c) +{ + struct stm32_dma_chan *chan = to_stm32_dma_chan(c); + struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); + struct stm32_dma_chan_reg chan_reg = chan->chan_reg; + u32 id = chan->id, scr, ndtr, offset, spar, sm0ar, sm1ar; + struct stm32_dma_sg_req *sg_req; + unsigned long flags; + + if (chan->status != DMA_PAUSED) + return -EPERM; + + scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); + if (WARN_ON(scr & STM32_DMA_SCR_EN)) + return -EPERM; + + spin_lock_irqsave(&chan->vchan.lock, flags); + + /* sg_reg[prev_sg] contains original ndtr, sm0ar and sm1ar before pausing the transfer */ + if (!chan->next_sg) + sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1]; + else + sg_req = &chan->desc->sg_req[chan->next_sg - 1]; + + ndtr = sg_req->chan_reg.dma_sndtr; + offset = (ndtr - chan_reg.dma_sndtr) << STM32_DMA_SCR_PSIZE_GET(chan_reg.dma_scr); + spar = sg_req->chan_reg.dma_spar; + sm0ar = sg_req->chan_reg.dma_sm0ar; + sm1ar = sg_req->chan_reg.dma_sm1ar; + + /* + * The peripheral and/or memory addresses have to be updated in order to adjust the + * address pointers. Need to check increment. + */ + if (chan_reg.dma_scr & STM32_DMA_SCR_PINC) + stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar + offset); + else + stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar); + + if (!(chan_reg.dma_scr & STM32_DMA_SCR_MINC)) + offset = 0; + + /* + * In case of DBM, the current target could be SM1AR. + * Need to temporarily deactivate CIRC/DBM to finish the current transfer, so + * SM0AR becomes the current target and must be updated with SM1AR + offset if CT=1. 
+ */ + if ((chan_reg.dma_scr & STM32_DMA_SCR_DBM) && (chan_reg.dma_scr & STM32_DMA_SCR_CT)) + stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sm1ar + offset); + else + stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sm0ar + offset); + + /* NDTR must be restored otherwise internal HW counter won't be correctly reset */ + stm32_dma_write(dmadev, STM32_DMA_SNDTR(id), chan_reg.dma_sndtr); + + /* + * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt, + * otherwise NDTR autoreload value will be wrong (lower than the initial period length) + */ + if (chan_reg.dma_scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)) + chan_reg.dma_scr &= ~(STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM); + + if (chan_reg.dma_scr & STM32_DMA_SCR_DBM) + stm32_dma_configure_next_sg(chan); + + stm32_dma_dump_reg(chan); + + /* The stream may then be re-enabled to restart transfer from the point it was stopped */ + chan->status = DMA_IN_PROGRESS; + chan_reg.dma_scr |= STM32_DMA_SCR_EN; + stm32_dma_write(dmadev, STM32_DMA_SCR(id), chan_reg.dma_scr); + + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan); + + return 0; +} + static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, enum dma_transfer_direction direction, enum dma_slave_buswidth *buswidth, @@ -982,10 +1182,12 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic( } /* Enable Circular mode or double buffer mode */ - if (buf_len == period_len) + if (buf_len == period_len) { chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC; - else + } else { chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM; + chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT; + } /* Clear periph ctrl if client set it */ chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL; @@ -1095,24 +1297,36 @@ static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan) { struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); struct stm32_dma_sg_req *sg_req; - u32 dma_scr, dma_smar, id; + u32 dma_scr, dma_smar, id, period_len; id = chan->id; dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); + /* In cyclic CIRC but not DBM, CT is not used */ if (!(dma_scr & STM32_DMA_SCR_DBM)) return true; sg_req = &chan->desc->sg_req[chan->next_sg]; + period_len = sg_req->len; + /* DBM - take care of a previous pause/resume not yet post reconfigured */ if (dma_scr & STM32_DMA_SCR_CT) { dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)); - return (dma_smar == sg_req->chan_reg.dma_sm0ar); + /* + * If transfer has been pause/resumed, + * SM0AR is in the range of [SM0AR:SM0AR+period_len] + */ + return (dma_smar >= sg_req->chan_reg.dma_sm0ar && + dma_smar < sg_req->chan_reg.dma_sm0ar + period_len); } dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)); - - return (dma_smar == sg_req->chan_reg.dma_sm1ar); + /* + * If transfer has been pause/resumed, + * SM1AR is in the range of [SM1AR:SM1AR+period_len] + */ + return (dma_smar >= sg_req->chan_reg.dma_sm1ar && + dma_smar < sg_req->chan_reg.dma_sm1ar + period_len); } static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, @@ -1152,7 +1366,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, residue = stm32_dma_get_remaining_bytes(chan); - if (!stm32_dma_is_current_sg(chan)) { + if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) { n_sg++; if (n_sg == chan->desc->num_sgs) n_sg = 0; @@ -1192,7 +1406,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c, u32 residue = 0; status = dma_cookie_status(c, cookie, state); - if (status == DMA_COMPLETE || 
!state) + if (status == DMA_COMPLETE) + return status; + + status = chan->status; + + if (!state) return status; spin_lock_irqsave(&chan->vchan.lock, flags); @@ -1381,6 +1600,8 @@ static int stm32_dma_probe(struct platform_device *pdev) dd->device_prep_slave_sg = stm32_dma_prep_slave_sg; dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic; dd->device_config = stm32_dma_slave_config; + dd->device_pause = stm32_dma_pause; + dd->device_resume = stm32_dma_resume; dd->device_terminate_all = stm32_dma_terminate_all; dd->device_synchronize = stm32_dma_synchronize; dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | -- cgit From 39b930bec80e7af4faae4caf4a36464a6d003bed Mon Sep 17 00:00:00 2001 From: Akhil R Date: Tue, 26 Apr 2022 15:49:12 +0530 Subject: dmaengine: tegra: Fix uninitialized variable usage Initialize slave_bw in dma_prep*() functions as the parameter is not set for DMA_MEM_TO_MEM case in get_transfer_param(). Though the case may never occur, initializing it avoids warning from certain static checkers Fixes: ee17028009d4 ("dmaengine: tegra: Add tegra gpcdma driver") Reported-by: Dan Carpenter Signed-off-by: Akhil R Link: https://lore.kernel.org/r/20220426101913.43335-2-akhilrajeev@nvidia.com Signed-off-by: Vinod Koul --- drivers/dma/tegra186-gpc-dma.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c index 3951db527dec..b33c9b7770d5 100644 --- a/drivers/dma/tegra186-gpc-dma.c +++ b/drivers/dma/tegra186-gpc-dma.c @@ -984,8 +984,8 @@ tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl, { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count; + enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED; u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0; - enum dma_slave_buswidth slave_bw; struct tegra_dma_sg_req *sg_req; struct tegra_dma_desc *dma_desc; struct scatterlist *sg; @@ -1102,12 +1102,12 @@ tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_l size_t period_len, enum dma_transfer_direction direction, unsigned long flags) { + enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED; + u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size; + unsigned int max_dma_count, len, period_count, i; struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_desc *dma_desc; struct tegra_dma_sg_req *sg_req; - enum dma_slave_buswidth slave_bw; - u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size; - unsigned int max_dma_count, len, period_count, i; dma_addr_t mem = buf_addr; int ret; -- cgit From 360e4f4e3fcceb9c24ee509f9eb4ed48759b3918 Mon Sep 17 00:00:00 2001 From: Akhil R Date: Tue, 26 Apr 2022 15:49:13 +0530 Subject: dmaengine: tegra: Remove unused switch case Remove unused switch case in get_transfer_param() function. The function is not called for MEM_TO_MEM transfers. 
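Both tegra changes rest on the same invariant: get_transfer_param() only fills its out-parameters for the DEV_TO_MEM and MEM_TO_DEV cases. The caller-side defence from the first patch, as a one-line sketch:

    /* the callee may return without touching slave_bw, so never
     * leave it indeterminate at the declaration */
    enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;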
Signed-off-by: Akhil R Link: https://lore.kernel.org/r/20220426101913.43335-3-akhilrajeev@nvidia.com Signed-off-by: Vinod Koul --- drivers/dma/tegra186-gpc-dma.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c index b33c9b7770d5..05cd451f541d 100644 --- a/drivers/dma/tegra186-gpc-dma.c +++ b/drivers/dma/tegra186-gpc-dma.c @@ -829,10 +829,6 @@ static int get_transfer_param(struct tegra_dma_channel *tdc, *slave_bw = tdc->dma_sconfig.src_addr_width; *csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC; return 0; - case DMA_MEM_TO_MEM: - *burst_size = tdc->dma_sconfig.src_addr_width; - *csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM; - return 0; default: dev_err(tdc2dev(tdc), "DMA direction is not supported\n"); } -- cgit From 59e477763d092923f567051e1bbfbb4e04d0dfbf Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Sun, 24 Apr 2022 12:27:55 -0500 Subject: dt-bindings: dma: sun50i-a64: Add compatible for D1 D1 has a DMA controller similar to the one in other Allwinner SoCs. Add its compatible, and include it in the list of variants with a separate MBUS clock gate. Acked-by: Rob Herring Acked-by: Maxime Ripard Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20220424172759.33383-2-samuel@sholland.org Signed-off-by: Vinod Koul --- .../devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml index b6e1ebfaf366..ff0a5c58d78c 100644 --- a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml +++ b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml @@ -20,9 +20,11 @@ properties: compatible: oneOf: - - const: allwinner,sun50i-a64-dma - - const: allwinner,sun50i-a100-dma - - const: allwinner,sun50i-h6-dma + - enum: + - allwinner,sun20i-d1-dma + - allwinner,sun50i-a64-dma + - allwinner,sun50i-a100-dma + - allwinner,sun50i-h6-dma - items: - const: allwinner,sun8i-r40-dma - const: allwinner,sun50i-a64-dma @@ -58,6 +60,7 @@ if: properties: compatible: enum: + - allwinner,sun20i-d1-dma - allwinner,sun50i-a100-dma - allwinner,sun50i-h6-dma -- cgit From 9aa48806edb8c37e82532dbc6098b03f6bd4245e Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Sun, 24 Apr 2022 12:27:56 -0500 Subject: dmaengine: sun6i: Do not use virt_to_phys This breaks on RISC-V, because dma_pool_alloc returns addresses which are not in the linear map. Instead, plumb through the physical address which is already known anyway. 
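The key point: dma_pool_alloc() already hands back the DMA address through its third argument, so deriving it with virt_to_phys() is never needed (and, as above, is invalid when the pool memory is not in the linear map). A minimal usage sketch, with the LLI structure name illustrative:

    #include <linux/dmapool.h>

    dma_addr_t p_lli;
    struct my_lli *v_lli;

    v_lli = dma_pool_alloc(pool, GFP_NOWAIT, &p_lli);
    if (v_lli) {
            /* keep both views paired: v_lli for the CPU,
             * p_lli for the hardware descriptor chain */
            dma_pool_free(pool, v_lli, p_lli);
    }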
Acked-by: Maxime Ripard Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20220424172759.33383-3-samuel@sholland.org Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 5cadd4d2b824..4436fbd70445 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -241,9 +241,7 @@ static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev) static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, struct sun6i_pchan *pchan) { - phys_addr_t reg = virt_to_phys(pchan->base); - - dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n" + dev_dbg(sdev->slave.dev, "Chan %d reg:\n" "\t___en(%04x): \t0x%08x\n" "\tpause(%04x): \t0x%08x\n" "\tstart(%04x): \t0x%08x\n" @@ -252,7 +250,7 @@ static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, "\t__dst(%04x): \t0x%08x\n" "\tcount(%04x): \t0x%08x\n" "\t_para(%04x): \t0x%08x\n\n", - pchan->idx, ®, + pchan->idx, DMA_CHAN_ENABLE, readl(pchan->base + DMA_CHAN_ENABLE), DMA_CHAN_PAUSE, @@ -385,17 +383,16 @@ static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, } static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan, - struct sun6i_dma_lli *lli) + struct sun6i_dma_lli *v_lli, + dma_addr_t p_lli) { - phys_addr_t p_lli = virt_to_phys(lli); - dev_dbg(chan2dev(&vchan->vc.chan), - "\n\tdesc: p - %pa v - 0x%p\n" + "\n\tdesc:\tp - %pad v - 0x%p\n" "\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n" "\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n", - &p_lli, lli, - lli->cfg, lli->src, lli->dst, - lli->len, lli->para, lli->p_lli_next); + &p_lli, v_lli, + v_lli->cfg, v_lli->src, v_lli->dst, + v_lli->len, v_lli->para, v_lli->p_lli_next); } static void sun6i_dma_free_desc(struct virt_dma_desc *vd) @@ -445,7 +442,7 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) pchan->desc = to_sun6i_desc(&desc->tx); pchan->done = NULL; - sun6i_dma_dump_lli(vchan, pchan->desc->v_lli); + sun6i_dma_dump_lli(vchan, pchan->desc->v_lli, pchan->desc->p_lli); irq_reg = pchan->idx / DMA_IRQ_CHAN_NR; irq_offset = pchan->idx % DMA_IRQ_CHAN_NR; @@ -670,7 +667,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); - sun6i_dma_dump_lli(vchan, v_lli); + sun6i_dma_dump_lli(vchan, v_lli, p_lli); return vchan_tx_prep(&vchan->vc, &txd->vd, flags); @@ -746,14 +743,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( } dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli); - for (prev = txd->v_lli; prev; prev = prev->v_lli_next) - sun6i_dma_dump_lli(vchan, prev); + for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli; + p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next) + sun6i_dma_dump_lli(vchan, v_lli, p_lli); return vchan_tx_prep(&vchan->vc, &txd->vd, flags); err_lli_free: - for (prev = txd->v_lli; prev; prev = prev->v_lli_next) - dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); + for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli; + p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next) + dma_pool_free(sdev->pool, v_lli, p_lli); kfree(txd); return NULL; } @@ -820,8 +819,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( return vchan_tx_prep(&vchan->vc, &txd->vd, flags); err_lli_free: - for (prev = txd->v_lli; prev; prev = prev->v_lli_next) - dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); + for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli; + p_lli = v_lli->p_lli_next, v_lli = 
v_lli->v_lli_next) + dma_pool_free(sdev->pool, v_lli, p_lli); kfree(txd); return NULL; } -- cgit From ec31c5c594927556ae74f6617fe4969568d8dcc5 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Sun, 24 Apr 2022 12:27:57 -0500 Subject: dmaengine: sun6i: Add support for 34-bit physical addresses Recent Allwinner SoCs support >4 GiB of DRAM, so those variants of the DMA engine support >32 bit physical addresses. This is accomplished by placing the high bits in the "para" word in the DMA descriptor. DMA descriptors themselves can be located at >32 bit addresses by putting the high bits in the LSBs of the descriptor address register, taking advantage of the required DMA descriptor alignment. However, support for this is not really necessary, so we can avoid the complication by allocating them from the DMA_32 zone. Signed-off-by: Samuel Holland Acked-by: Jernej Skrabec Link: https://lore.kernel.org/r/20220424172759.33383-4-samuel@sholland.org Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 53 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 15 deletions(-) diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 4436fbd70445..1eb3bafa7324 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -90,6 +90,14 @@ #define DMA_CHAN_CUR_PARA 0x1c +/* + * LLI address mangling + * + * The LLI link physical address is also mangled, but we avoid dealing + * with that by allocating LLIs from the DMA32 zone. + */ +#define SRC_HIGH_ADDR(x) (((x) & 0x3U) << 16) +#define DST_HIGH_ADDR(x) (((x) & 0x3U) << 18) /* * Various hardware related defines @@ -132,6 +140,7 @@ struct sun6i_dma_config { u32 dst_burst_lengths; u32 src_addr_widths; u32 dst_addr_widths; + bool has_high_addr; bool has_mbus_clk; }; @@ -623,6 +632,18 @@ static int set_config(struct sun6i_dma_dev *sdev, return 0; } +static inline void sun6i_dma_set_addr(struct sun6i_dma_dev *sdev, + struct sun6i_dma_lli *v_lli, + dma_addr_t src, dma_addr_t dst) +{ + v_lli->src = lower_32_bits(src); + v_lli->dst = lower_32_bits(dst); + + if (sdev->cfg->has_high_addr) + v_lli->para |= SRC_HIGH_ADDR(upper_32_bits(src)) | + DST_HIGH_ADDR(upper_32_bits(dst)); +} + static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) @@ -645,16 +666,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( if (!txd) return NULL; - v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); + v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli); if (!v_lli) { dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); goto err_txd_free; } - v_lli->src = src; - v_lli->dst = dest; v_lli->len = len; v_lli->para = NORMAL_WAIT; + sun6i_dma_set_addr(sdev, v_lli, src, dest); burst = convert_burst(8); width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); @@ -705,7 +725,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( return NULL; for_each_sg(sgl, sg, sg_len, i) { - v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); + v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli); if (!v_lli) goto err_lli_free; @@ -713,8 +733,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( v_lli->para = NORMAL_WAIT; if (dir == DMA_MEM_TO_DEV) { - v_lli->src = sg_dma_address(sg); - v_lli->dst = sconfig->dst_addr; + sun6i_dma_set_addr(sdev, v_lli, + sg_dma_address(sg), + sconfig->dst_addr); v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); 
sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); @@ -726,8 +747,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( sg_dma_len(sg), flags); } else { - v_lli->src = sconfig->src_addr; - v_lli->dst = sg_dma_address(sg); + sun6i_dma_set_addr(sdev, v_lli, + sconfig->src_addr, + sg_dma_address(sg)); v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); @@ -786,7 +808,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( return NULL; for (i = 0; i < periods; i++) { - v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); + v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli); if (!v_lli) { dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); goto err_lli_free; @@ -796,14 +818,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( v_lli->para = NORMAL_WAIT; if (dir == DMA_MEM_TO_DEV) { - v_lli->src = buf_addr + period_len * i; - v_lli->dst = sconfig->dst_addr; + sun6i_dma_set_addr(sdev, v_lli, + buf_addr + period_len * i, + sconfig->dst_addr); v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); } else { - v_lli->src = sconfig->src_addr; - v_lli->dst = buf_addr + period_len * i; + sun6i_dma_set_addr(sdev, v_lli, + sconfig->src_addr, + buf_addr + period_len * i); v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); @@ -1174,8 +1198,6 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = { }; /* - * TODO: Add support for more than 4g physical addressing. - * * The A100 binding uses the number of dma channels from the * device tree node. */ @@ -1194,6 +1216,7 @@ static struct sun6i_dma_config sun50i_a100_dma_cfg = { BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), + .has_high_addr = true, .has_mbus_clk = true, }; -- cgit From 8292a15597db6f97dddd2afff98095a4722d0303 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Sun, 24 Apr 2022 12:27:58 -0500 Subject: dmaengine: sun6i: Add support for the D1 variant So far it appears to match the configuration of the A100 variant. Since D1 is a RISC-V chip, it does not meet any of the existing dependencies for this driver, so relax the dependency somewhat. 
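The reuse pattern, as a sketch shaped like the match table in the diff below: a new compatible simply points at the existing per-SoC config until a real difference appears:

    static const struct of_device_id my_dma_match[] = {
            /* D1 reuses the A100 configuration wholesale */
            { .compatible = "allwinner,sun20i-d1-dma",
              .data = &sun50i_a100_dma_cfg },
            { .compatible = "allwinner,sun50i-a100-dma",
              .data = &sun50i_a100_dma_cfg },
            { /* sentinel */ }
    };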
Acked-by: Maxime Ripard Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20220424172759.33383-5-samuel@sholland.org Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- drivers/dma/sun6i-dma.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 857b2174a4cb..487ed4ddc3be 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -163,7 +163,7 @@ config DMA_SUN4I config DMA_SUN6I tristate "Allwinner A31 SoCs DMA support" - depends on MACH_SUN6I || MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST + depends on ARCH_SUNXI || COMPILE_TEST depends on RESET_CONTROLLER select DMA_ENGINE select DMA_VIRTUAL_CHANNELS diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 1eb3bafa7324..b7557f437936 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -1271,6 +1271,7 @@ static const struct of_device_id sun6i_dma_match[] = { { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg }, { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, + { .compatible = "allwinner,sun20i-d1-dma", .data = &sun50i_a100_dma_cfg }, { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg }, { .compatible = "allwinner,sun50i-a100-dma", .data = &sun50i_a100_dma_cfg }, { .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg }, -- cgit From d1a28597808268b87f156138aad3104aa255e62b Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 25 Apr 2022 11:03:29 -0700 Subject: dmaengine: idxd: make idxd_wq_enable() return 0 if wq is already enabled When calling idxd_wq_enable() and wq is already enabled, code should return 0 and indicate function is successful instead of return error code and fail. This should also put idxd_wq_enable() in sync with idxd_wq_disable() where it returns 0 if wq is already disabled. Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/165090980906.1378449.1939401700832432886.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 1143886f4a80..dd6a05eccb18 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -184,7 +184,7 @@ int idxd_wq_enable(struct idxd_wq *wq) if (wq->state == IDXD_WQ_ENABLED) { dev_dbg(dev, "WQ %d already enabled\n", wq->id); - return -ENXIO; + return 0; } idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status); -- cgit
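The convention the idxd change encodes, as a closing sketch with illustrative names: state-changing helpers report success when the object is already in the requested state, so enable/disable pairs stay idempotent for their callers:

    static int my_wq_enable(struct my_wq *wq)
    {
            if (wq->state == MY_WQ_ENABLED)
                    return 0; /* already there: success, not an error */

            return my_issue_enable_cmd(wq); /* hypothetical command path */
    }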