Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Makefile                            |   6
-rw-r--r--  drivers/dma/amba-pl08x.c                        |   4
-rw-r--r--  drivers/dma/dma-axi-dmac.c                      |  78
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c  |  38
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac.h           |   3
-rw-r--r--  drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c         |  14
-rw-r--r--  drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h         |   5
-rw-r--r--  drivers/dma/fsl-dpaa2-qdma/dpdmai.c             | 113
-rw-r--r--  drivers/dma/fsl-dpaa2-qdma/dpdmai.h             |  61
-rw-r--r--  drivers/dma/fsl-edma-common.c                   |  25
-rw-r--r--  drivers/dma/fsl-edma-common.h                   | 110
-rw-r--r--  drivers/dma/fsl-edma-main.c                     |  50
-rw-r--r--  drivers/dma/fsl-edma-trace.c                    |   4
-rw-r--r--  drivers/dma/fsl-edma-trace.h                    | 132
-rw-r--r--  drivers/dma/idma64.c                            |   8
-rw-r--r--  drivers/dma/idxd/cdev.c                         | 100
-rw-r--r--  drivers/dma/idxd/debugfs.c                      |   4
-rw-r--r--  drivers/dma/idxd/device.c                       |   8
-rw-r--r--  drivers/dma/idxd/idxd.h                         |   5
-rw-r--r--  drivers/dma/idxd/init.c                         |   6
-rw-r--r--  drivers/dma/idxd/irq.c                          |   4
-rw-r--r--  drivers/dma/idxd/perfmon.c                      |   9
-rw-r--r--  drivers/dma/idxd/registers.h                    |   3
-rw-r--r--  drivers/dma/idxd/sysfs.c                        |  27
-rw-r--r--  drivers/dma/imx-sdma.c                          |  97
-rw-r--r--  drivers/dma/mcf-edma-main.c                     |   4
-rw-r--r--  drivers/dma/owl-dma.c                           |   4
-rw-r--r--  drivers/dma/pch_dma.c                           |   5
-rw-r--r--  drivers/dma/pl330.c                             |   4
-rw-r--r--  drivers/dma/qcom/hidma.c                        |  11
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c                   | 109
-rw-r--r--  drivers/dma/tegra186-gpc-dma.c                  |   3
-rw-r--r--  drivers/dma/virt-dma.h                          |  10
-rw-r--r--  drivers/dma/xilinx/xdma-regs.h                  |   3
-rw-r--r--  drivers/dma/xilinx/xdma.c                       |  43
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c               |  23
36 files changed, 672 insertions(+), 461 deletions(-)
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index dfd40d14e408..802ca916f05f 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -31,10 +31,12 @@ obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_DW_EDMA) += dw-edma/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
+fsl-edma-trace-$(CONFIG_TRACING) := fsl-edma-trace.o
+CFLAGS_fsl-edma-trace.o := -I$(src)
obj-$(CONFIG_FSL_DMA) += fsldma.o
-fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o
+fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o ${fsl-edma-trace-y}
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
-mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o
+mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o ${fsl-edma-trace-y}
obj-$(CONFIG_MCF_EDMA) += mcf-edma.o
obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index fbf048f432bf..73a5cfb4da8a 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2855,8 +2855,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
}
/* Initialize physical channels */
- pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
- GFP_KERNEL);
+ pl08x->phy_chans = kcalloc(vd->channels, sizeof(*pl08x->phy_chans),
+ GFP_KERNEL);
if (!pl08x->phy_chans) {
ret = -ENOMEM;
goto out_no_phychans;
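
Note on the kcalloc() conversion above: kcalloc(n, size, flags) returns NULL when n * size would overflow, while the old kzalloc(n * size, flags) form can silently wrap and hand back an undersized buffer. A minimal sketch of the preferred pattern (illustrative kernel C, not part of the patch):

#include <linux/slab.h>

/* Allocate a zeroed array of n elements; NULL on overflow or OOM. */
static struct pl08x_phy_chan *alloc_phy_chans(size_t n)
{
	return kcalloc(n, sizeof(struct pl08x_phy_chan), GFP_KERNEL);
}
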
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 4e339c04fc1e..bdb752f11869 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -1002,6 +1002,16 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
return 0;
}
+static void axi_dmac_tasklet_kill(void *task)
+{
+ tasklet_kill(task);
+}
+
+static void axi_dmac_free_dma_controller(void *of_node)
+{
+ of_dma_controller_free(of_node);
+}
+
static int axi_dmac_probe(struct platform_device *pdev)
{
struct dma_device *dma_dev;
@@ -1025,14 +1035,10 @@ static int axi_dmac_probe(struct platform_device *pdev)
if (IS_ERR(dmac->base))
return PTR_ERR(dmac->base);
- dmac->clk = devm_clk_get(&pdev->dev, NULL);
+ dmac->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(dmac->clk))
return PTR_ERR(dmac->clk);
- ret = clk_prepare_enable(dmac->clk);
- if (ret < 0)
- return ret;
-
version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);
if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
@@ -1041,7 +1047,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
ret = axi_dmac_parse_dt(&pdev->dev, dmac);
if (ret < 0)
- goto err_clk_disable;
+ return ret;
INIT_LIST_HEAD(&dmac->chan.active_descs);
@@ -1072,7 +1078,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
ret = axi_dmac_detect_caps(dmac, version);
if (ret)
- goto err_clk_disable;
+ return ret;
dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
@@ -1088,57 +1094,42 @@ static int axi_dmac_probe(struct platform_device *pdev)
!AXI_DMAC_DST_COHERENT_GET(ret)) {
dev_err(dmac->dma_dev.dev,
"Coherent DMA not supported in hardware");
- ret = -EINVAL;
- goto err_clk_disable;
+ return -EINVAL;
}
}
- ret = dma_async_device_register(dma_dev);
+ ret = dmaenginem_async_device_register(dma_dev);
+ if (ret)
+ return ret;
+
+ /*
+	 * Put the action in here so it gets done before unregistering the DMA
+ * device.
+ */
+ ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_tasklet_kill,
+ &dmac->chan.vchan.task);
if (ret)
- goto err_clk_disable;
+ return ret;
ret = of_dma_controller_register(pdev->dev.of_node,
of_dma_xlate_by_chan_id, dma_dev);
if (ret)
- goto err_unregister_device;
+ return ret;
- ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
- dev_name(&pdev->dev), dmac);
+ ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_free_dma_controller,
+ pdev->dev.of_node);
if (ret)
- goto err_unregister_of;
+ return ret;
- platform_set_drvdata(pdev, dmac);
+ ret = devm_request_irq(&pdev->dev, dmac->irq, axi_dmac_interrupt_handler,
+ IRQF_SHARED, dev_name(&pdev->dev), dmac);
+ if (ret)
+ return ret;
regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
&axi_dmac_regmap_config);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- goto err_free_irq;
- }
-
- return 0;
-
-err_free_irq:
- free_irq(dmac->irq, dmac);
-err_unregister_of:
- of_dma_controller_free(pdev->dev.of_node);
-err_unregister_device:
- dma_async_device_unregister(&dmac->dma_dev);
-err_clk_disable:
- clk_disable_unprepare(dmac->clk);
-
- return ret;
-}
-
-static void axi_dmac_remove(struct platform_device *pdev)
-{
- struct axi_dmac *dmac = platform_get_drvdata(pdev);
- of_dma_controller_free(pdev->dev.of_node);
- free_irq(dmac->irq, dmac);
- tasklet_kill(&dmac->chan.vchan.task);
- dma_async_device_unregister(&dmac->dma_dev);
- clk_disable_unprepare(dmac->clk);
+ return PTR_ERR_OR_ZERO(regmap);
}
static const struct of_device_id axi_dmac_of_match_table[] = {
@@ -1153,7 +1144,6 @@ static struct platform_driver axi_dmac_driver = {
.of_match_table = axi_dmac_of_match_table,
},
.probe = axi_dmac_probe,
- .remove_new = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);
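
The dma-axi-dmac conversion above leans on devm actions running in reverse (LIFO) order of registration: an action added between two managed steps is executed between their teardowns on remove or probe failure, which is why the tasklet-kill action is registered right after dmaenginem_async_device_register(). A minimal sketch of that ordering guarantee (struct example_state and the probe are hypothetical, not the driver's code):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct example_state {
	struct dma_device dma_dev;
	struct tasklet_struct task;
};

static void kill_tasklet_action(void *data)
{
	tasklet_kill(data);
}

static int example_probe(struct platform_device *pdev)
{
	struct example_state *st;
	int ret;

	st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	/* Managed unregister runs last on teardown (LIFO). */
	ret = dmaenginem_async_device_register(&st->dma_dev);
	if (ret)
		return ret;

	/*
	 * Registered after the device, so it runs before the managed
	 * unregister: the tasklet is dead before the DMA device goes away.
	 */
	return devm_add_action_or_reset(&pdev->dev, kill_tasklet_action,
					&st->task);
}
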
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index a86a81ff0caa..fffafa86d964 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -302,6 +302,7 @@ static struct axi_dma_desc *axi_desc_alloc(u32 num)
kfree(desc);
return NULL;
}
+ desc->nr_hw_descs = num;
return desc;
}
@@ -328,7 +329,7 @@ static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
static void axi_desc_put(struct axi_dma_desc *desc)
{
struct axi_dma_chan *chan = desc->chan;
- int count = atomic_read(&chan->descs_allocated);
+ int count = desc->nr_hw_descs;
struct axi_dma_hw_desc *hw_desc;
int descs_put;
@@ -1139,9 +1140,6 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* Remove the completed descriptor from issued list before completing */
list_del(&vd->node);
vchan_cookie_complete(vd);
-
- /* Submit queued descriptors after processing the completed ones */
- axi_chan_start_first_queued(chan);
}
out:
@@ -1445,6 +1443,24 @@ static int parse_device_properties(struct axi_dma_chip *chip)
return 0;
}
+static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip)
+{
+ int irq_count = platform_irq_count(pdev);
+ int ret;
+
+ for (int i = 0; i < irq_count; i++) {
+ chip->irq[i] = platform_get_irq(pdev, i);
+ if (chip->irq[i] < 0)
+ return chip->irq[i];
+ ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, chip);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static int dw_probe(struct platform_device *pdev)
{
struct axi_dma_chip *chip;
@@ -1471,10 +1487,6 @@ static int dw_probe(struct platform_device *pdev)
chip->dev = &pdev->dev;
chip->dw->hdata = hdata;
- chip->irq = platform_get_irq(pdev, 0);
- if (chip->irq < 0)
- return chip->irq;
-
chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
@@ -1515,8 +1527,7 @@ static int dw_probe(struct platform_device *pdev)
if (!dw->chan)
return -ENOMEM;
- ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
- IRQF_SHARED, KBUILD_MODNAME, chip);
+ ret = axi_req_irqs(pdev, chip);
if (ret)
return ret;
@@ -1629,7 +1640,9 @@ static void dw_remove(struct platform_device *pdev)
pm_runtime_disable(chip->dev);
axi_dma_suspend(chip);
- devm_free_irq(chip->dev, chip->irq, chip);
+ for (i = 0; i < DMAC_MAX_CHANNELS; i++)
+ if (chip->irq[i] > 0)
+ devm_free_irq(chip->dev, chip->irq[i], chip);
of_dma_controller_free(chip->dev->of_node);
@@ -1653,6 +1666,9 @@ static const struct of_device_id dw_dma_of_id_table[] = {
}, {
.compatible = "starfive,jh7110-axi-dma",
.data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),
+ }, {
+ .compatible = "starfive,jh8100-axi-dma",
+ .data = (void *)AXI_DMA_FLAG_HAS_RESETS,
},
{}
};
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index 454904d99654..b842e6a8d90d 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -65,7 +65,7 @@ struct dw_axi_dma {
struct axi_dma_chip {
struct device *dev;
- int irq;
+ int irq[DMAC_MAX_CHANNELS];
void __iomem *regs;
void __iomem *apb_regs;
struct clk *core_clk;
@@ -104,6 +104,7 @@ struct axi_dma_desc {
u32 completed_blocks;
u32 length;
u32 period_len;
+ u32 nr_hw_descs;
};
struct axi_dma_chan_config {
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index 5a8061a307cd..36384d019263 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -362,7 +362,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
for (i = 0; i < priv->num_pairs; i++) {
err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
- i, &priv->rx_queue_attr[i]);
+ i, 0, &priv->rx_queue_attr[i]);
if (err) {
dev_err(dev, "dpdmai_get_rx_queue() failed\n");
goto exit;
@@ -370,13 +370,13 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
- i, &priv->tx_fqid[i]);
+ i, 0, &priv->tx_queue_attr[i]);
if (err) {
dev_err(dev, "dpdmai_get_tx_queue() failed\n");
goto exit;
}
- ppriv->req_fqid = priv->tx_fqid[i];
- ppriv->prio = i;
+ ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
+ ppriv->prio = DPAA2_QDMA_DEFAULT_PRIORITY;
ppriv->priv = priv;
ppriv++;
}
@@ -542,7 +542,7 @@ static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
rx_queue_cfg.dest_cfg.priority = ppriv->prio;
err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
- rx_queue_cfg.dest_cfg.priority,
+ rx_queue_cfg.dest_cfg.priority, 0,
&rx_queue_cfg);
if (err) {
dev_err(dev, "dpdmai_set_rx_queue() failed\n");
@@ -642,7 +642,7 @@ static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
for (i = 0; i < dpaa2_qdma->n_chans; i++) {
dpaa2_chan = &dpaa2_qdma->chans[i];
dpaa2_chan->qdma = dpaa2_qdma;
- dpaa2_chan->fqid = priv->tx_fqid[i % num];
+ dpaa2_chan->fqid = priv->tx_queue_attr[i % num].fqid;
dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
spin_lock_init(&dpaa2_chan->queue_lock);
@@ -802,7 +802,7 @@ static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
dpaa2_dpdmai_dpio_unbind(priv);
dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
- dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
+ dpdmai_destroy(priv->mc_io, 0, priv->dpqdma_id, ls_dev->mc_handle);
}
static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
index 03e2f4e0baca..2c80077cb7c0 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
@@ -6,6 +6,7 @@
#define DPAA2_QDMA_STORE_SIZE 16
#define NUM_CH 8
+#define DPAA2_QDMA_DEFAULT_PRIORITY 0
struct dpaa2_qdma_sd_d {
u32 rsv:32;
@@ -122,8 +123,8 @@ struct dpaa2_qdma_priv {
struct dpaa2_qdma_engine *dpaa2_qdma;
struct dpaa2_qdma_priv_per_prio *ppriv;
- struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
- u32 tx_fqid[DPDMAI_PRIO_NUM];
+ struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_MAX_QUEUE_NUM];
+ struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_MAX_QUEUE_NUM];
};
struct dpaa2_qdma_priv_per_prio {
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
index 878662aaa1c2..36897b41ee7e 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
@@ -1,47 +1,52 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright 2019 NXP
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/fsl/mc.h>
#include "dpdmai.h"
+#define DEST_TYPE_MASK 0xF
+
struct dpdmai_rsp_get_attributes {
__le32 id;
u8 num_of_priorities;
- u8 pad0[3];
+ u8 num_of_queues;
+ u8 pad0[2];
__le16 major;
__le16 minor;
};
struct dpdmai_cmd_queue {
__le32 dest_id;
- u8 priority;
- u8 queue;
+ u8 dest_priority;
+ union {
+ u8 queue;
+ u8 pri;
+ };
u8 dest_type;
- u8 pad;
+ u8 queue_idx;
__le64 user_ctx;
union {
__le32 options;
__le32 fqid;
};
-};
+} __packed;
struct dpdmai_rsp_get_tx_queue {
__le64 pad;
__le32 fqid;
};
-#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
- ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
+struct dpdmai_cmd_open {
+ __le32 dpdmai_id;
+} __packed;
-/* cmd, param, offset, width, type, arg_name */
-#define DPDMAI_CMD_CREATE(cmd, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 8, 8, u8, (cfg)->priorities[0]);\
- MC_CMD_OP(cmd, 0, 16, 8, u8, (cfg)->priorities[1]);\
-} while (0)
+struct dpdmai_cmd_destroy {
+ __le32 dpdmai_id;
+} __packed;
static inline u64 mc_enc(int lsoffset, int width, u64 val)
{
@@ -68,16 +73,16 @@ static inline u64 mc_enc(int lsoffset, int width, u64 val)
int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
int dpdmai_id, u16 *token)
{
+ struct dpdmai_cmd_open *cmd_params;
struct fsl_mc_command cmd = { 0 };
- __le64 *cmd_dpdmai_id;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
cmd_flags, 0);
- cmd_dpdmai_id = cmd.params;
- *cmd_dpdmai_id = cpu_to_le32(dpdmai_id);
+ cmd_params = (struct dpdmai_cmd_open *)&cmd.params;
+ cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -116,65 +121,26 @@ int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
EXPORT_SYMBOL_GPL(dpdmai_close);
/**
- * dpdmai_create() - Create the DPDMAI object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg: Configuration structure
- * @token: Returned token; use in subsequent API calls
- *
- * Create the DPDMAI object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent calls to
- * this specific object. For objects that are created using the
- * DPL file, call dpdmai_open() function to get an authentication
- * token first.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
- const struct dpdmai_cfg *cfg, u16 *token)
-{
- struct fsl_mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
- cmd_flags, 0);
- DPDMAI_CMD_CREATE(cmd, cfg);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- *token = mc_cmd_hdr_read_token(&cmd);
-
- return 0;
-}
-
-/**
* dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpdmai_id: The object id; it must be a valid id within the container that created this object
* @token: Token of DPDMAI object
*
* Return: '0' on Success; error code otherwise.
*/
-int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u32 dpdmai_id, u16 token)
{
+ struct dpdmai_cmd_destroy *cmd_params;
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
cmd_flags, token);
+ cmd_params = (struct dpdmai_cmd_destroy *)&cmd.params;
+ cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
+
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
@@ -274,6 +240,7 @@ int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
attr->version.major = le16_to_cpu(rsp_params->major);
attr->version.minor = le16_to_cpu(rsp_params->minor);
attr->num_of_priorities = rsp_params->num_of_priorities;
+ attr->num_of_queues = rsp_params->num_of_queues;
return 0;
}
@@ -284,13 +251,14 @@ EXPORT_SYMBOL_GPL(dpdmai_get_attributes);
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
+ * @queue_idx: DMA queue index
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation
* @cfg: Rx queue configuration
*
* Return: '0' on Success; Error code otherwise.
*/
-int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue_idx,
u8 priority, const struct dpdmai_rx_queue_cfg *cfg)
{
struct dpdmai_cmd_queue *cmd_params;
@@ -302,11 +270,12 @@ int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
- cmd_params->priority = cfg->dest_cfg.priority;
- cmd_params->queue = priority;
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->pri = priority;
cmd_params->dest_type = cfg->dest_cfg.dest_type;
cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
cmd_params->options = cpu_to_le32(cfg->options);
+ cmd_params->queue_idx = queue_idx;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -318,13 +287,14 @@ EXPORT_SYMBOL_GPL(dpdmai_set_rx_queue);
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
+ * @queue_idx: DMA Queue index
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation
* @attr: Returned Rx queue attributes
*
* Return: '0' on Success; Error code otherwise.
*/
-int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue_idx,
u8 priority, struct dpdmai_rx_queue_attr *attr)
{
struct dpdmai_cmd_queue *cmd_params;
@@ -337,6 +307,7 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
cmd_params->queue = priority;
+ cmd_params->queue_idx = queue_idx;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -345,8 +316,8 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
/* retrieve response parameters */
attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
- attr->dest_cfg.priority = cmd_params->priority;
- attr->dest_cfg.dest_type = cmd_params->dest_type;
+ attr->dest_cfg.priority = cmd_params->dest_priority;
+ attr->dest_cfg.dest_type = FIELD_GET(DEST_TYPE_MASK, cmd_params->dest_type);
attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
attr->fqid = le32_to_cpu(cmd_params->fqid);
@@ -359,14 +330,15 @@ EXPORT_SYMBOL_GPL(dpdmai_get_rx_queue);
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object
+ * @queue_idx: DMA queue index
* @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation
- * @fqid: Returned Tx queue
+ * @attr: Returned DMA Tx queue attributes
*
* Return: '0' on Success; Error code otherwise.
*/
int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
- u16 token, u8 priority, u32 *fqid)
+ u16 token, u8 queue_idx, u8 priority, struct dpdmai_tx_queue_attr *attr)
{
struct dpdmai_rsp_get_tx_queue *rsp_params;
struct dpdmai_cmd_queue *cmd_params;
@@ -379,6 +351,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
cmd_params->queue = priority;
+ cmd_params->queue_idx = queue_idx;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -388,7 +361,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
/* retrieve response parameters */
rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
- *fqid = le32_to_cpu(rsp_params->fqid);
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
return 0;
}
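
The dpdmai.c rework above swaps the MC_CMD_OP() shift-and-mask encoder for __packed command structs overlaid on the raw parameter words, so field placement is carried by struct layout and endianness is explicit at each field. A minimal sketch of the pattern, assuming the standard struct fsl_mc_command parameter area (the struct and function names here are hypothetical):

#include <linux/fsl/mc.h>
#include <linux/types.h>

struct example_cmd_open {
	__le32 object_id;	/* occupies bytes 0-3 of params[0] */
} __packed;

static void example_encode_open(struct fsl_mc_command *cmd, int id)
{
	struct example_cmd_open *p;

	/* Overlay the typed view on the 64-bit parameter words. */
	p = (struct example_cmd_open *)&cmd->params;
	p->object_id = cpu_to_le32(id);
}
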
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
index b13b9bf0c003..3fe7d8327366 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
@@ -5,14 +5,19 @@
#define __FSL_DPDMAI_H
/* DPDMAI Version */
-#define DPDMAI_VER_MAJOR 2
-#define DPDMAI_VER_MINOR 2
+#define DPDMAI_VER_MAJOR 3
+#define DPDMAI_VER_MINOR 3
-#define DPDMAI_CMD_BASE_VERSION 0
+#define DPDMAI_CMD_BASE_VERSION 1
#define DPDMAI_CMD_ID_OFFSET 4
-#define DPDMAI_CMDID_FORMAT(x) (((x) << DPDMAI_CMD_ID_OFFSET) | \
- DPDMAI_CMD_BASE_VERSION)
+/*
+ * Maximum number of Tx/Rx queues per DPDMAI object
+ */
+#define DPDMAI_MAX_QUEUE_NUM 8
+
+#define DPDMAI_CMDID_FORMAT_V(x, v) (((x) << DPDMAI_CMD_ID_OFFSET) | (v))
+#define DPDMAI_CMDID_FORMAT(x) DPDMAI_CMDID_FORMAT_V(x, DPDMAI_CMD_BASE_VERSION)
/* Command IDs */
#define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800)
@@ -26,18 +31,9 @@
#define DPDMAI_CMDID_RESET DPDMAI_CMDID_FORMAT(0x005)
#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMDID_FORMAT(0x006)
-#define DPDMAI_CMDID_SET_IRQ DPDMAI_CMDID_FORMAT(0x010)
-#define DPDMAI_CMDID_GET_IRQ DPDMAI_CMDID_FORMAT(0x011)
-#define DPDMAI_CMDID_SET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x012)
-#define DPDMAI_CMDID_GET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x013)
-#define DPDMAI_CMDID_SET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x014)
-#define DPDMAI_CMDID_GET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x015)
-#define DPDMAI_CMDID_GET_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x016)
-#define DPDMAI_CMDID_CLEAR_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x017)
-
-#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A0)
-#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A1)
-#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT(0x1A2)
+#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A0, 2)
+#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A1, 2)
+#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A2, 2)
#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
@@ -49,30 +45,32 @@
* Contains initialization APIs and runtime control APIs for DPDMAI
*/
-/**
+/*
* Maximum number of Tx/Rx priorities per DPDMAI object
*/
#define DPDMAI_PRIO_NUM 2
/* DPDMAI queue modification options */
-/**
+/*
* Select to modify the user's context associated with the queue
*/
#define DPDMAI_QUEUE_OPT_USER_CTX 0x1
-/**
+/*
* Select to modify the queue's destination
*/
#define DPDMAI_QUEUE_OPT_DEST 0x2
/**
* struct dpdmai_cfg - Structure representing DPDMAI configuration
+ * @num_queues: Number of the DMA queues
* @priorities: Priorities for the DMA hardware processing; valid priorities are
* configured with values 1-8; the entry following last valid entry
* should be configured with 0
*/
struct dpdmai_cfg {
+ u8 num_queues;
u8 priorities[DPDMAI_PRIO_NUM];
};
@@ -80,20 +78,19 @@ struct dpdmai_cfg {
* struct dpdmai_attr - Structure representing DPDMAI attributes
* @id: DPDMAI object ID
* @version: DPDMAI version
+ * @version.major: DPDMAI major version
+ * @version.minor: DPDMAI minor version
* @num_of_priorities: number of priorities
+ * @num_of_queues: number of the DMA queues
*/
struct dpdmai_attr {
int id;
- /**
- * struct version - DPDMAI version
- * @major: DPDMAI major version
- * @minor: DPDMAI minor version
- */
struct {
u16 major;
u16 minor;
} version;
u8 num_of_priorities;
+ u8 num_of_queues;
};
/**
@@ -158,22 +155,24 @@ struct dpdmai_rx_queue_attr {
u32 fqid;
};
+struct dpdmai_tx_queue_attr {
+ u32 fqid;
+};
+
int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
int dpdmai_id, u16 *token);
int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
-int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
-int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
- const struct dpdmai_cfg *cfg, u16 *token);
+int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u32 dpdmai_id, u16 token);
int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
u16 token, struct dpdmai_attr *attr);
int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- u8 priority, const struct dpdmai_rx_queue_cfg *cfg);
+ u8 queue_idx, u8 priority, const struct dpdmai_rx_queue_cfg *cfg);
int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
- u8 priority, struct dpdmai_rx_queue_attr *attr);
+ u8 queue_idx, u8 priority, struct dpdmai_rx_queue_attr *attr);
int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
- u16 token, u8 priority, u32 *fqid);
+ u16 token, u8 queue_idx, u8 priority, struct dpdmai_tx_queue_attr *attr);
#endif /* __FSL_DPDMAI_H */
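
DPDMAI_CMDID_FORMAT_V() above packs the command number into the bits above DPDMAI_CMD_ID_OFFSET and the command-interface version into the low bits, letting individual commands (the queue ops, now at v2) diverge from the base version (v1). A worked check of the arithmetic (illustrative, plain C11):

#include <assert.h>

#define EXAMPLE_CMDID_FORMAT_V(x, v)	(((x) << 4) | (v))

/* Queue commands carry version 2; everything else stays at base version 1. */
static_assert(EXAMPLE_CMDID_FORMAT_V(0x1A0, 2) == 0x1A02, "GET/SET queue, v2");
static_assert(EXAMPLE_CMDID_FORMAT_V(0x800, 1) == 0x8001, "CLOSE, base v1");
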
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b18faa7cfedb..3af430787315 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -3,6 +3,8 @@
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -74,18 +76,10 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
flags = fsl_edma_drvflags(fsl_chan);
val = edma_readl_chreg(fsl_chan, ch_sbr);
- /* Remote/local swapped wrongly on iMX8 QM Audio edma */
- if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
- if (!fsl_chan->is_rxchan)
- val |= EDMA_V3_CH_SBR_RD;
- else
- val |= EDMA_V3_CH_SBR_WR;
- } else {
- if (fsl_chan->is_rxchan)
- val |= EDMA_V3_CH_SBR_RD;
- else
- val |= EDMA_V3_CH_SBR_WR;
- }
+ if (fsl_chan->is_rxchan)
+ val |= EDMA_V3_CH_SBR_RD;
+ else
+ val |= EDMA_V3_CH_SBR_WR;
if (fsl_chan->is_remote)
val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
@@ -546,6 +540,8 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
csr |= EDMA_TCD_CSR_START;
fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
+
+ trace_edma_fill_tcd(fsl_chan, tcd);
}
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
@@ -810,6 +806,9 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
+ clk_prepare_enable(fsl_chan->clk);
+
fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
@@ -838,6 +837,8 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
fsl_chan->tcd_pool = NULL;
fsl_chan->is_sw = false;
fsl_chan->srcid = 0;
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
+ clk_disable_unprepare(fsl_chan->clk);
}
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 7bf0aba471a8..ac66222c1604 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -151,7 +151,6 @@ struct fsl_edma_chan {
enum dma_status status;
enum fsl_edma_pm_state pm_state;
bool idle;
- u32 slave_id;
struct fsl_edma_engine *edma;
struct fsl_edma_desc *edesc;
struct dma_slave_config cfg;
@@ -195,8 +194,6 @@ struct fsl_edma_desc {
#define FSL_EDMA_DRV_HAS_PD BIT(5)
#define FSL_EDMA_DRV_HAS_CHCLK BIT(6)
#define FSL_EDMA_DRV_HAS_CHMUX BIT(7)
-/* imx8 QM audio edma remote local swapped */
-#define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8)
/* control and status register is in tcd address space, edma3 reg layout */
#define FSL_EDMA_DRV_SPLIT_REG BIT(9)
#define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
@@ -238,7 +235,6 @@ struct fsl_edma_engine {
void __iomem *muxbase[DMAMUX_NR];
struct clk *muxclk[DMAMUX_NR];
struct clk *dmaclk;
- struct clk *chclk;
struct mutex fsl_edma_mutex;
const struct fsl_edma_drvdata *drvdata;
u32 n_chans;
@@ -250,13 +246,17 @@ struct fsl_edma_engine {
struct fsl_edma_chan chans[] __counted_by(n_chans);
};
+static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
+{
+ return fsl_chan->edma->drvdata->flags;
+}
+
#define edma_read_tcdreg_c(chan, _tcd, __name) \
-(sizeof((_tcd)->__name) == sizeof(u64) ? \
- edma_readq(chan->edma, &(_tcd)->__name) : \
- ((sizeof((_tcd)->__name) == sizeof(u32)) ? \
- edma_readl(chan->edma, &(_tcd)->__name) : \
- edma_readw(chan->edma, &(_tcd)->__name) \
- ))
+_Generic(((_tcd)->__name), \
+ __iomem __le64 : edma_readq(chan->edma, &(_tcd)->__name), \
+ __iomem __le32 : edma_readl(chan->edma, &(_tcd)->__name), \
+ __iomem __le16 : edma_readw(chan->edma, &(_tcd)->__name) \
+ )
#define edma_read_tcdreg(chan, __name) \
((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ? \
@@ -264,23 +264,13 @@ struct fsl_edma_engine {
edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name) \
)
-#define edma_write_tcdreg_c(chan, _tcd, _val, __name) \
-do { \
- switch (sizeof(_tcd->__name)) { \
- case sizeof(u64): \
- edma_writeq(chan->edma, (u64 __force)_val, &_tcd->__name); \
- break; \
- case sizeof(u32): \
- edma_writel(chan->edma, (u32 __force)_val, &_tcd->__name); \
- break; \
- case sizeof(u16): \
- edma_writew(chan->edma, (u16 __force)_val, &_tcd->__name); \
- break; \
- case sizeof(u8): \
- edma_writeb(chan->edma, (u8 __force)_val, &_tcd->__name); \
- break; \
- } \
-} while (0)
+#define edma_write_tcdreg_c(chan, _tcd, _val, __name) \
+_Generic((_tcd->__name), \
+ __iomem __le64 : edma_writeq(chan->edma, (u64 __force)(_val), &_tcd->__name), \
+ __iomem __le32 : edma_writel(chan->edma, (u32 __force)(_val), &_tcd->__name), \
+ __iomem __le16 : edma_writew(chan->edma, (u16 __force)(_val), &_tcd->__name), \
+ __iomem u8 : edma_writeb(chan->edma, _val, &_tcd->__name) \
+ )
#define edma_write_tcdreg(chan, val, __name) \
do { \
@@ -321,9 +311,11 @@ do { \
(((struct fsl_edma_hw_tcd *)_tcd)->_field))
#define fsl_edma_le_to_cpu(x) \
-(sizeof(x) == sizeof(u64) ? le64_to_cpu((__force __le64)(x)) : \
- (sizeof(x) == sizeof(u32) ? le32_to_cpu((__force __le32)(x)) : \
- le16_to_cpu((__force __le16)(x))))
+_Generic((x), \
+ __le64 : le64_to_cpu((x)), \
+ __le32 : le32_to_cpu((x)), \
+ __le16 : le16_to_cpu((x)) \
+)
#define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field) \
(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? \
@@ -331,19 +323,11 @@ do { \
fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field))
#define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field) \
-do { \
- switch (sizeof((_tcd)->_field)) { \
- case sizeof(u64): \
- *(__force __le64 *)(&((_tcd)->_field)) = cpu_to_le64(_val); \
- break; \
- case sizeof(u32): \
- *(__force __le32 *)(&((_tcd)->_field)) = cpu_to_le32(_val); \
- break; \
- case sizeof(u16): \
- *(__force __le16 *)(&((_tcd)->_field)) = cpu_to_le16(_val); \
- break; \
- } \
-} while (0)
+_Generic(((_tcd)->_field), \
+ __le64 : (_tcd)->_field = cpu_to_le64(_val), \
+ __le32 : (_tcd)->_field = cpu_to_le32(_val), \
+ __le16 : (_tcd)->_field = cpu_to_le16(_val) \
+)
#define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field) \
do { \
@@ -353,6 +337,9 @@ do { \
fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field); \
} while (0)
+/* Needed after the struct definitions above */
+#include "fsl-edma-trace.h"
+
/*
* R/W functions for big- or little-endian registers:
* The eDMA controller's endian is independent of the CPU core's endian.
@@ -371,23 +358,38 @@ static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr)
h = ioread32(addr + 4);
}
+ trace_edma_readl(edma, addr, l);
+ trace_edma_readl(edma, addr + 4, h);
+
return (h << 32) | l;
}
static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
+ u32 val;
+
if (edma->big_endian)
- return ioread32be(addr);
+ val = ioread32be(addr);
else
- return ioread32(addr);
+ val = ioread32(addr);
+
+ trace_edma_readl(edma, addr, val);
+
+ return val;
}
static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
{
+ u16 val;
+
if (edma->big_endian)
- return ioread16be(addr);
+ val = ioread16be(addr);
else
- return ioread16(addr);
+ val = ioread16(addr);
+
+ trace_edma_readw(edma, addr, val);
+
+ return val;
}
static inline void edma_writeb(struct fsl_edma_engine *edma,
@@ -398,6 +400,8 @@ static inline void edma_writeb(struct fsl_edma_engine *edma,
iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
else
iowrite8(val, addr);
+
+ trace_edma_writeb(edma, addr, val);
}
static inline void edma_writew(struct fsl_edma_engine *edma,
@@ -408,6 +412,8 @@ static inline void edma_writew(struct fsl_edma_engine *edma,
iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
else
iowrite16(val, addr);
+
+ trace_edma_writew(edma, addr, val);
}
static inline void edma_writel(struct fsl_edma_engine *edma,
@@ -417,6 +423,8 @@ static inline void edma_writel(struct fsl_edma_engine *edma,
iowrite32be(val, addr);
else
iowrite32(val, addr);
+
+ trace_edma_writel(edma, addr, val);
}
static inline void edma_writeq(struct fsl_edma_engine *edma,
@@ -429,6 +437,9 @@ static inline void edma_writeq(struct fsl_edma_engine *edma,
iowrite32(val & 0xFFFFFFFF, addr);
iowrite32(val >> 32, addr + 4);
}
+
+ trace_edma_writel(edma, addr, val & 0xFFFFFFFF);
+ trace_edma_writel(edma, addr + 4, val >> 32);
}
static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
@@ -436,11 +447,6 @@ static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
return container_of(chan, struct fsl_edma_chan, vchan.chan);
}
-static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
-{
- return fsl_chan->edma->drvdata->flags;
-}
-
static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct fsl_edma_desc, vdesc);
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 402f0058a180..391e4f13dfeb 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -105,7 +105,8 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
if (dma_spec->args_count != 2)
return NULL;
- mutex_lock(&fsl_edma->fsl_edma_mutex);
+ guard(mutex)(&fsl_edma->fsl_edma_mutex);
+
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
if (chan->client_count)
continue;
@@ -114,15 +115,20 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
if (chan) {
chan->device->privatecnt++;
fsl_chan = to_fsl_edma_chan(chan);
- fsl_chan->slave_id = dma_spec->args[1];
- fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id,
+ fsl_chan->srcid = dma_spec->args[1];
+
+ if (!fsl_chan->srcid) {
+			dev_err(&fsl_chan->pdev->dev, "Invalid srcid %d\n",
+ fsl_chan->srcid);
+ return NULL;
+ }
+
+ fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid,
true);
- mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
}
}
}
- mutex_unlock(&fsl_edma->fsl_edma_mutex);
return NULL;
}
@@ -342,10 +348,13 @@ static struct fsl_edma_drvdata imx8qm_data = {
.setup_irq = fsl_edma3_irq_init,
};
-static struct fsl_edma_drvdata imx8qm_audio_data = {
- .flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
+static struct fsl_edma_drvdata imx8ulp_data = {
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_CHCLK | FSL_EDMA_DRV_HAS_DMACLK |
+ FSL_EDMA_DRV_EDMA3,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
+ .mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
+ .mux_skip = 0x10000,
.setup_irq = fsl_edma3_irq_init,
};
@@ -380,7 +389,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
{ .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data},
- { .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
+ { .compatible = "fsl,imx8ulp-edma", .data = &imx8ulp_data},
{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
{ .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
@@ -434,6 +443,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct fsl_edma_engine *fsl_edma;
const struct fsl_edma_drvdata *drvdata = NULL;
u32 chan_mask[2] = {0, 0};
+ char clk_name[36];
struct edma_regs *regs;
int chans;
int ret, i;
@@ -476,14 +486,6 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
}
- if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
- fsl_edma->chclk = devm_clk_get_enabled(&pdev->dev, "mp");
- if (IS_ERR(fsl_edma->chclk)) {
- dev_err(&pdev->dev, "Missing MP block clock.\n");
- return PTR_ERR(fsl_edma->chclk);
- }
- }
-
ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2);
if (ret > 0) {
@@ -540,7 +542,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->edma = fsl_edma;
fsl_chan->pm_state = RUNNING;
- fsl_chan->slave_id = 0;
+ fsl_chan->srcid = 0;
fsl_chan->idle = true;
fsl_chan->dma_dir = DMA_NONE;
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
@@ -551,11 +553,21 @@ static int fsl_edma_probe(struct platform_device *pdev)
+ i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip;
+ if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
+ snprintf(clk_name, sizeof(clk_name), "ch%02d", i);
+ fsl_chan->clk = devm_clk_get_enabled(&pdev->dev,
+ (const char *)clk_name);
+
+ if (IS_ERR(fsl_chan->clk))
+ return PTR_ERR(fsl_chan->clk);
+ }
fsl_chan->pdev = pdev;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK)
+ clk_disable_unprepare(fsl_chan->clk);
}
ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
@@ -682,8 +694,8 @@ static int fsl_edma_resume_early(struct device *dev)
continue;
fsl_chan->pm_state = RUNNING;
edma_write_tcdreg(fsl_chan, 0, csr);
- if (fsl_chan->slave_id != 0)
- fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
+ if (fsl_chan->srcid != 0)
+ fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid, true);
}
if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
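
The xlate rework above uses the scope-based guard() helper from <linux/cleanup.h>: the mutex is dropped automatically on every exit from the enclosing scope, which is what lets the new early "return NULL" paths omit explicit mutex_unlock() calls. A minimal sketch of the pattern (illustrative names):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(example_lock);

static int example_lookup(u32 srcid)
{
	guard(mutex)(&example_lock);	/* unlocked when this scope exits */

	if (!srcid)
		return -EINVAL;		/* no unlock needed on this path */

	/* ... search under the lock ... */
	return 0;
}
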
diff --git a/drivers/dma/fsl-edma-trace.c b/drivers/dma/fsl-edma-trace.c
new file mode 100644
index 000000000000..28300ad80bb7
--- /dev/null
+++ b/drivers/dma/fsl-edma-trace.c
@@ -0,0 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define CREATE_TRACE_POINTS
+#include "fsl-edma-common.h"
diff --git a/drivers/dma/fsl-edma-trace.h b/drivers/dma/fsl-edma-trace.h
new file mode 100644
index 000000000000..d3541301a247
--- /dev/null
+++ b/drivers/dma/fsl-edma-trace.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2023 NXP.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsl_edma
+
+#if !defined(__LINUX_FSL_EDMA_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __LINUX_FSL_EDMA_TRACE
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(edma_log_io,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value),
+ TP_STRUCT__entry(
+ __field(struct fsl_edma_engine *, edma)
+ __field(void __iomem *, addr)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ __entry->edma = edma;
+ __entry->addr = addr;
+ __entry->value = value;
+ ),
+ TP_printk("offset %08x: value %08x",
+ (u32)(__entry->addr - __entry->edma->membase), __entry->value)
+);
+
+DEFINE_EVENT(edma_log_io, edma_readl,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value)
+);
+
+DEFINE_EVENT(edma_log_io, edma_writel,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value)
+);
+
+DEFINE_EVENT(edma_log_io, edma_readw,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value)
+);
+
+DEFINE_EVENT(edma_log_io, edma_writew,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value)
+);
+
+DEFINE_EVENT(edma_log_io, edma_readb,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value)
+);
+
+DEFINE_EVENT(edma_log_io, edma_writeb,
+ TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
+ TP_ARGS(edma, addr, value)
+);
+
+DECLARE_EVENT_CLASS(edma_log_tcd,
+ TP_PROTO(struct fsl_edma_chan *chan, void *tcd),
+ TP_ARGS(chan, tcd),
+ TP_STRUCT__entry(
+ __field(u64, saddr)
+ __field(u16, soff)
+ __field(u16, attr)
+ __field(u32, nbytes)
+ __field(u64, slast)
+ __field(u64, daddr)
+ __field(u16, doff)
+ __field(u16, citer)
+ __field(u64, dlast_sga)
+ __field(u16, csr)
+ __field(u16, biter)
+
+ ),
+ TP_fast_assign(
+ __entry->saddr = fsl_edma_get_tcd_to_cpu(chan, tcd, saddr),
+ __entry->soff = fsl_edma_get_tcd_to_cpu(chan, tcd, soff),
+ __entry->attr = fsl_edma_get_tcd_to_cpu(chan, tcd, attr),
+ __entry->nbytes = fsl_edma_get_tcd_to_cpu(chan, tcd, nbytes),
+ __entry->slast = fsl_edma_get_tcd_to_cpu(chan, tcd, slast),
+ __entry->daddr = fsl_edma_get_tcd_to_cpu(chan, tcd, daddr),
+ __entry->doff = fsl_edma_get_tcd_to_cpu(chan, tcd, doff),
+ __entry->citer = fsl_edma_get_tcd_to_cpu(chan, tcd, citer),
+ __entry->dlast_sga = fsl_edma_get_tcd_to_cpu(chan, tcd, dlast_sga),
+ __entry->csr = fsl_edma_get_tcd_to_cpu(chan, tcd, csr),
+ __entry->biter = fsl_edma_get_tcd_to_cpu(chan, tcd, biter);
+ ),
+ TP_printk("\n==== TCD =====\n"
+ " saddr: 0x%016llx\n"
+ " soff: 0x%04x\n"
+ " attr: 0x%04x\n"
+ " nbytes: 0x%08x\n"
+ " slast: 0x%016llx\n"
+ " daddr: 0x%016llx\n"
+ " doff: 0x%04x\n"
+ " citer: 0x%04x\n"
+ " dlast: 0x%016llx\n"
+ " csr: 0x%04x\n"
+ " biter: 0x%04x\n",
+ __entry->saddr,
+ __entry->soff,
+ __entry->attr,
+ __entry->nbytes,
+ __entry->slast,
+ __entry->daddr,
+ __entry->doff,
+ __entry->citer,
+ __entry->dlast_sga,
+ __entry->csr,
+ __entry->biter)
+);
+
+DEFINE_EVENT(edma_log_tcd, edma_fill_tcd,
+ TP_PROTO(struct fsl_edma_chan *chan, void *tcd),
+ TP_ARGS(chan, tcd)
+);
+
+#endif
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE fsl-edma-trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 78a938969d7d..e3505e56784b 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -171,6 +171,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
+ /* Since IRQ may be shared, check if DMA controller is powered on */
+ if (status == GENMASK(31, 0))
+ return IRQ_NONE;
+
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */
@@ -594,7 +598,9 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.dev = chip->sysdev;
- dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
+ ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
+ if (ret)
+ return ret;
ret = dma_async_device_register(&idma64->dma);
if (ret)
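
The idma64 guard above relies on a classic shared-IRQ idiom: MMIO reads from a powered-down (or surprise-removed) device return all ones, so a status of GENMASK(31, 0) means the interrupt cannot be ours. Sketch of the pattern (struct example_dev and EXAMPLE_STATUS are hypothetical):

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>

struct example_dev {
	void __iomem *regs;
};

#define EXAMPLE_STATUS	0x0

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;
	u32 status = readl(dev->regs + EXAMPLE_STATUS);

	/* Shared line: an all-ones read means the device is powered off. */
	if (status == GENMASK(31, 0))
		return IRQ_NONE;

	/* ... normal interrupt handling ... */
	return IRQ_HANDLED;
}
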
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 8078ab9acfbc..57f1bf2ab20b 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -342,7 +342,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
if (!evl)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
h = status.head;
@@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
- spin_unlock(&evl->lock);
-
drain_workqueue(wq->wq);
+ mutex_unlock(&evl->lock);
}
static int idxd_cdev_release(struct inode *node, struct file *filep)
@@ -401,6 +400,18 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
int rc;
dev_dbg(&pdev->dev, "%s called\n", __func__);
+
+ /*
+ * Due to an erratum in some of the devices supported by the driver,
+ * direct user submission to the device can be unsafe.
+ * (See the INTEL-SA-01084 security advisory)
+ *
+ * For the devices that exhibit this behavior, require that the user
+ * has CAP_SYS_RAWIO capabilities.
+ */
+ if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
rc = check_vma(wq, vma, __func__);
if (rc < 0)
return rc;
@@ -415,6 +426,70 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_page_prot);
}
+static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
+ struct dsa_hw_desc __user *udesc)
+{
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev;
+ const uint64_t comp_addr_align = is_dsa_dev(idxd_dev) ? 0x20 : 0x40;
+ void __iomem *portal = idxd_wq_portal_addr(wq);
+ struct dsa_hw_desc descriptor __aligned(64);
+ int rc;
+
+ rc = copy_from_user(&descriptor, udesc, sizeof(descriptor));
+ if (rc)
+ return -EFAULT;
+
+ /*
+ * DSA devices are capable of indirect ("batch") command submission.
+ * On devices where direct user submissions are not safe, we cannot
+ * allow this since there is no good way for us to verify these
+ * indirect commands.
+ */
+ if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
+ !wq->idxd->user_submission_safe)
+ return -EINVAL;
+ /*
+ * As per the programming specification, the completion address must be
+ * aligned to 32 or 64 bytes. If this is violated the hardware
+ * engine can get very confused (security issue).
+ */
+ if (!IS_ALIGNED(descriptor.completion_addr, comp_addr_align))
+ return -EINVAL;
+
+ if (wq_dedicated(wq))
+ iosubmit_cmds512(portal, &descriptor, 1);
+ else {
+ descriptor.priv = 0;
+ descriptor.pasid = ctx->pasid;
+ rc = idxd_enqcmds(wq, portal, &descriptor);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t len,
+ loff_t *unused)
+{
+ struct dsa_hw_desc __user *udesc = (struct dsa_hw_desc __user *)buf;
+ struct idxd_user_context *ctx = filp->private_data;
+ ssize_t written = 0;
+ int i;
+
+ for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
+ int rc = idxd_submit_user_descriptor(ctx, udesc + i);
+
+ if (rc)
+ return written ? written : rc;
+
+ written += sizeof(struct dsa_hw_desc);
+ }
+
+ return written;
+}
+
static __poll_t idxd_cdev_poll(struct file *filp,
struct poll_table_struct *wait)
{
@@ -437,6 +512,7 @@ static const struct file_operations idxd_cdev_fops = {
.open = idxd_cdev_open,
.release = idxd_cdev_release,
.mmap = idxd_cdev_mmap,
+ .write = idxd_cdev_write,
.poll = idxd_cdev_poll,
};
@@ -501,7 +577,6 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
struct idxd_cdev *idxd_cdev;
idxd_cdev = wq->idxd_cdev;
- ida_destroy(&file_ida);
wq->idxd_cdev = NULL;
cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
put_device(cdev_dev(idxd_cdev));
@@ -517,6 +592,14 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
if (idxd->state != IDXD_DEV_ENABLED)
return -ENXIO;
+ mutex_lock(&wq->wq_lock);
+
+ if (!idxd_wq_driver_name_match(wq, dev)) {
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
+ rc = -ENODEV;
+ goto wq_err;
+ }
+
/*
* User type WQ is enabled only when SVA is enabled for two reasons:
* - If no IOMMU or IOMMU Passthrough without SVA, userspace
@@ -532,14 +615,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
dev_dbg(&idxd->pdev->dev,
"User type WQ cannot be enabled without SVA.\n");
- return -EOPNOTSUPP;
- }
-
- mutex_lock(&wq->wq_lock);
-
- if (!idxd_wq_driver_name_match(wq, dev)) {
- idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
- rc = -ENODEV;
+ rc = -EOPNOTSUPP;
goto wq_err;
}
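
The new idxd write() path above consumes an array of fixed-size struct dsa_hw_desc records, validating completion-address alignment and (on unsafe devices) rejecting BATCH before submitting each one. A hedged user-space sketch of driving it (the device node path and an already-configured, bound work queue are assumptions):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <linux/idxd.h>

/* Submit one MEMMOVE descriptor via write(); fd is an open wq char dev,
 * e.g. /dev/dsa/wq0.0. comp must be 32-byte aligned for DSA. */
static int submit_memmove(int fd, uint64_t src, uint64_t dst,
			  uint64_t comp, uint32_t len)
{
	struct dsa_hw_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.opcode = DSA_OPCODE_MEMMOVE;
	desc.flags = IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CRAV;
	desc.src_addr = src;
	desc.dst_addr = dst;
	desc.xfer_size = len;
	desc.completion_addr = comp;

	return write(fd, &desc, sizeof(desc)) == sizeof(desc) ? 0 : -1;
}
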
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index f3f25ee676f3..ad4245cb301d 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
if (!evl || !evl->log)
return 0;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
dump_event_entry(idxd, s, i, &count, processed);
}
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index ecfdf4a8f1f8..c41ef195eeb9 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -775,7 +775,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;
evl->log_size = size;
@@ -796,7 +796,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
gencfg.evl_en = 1;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
err_alloc:
@@ -819,7 +819,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
if (!gencfg.evl_en)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
gencfg.evl_en = 0;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
@@ -836,7 +836,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
evl_dma = evl->dma;
evl->log = NULL;
evl->size = IDXD_EVL_SIZE_MIN;
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index a4099a1e2340..868b724a3b75 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -288,12 +288,13 @@ struct idxd_driver_data {
int evl_cr_off;
int cr_status_off;
int cr_result_off;
+ bool user_submission_safe;
load_device_defaults_fn_t load_device_defaults;
};
struct idxd_evl {
/* Lock to protect event log access. */
- spinlock_t lock;
+ struct mutex lock;
void *log;
dma_addr_t dma;
/* Total size of event log = number of entries * entry size. */
@@ -374,6 +375,8 @@ struct idxd_device {
struct dentry *dbgfs_dir;
struct dentry *dbgfs_evl_file;
+
+ bool user_submission_safe;
};
static inline unsigned int evl_ent_size(struct idxd_device *idxd)
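
The spinlock-to-mutex conversion for evl->lock exists because the drain path in cdev.c now sleeps with the lock held: drain_workqueue() blocks until queued work completes, which is forbidden in the atomic context a spinlock imposes but legal under a mutex. Sketch of the constraint (illustrative):

/* Legal only because evl->lock is a mutex: drain_workqueue() sleeps,
 * and sleeping under a spinlock would be a sleep-in-atomic bug. */
static void example_drain(struct idxd_evl *evl, struct workqueue_struct *wq)
{
	mutex_lock(&evl->lock);
	drain_workqueue(wq);
	mutex_unlock(&evl->lock);
}
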
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 4954adc6bb60..a7295943fa22 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -47,6 +47,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
.align = 32,
.dev_type = &dsa_device_type,
.evl_cr_off = offsetof(struct dsa_evl_entry, cr),
+ .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
.cr_status_off = offsetof(struct dsa_completion_record, status),
.cr_result_off = offsetof(struct dsa_completion_record, result),
},
@@ -57,6 +58,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
.align = 64,
.dev_type = &iax_device_type,
.evl_cr_off = offsetof(struct iax_evl_entry, cr),
+ .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
.cr_status_off = offsetof(struct iax_completion_record, status),
.cr_result_off = offsetof(struct iax_completion_record, error_code),
.load_device_defaults = idxd_load_iaa_device_defaults,
@@ -354,7 +356,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
if (!evl)
return -ENOMEM;
- spin_lock_init(&evl->lock);
+ mutex_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
idxd_name = dev_name(idxd_confdev(idxd));
@@ -774,6 +776,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
+ idxd->user_submission_safe = data->user_submission_safe;
+
return 0;
err_dev_register:
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 348aa21389a9..8dc029c86551 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.bits = 0;
evl_status.int_pending = 1;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@@ -380,7 +380,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
}
irqreturn_t idxd_misc_thread(int vec, void *data)
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index fdda6d604262..5e94247e1ea7 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -528,14 +528,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
-
/* migrate events if there is a valid target */
- if (target < nr_cpu_ids)
+ if (target < nr_cpu_ids) {
cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
- else
- target = -1;
-
- perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ }
return 0;
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 315c004f58e4..e16dbf9ab324 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -6,9 +6,6 @@
#include <uapi/linux/idxd.h>
/* PCI Config */
-#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
-#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
-
#define DEVICE_VERSION_1 0x100
#define DEVICE_VERSION_2 0x200
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 7f28f01be672..f706eae0e76b 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1197,12 +1197,35 @@ static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attrib
static struct device_attribute dev_attr_wq_enqcmds_retries =
__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
+static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *opcap_bmap)
+{
+ ssize_t pos;
+ int i;
+
+ pos = 0;
+ for (i = IDXD_MAX_OPCAP_BITS/64 - 1; i >= 0; i--) {
+ unsigned long val = opcap_bmap[i];
+
+		/*
+		 * On systems where direct user submissions are not safe, we
+		 * need to clear out the BATCH capability from the capability
+		 * mask in sysfs since we cannot support that command on such
+		 * systems.
+		 */
+ if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
+ clear_bit(DSA_OPCODE_BATCH % 64, &val);
+
+ pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
+ pos += sysfs_emit_at(buf, pos, "%c", i == 0 ? '\n' : ',');
+ }
+
+ return pos;
+}
+
static ssize_t wq_op_config_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = confdev_to_wq(dev);
- return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
+ return op_cap_show_common(dev, buf, wq->opcap_bmap);
}
static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
@@ -1455,7 +1478,7 @@ static ssize_t op_cap_show(struct device *dev,
{
struct idxd_device *idxd = confdev_to_idxd(dev);
- return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
+ return op_cap_show_common(dev, buf, idxd->opcap_bmap);
}
static DEVICE_ATTR_RO(op_cap);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 9b42f5e96b1e..003e1580b902 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,6 +24,7 @@
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
+#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>
@@ -137,7 +138,11 @@
* 0: Source on AIPS
* 12 Destination Bit(DP) 1: Destination on SPBA
* 0: Destination on AIPS
- * 13-15 --------- MUST BE 0
+ * 13 Source FIFO 1: Source is dual FIFO
+ * 0: Source is single FIFO
+ * 14 Destination FIFO 1: Destination is dual FIFO
+ * 0: Destination is single FIFO
+ * 15 --------- MUST BE 0
* 16-23 Higher WML HWML
* 24-27 N Total number of samples after
* which Pad adding/Swallowing
@@ -168,6 +173,8 @@
#define SDMA_WATERMARK_LEVEL_SPDIF BIT(10)
#define SDMA_WATERMARK_LEVEL_SP BIT(11)
#define SDMA_WATERMARK_LEVEL_DP BIT(12)
+#define SDMA_WATERMARK_LEVEL_SD BIT(13)
+#define SDMA_WATERMARK_LEVEL_DD BIT(14)
#define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE BIT(29)
@@ -175,6 +182,7 @@
#define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
#define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \
@@ -232,20 +240,23 @@ struct sdma_script_start_addrs {
s32 utra_addr;
s32 ram_code_start_addr;
/* End of v1 array */
- s32 mcu_2_ssish_addr;
+ union { s32 v1_end; s32 mcu_2_ssish_addr; };
s32 ssish_2_mcu_addr;
s32 hdmi_dma_addr;
/* End of v2 array */
- s32 zcanfd_2_mcu_addr;
+ union { s32 v2_end; s32 zcanfd_2_mcu_addr; };
s32 zqspi_2_mcu_addr;
s32 mcu_2_ecspi_addr;
s32 mcu_2_sai_addr;
s32 sai_2_mcu_addr;
s32 uart_2_mcu_rom_addr;
s32 uartsh_2_mcu_rom_addr;
+ s32 i2c_2_mcu_addr;
+ s32 mcu_2_i2c_addr;
/* End of v3 array */
- s32 mcu_2_zqspi_addr;
+ union { s32 v3_end; s32 mcu_2_zqspi_addr; };
/* End of v4 array */
+	s32 v4_end[];
};
/*
@@ -531,6 +542,7 @@ struct sdma_engine {
/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
bool clk_ratio;
bool fw_loaded;
+ struct gen_pool *iram_pool;
};
static int sdma_config_write(struct dma_chan *chan,
@@ -1072,6 +1084,11 @@ static int sdma_get_pc(struct sdma_channel *sdmac,
per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
break;
+ case IMX_DMATYPE_I2C:
+ per_2_emi = sdma->script_addrs->i2c_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_i2c_addr;
+ sdmac->is_ram_script = true;
+ break;
case IMX_DMATYPE_HDMI:
emi_2_per = sdma->script_addrs->hdmi_dma_addr;
sdmac->is_ram_script = true;
@@ -1255,6 +1272,16 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
+
+ /*
+	 * Limitation: the p2p script supports at most dual FIFOs, so when
+	 * more than one FIFO is in use, force-enable dual FIFO mode.
+ */
+ if (sdmac->n_fifos_src > 1)
+ sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SD;
+ if (sdmac->n_fifos_dst > 1)
+ sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DD;
}
static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
@@ -1358,8 +1385,14 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
{
int ret = -EBUSY;
- sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
- GFP_NOWAIT);
+ if (sdma->iram_pool)
+ sdma->bd0 = gen_pool_dma_alloc(sdma->iram_pool,
+ sizeof(struct sdma_buffer_descriptor),
+ &sdma->bd0_phys);
+ else
+ sdma->bd0 = dma_alloc_coherent(sdma->dev,
+ sizeof(struct sdma_buffer_descriptor),
+ &sdma->bd0_phys, GFP_NOWAIT);
if (!sdma->bd0) {
ret = -ENOMEM;
goto out;
@@ -1379,10 +1412,14 @@ out:
static int sdma_alloc_bd(struct sdma_desc *desc)
{
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+ struct sdma_engine *sdma = desc->sdmac->sdma;
int ret = 0;
- desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
- &desc->bd_phys, GFP_NOWAIT);
+ if (sdma->iram_pool)
+ desc->bd = gen_pool_dma_alloc(sdma->iram_pool, bd_size, &desc->bd_phys);
+ else
+ desc->bd = dma_alloc_coherent(sdma->dev, bd_size, &desc->bd_phys, GFP_NOWAIT);
+
if (!desc->bd) {
ret = -ENOMEM;
goto out;
@@ -1394,9 +1431,12 @@ out:
static void sdma_free_bd(struct sdma_desc *desc)
{
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+ struct sdma_engine *sdma = desc->sdmac->sdma;
- dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
- desc->bd_phys);
+ if (sdma->iram_pool)
+ gen_pool_free(sdma->iram_pool, (unsigned long)desc->bd, bd_size);
+ else
+ dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, desc->bd_phys);
}
static void sdma_desc_free(struct virt_dma_desc *vd)
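
The two hunks above repeat one pattern: allocate from the optional on-chip SRAM pool when the DT provides one, otherwise from coherent DMA memory, and free through whichever allocator produced the buffer. A hedged sketch of that pattern as a helper pair; sdma_alloc()/sdma_free() are illustrative names, not part of the driver:

    static void *sdma_alloc(struct sdma_engine *sdma, size_t size,
                            dma_addr_t *phys)
    {
        /* prefer on-chip SRAM if the DT provided an "iram" pool */
        if (sdma->iram_pool)
            return gen_pool_dma_alloc(sdma->iram_pool, size, phys);
        return dma_alloc_coherent(sdma->dev, size, phys, GFP_NOWAIT);
    }

    static void sdma_free(struct sdma_engine *sdma, void *cpu,
                          dma_addr_t phys, size_t size)
    {
        /* the free path must match the allocator used above */
        if (sdma->iram_pool)
            gen_pool_free(sdma->iram_pool, (unsigned long)cpu, size);
        else
            dma_free_coherent(sdma->dev, size, cpu, phys);
    }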
@@ -1643,6 +1683,9 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
if (count & 3 || sg->dma_address & 3)
goto err_bd_out;
break;
+ case DMA_SLAVE_BUSWIDTH_3_BYTES:
+ bd->mode.command = 3;
+ break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
bd->mode.command = 2;
if (count & 1 || sg->dma_address & 1)
@@ -1880,10 +1923,17 @@ static void sdma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&sdmac->vc.lock, flags);
}
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 45
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 46
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 \
+	(offsetof(struct sdma_script_start_addrs, v1_end) / sizeof(s32))
+
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 \
+	(offsetof(struct sdma_script_start_addrs, v2_end) / sizeof(s32))
+
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 \
+	(offsetof(struct sdma_script_start_addrs, v3_end) / sizeof(s32))
+
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 \
+	(offsetof(struct sdma_script_start_addrs, v4_end) / sizeof(s32))
static void sdma_add_scripts(struct sdma_engine *sdma,
const struct sdma_script_start_addrs *addr)
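
The magic array-size constants are gone: each vN_end marker is unioned with the first post-vN field (or left as a flexible trailing member for v4), so offsetof() yields the element count directly. A standalone sketch of the technique with made-up fields:

    #include <stddef.h>
    #include <stdio.h>

    struct addrs {
        int a, b;                       /* v1 entries */
        union { int v1_end; int c; };   /* first v2 slot doubles as v1 marker */
        int d;
        int v2_end[];                   /* flexible marker: v2 element count */
    };

    #define V1_SIZE (offsetof(struct addrs, v1_end) / sizeof(int))
    #define V2_SIZE (offsetof(struct addrs, v2_end) / sizeof(int))

    int main(void)
    {
        printf("%zu %zu\n", V1_SIZE, V2_SIZE);  /* prints: 2 4 */
        return 0;
    }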
@@ -2068,6 +2118,7 @@ static int sdma_init(struct sdma_engine *sdma)
{
int i, ret;
dma_addr_t ccb_phys;
+ int ccbsize;
ret = clk_enable(sdma->clk_ipg);
if (ret)
@@ -2083,10 +2134,14 @@ static int sdma_init(struct sdma_engine *sdma)
/* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
- sdma->channel_control = dma_alloc_coherent(sdma->dev,
- MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
- sizeof(struct sdma_context_data),
- &ccb_phys, GFP_KERNEL);
+ ccbsize = MAX_DMA_CHANNELS * (sizeof(struct sdma_channel_control)
+ + sizeof(struct sdma_context_data));
+
+ if (sdma->iram_pool)
+ sdma->channel_control = gen_pool_dma_alloc(sdma->iram_pool, ccbsize, &ccb_phys);
+ else
+ sdma->channel_control = dma_alloc_coherent(sdma->dev, ccbsize, &ccb_phys,
+ GFP_KERNEL);
if (!sdma->channel_control) {
ret = -ENOMEM;
@@ -2272,6 +2327,12 @@ static int sdma_probe(struct platform_device *pdev)
vchan_init(&sdmac->vc, &sdma->dma_device);
}
+ if (np) {
+ sdma->iram_pool = of_gen_pool_get(np, "iram", 0);
+ if (sdma->iram_pool)
+ dev_info(&pdev->dev, "alloc bd from iram.\n");
+ }
+
ret = sdma_init(sdma);
if (ret)
goto err_init;
diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c
index dba631783876..78c606f6d002 100644
--- a/drivers/dma/mcf-edma-main.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -195,7 +195,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];
mcf_chan->edma = mcf_edma;
- mcf_chan->slave_id = i;
+ mcf_chan->srcid = i;
mcf_chan->idle = true;
mcf_chan->dma_dir = DMA_NONE;
mcf_chan->vchan.desc_free = fsl_edma_free_desc;
@@ -277,7 +277,7 @@ bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
if (chan->device->dev->driver == &mcf_edma_driver.driver) {
struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);
- return (mcf_chan->slave_id == (uintptr_t)param);
+ return (mcf_chan->srcid == (uintptr_t)param);
}
return false;
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 4e76c4ec2d39..e001f4f7aa64 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -250,7 +250,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
- writel(val, pchan->base + reg);
+ writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -274,7 +274,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
- writel(val, od->base + reg);
+ writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index c359decc07a3..6b2793b07694 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -155,11 +155,6 @@ static inline struct device *chan2dev(struct dma_chan *chan)
return &chan->dev->device;
}
-static inline struct device *chan2parent(struct dma_chan *chan)
-{
- return chan->dev->device.parent;
-}
-
static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 5f6d7f1e095f..60c4de8dac1d 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1053,9 +1053,6 @@ static bool _trigger(struct pl330_thread *thrd)
thrd->req_running = idx;
- if (desc->rqtype == DMA_MEM_TO_DEV || desc->rqtype == DMA_DEV_TO_MEM)
- UNTIL(thrd, PL330_STATE_WFP);
-
return true;
}
@@ -3265,7 +3262,6 @@ MODULE_DEVICE_TABLE(amba, pl330_ids);
static struct amba_driver pl330_driver = {
.drv = {
- .owner = THIS_MODULE,
.name = "dma-pl330",
.pm = &pl330_pm,
},
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 202ac95227cb..721b4ac0857a 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -50,7 +50,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
@@ -947,22 +946,12 @@ static const struct acpi_device_id hidma_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif
-static const struct of_device_id hidma_match[] = {
- {.compatible = "qcom,hidma-1.0",},
- {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
- {.compatible = "qcom,hidma-1.2",
- .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
- {},
-};
-MODULE_DEVICE_TABLE(of, hidma_match);
-
static struct platform_driver hidma_driver = {
.probe = hidma_probe,
.remove_new = hidma_remove,
.shutdown = hidma_shutdown,
.driver = {
.name = "hidma",
- .of_match_table = hidma_match,
.acpi_match_table = ACPI_PTR(hidma_acpi_ids),
},
};
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 1d675f31252b..bb883e138ebf 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -7,12 +7,7 @@
#include <linux/dmaengine.h>
#include <linux/acpi.h>
-#include <linux/of.h>
#include <linux/property.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
@@ -327,115 +322,13 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif
-static const struct of_device_id hidma_mgmt_match[] = {
- {.compatible = "qcom,hidma-mgmt-1.0",},
- {},
-};
-MODULE_DEVICE_TABLE(of, hidma_mgmt_match);
-
static struct platform_driver hidma_mgmt_driver = {
.probe = hidma_mgmt_probe,
.driver = {
.name = "hidma-mgmt",
- .of_match_table = hidma_mgmt_match,
.acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
},
};
-#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
-static int object_counter;
-
-static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
-{
- struct platform_device *pdev_parent = of_find_device_by_node(np);
- struct platform_device_info pdevinfo;
- struct device_node *child;
- struct resource *res;
- int ret = 0;
-
- /* allocate a resource array */
- res = kcalloc(3, sizeof(*res), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- for_each_available_child_of_node(np, child) {
- struct platform_device *new_pdev;
-
- ret = of_address_to_resource(child, 0, &res[0]);
- if (!ret)
- goto out;
-
- ret = of_address_to_resource(child, 1, &res[1]);
- if (!ret)
- goto out;
-
- ret = of_irq_to_resource(child, 0, &res[2]);
- if (ret <= 0)
- goto out;
-
- memset(&pdevinfo, 0, sizeof(pdevinfo));
- pdevinfo.fwnode = &child->fwnode;
- pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
- pdevinfo.name = child->name;
- pdevinfo.id = object_counter++;
- pdevinfo.res = res;
- pdevinfo.num_res = 3;
- pdevinfo.data = NULL;
- pdevinfo.size_data = 0;
- pdevinfo.dma_mask = DMA_BIT_MASK(64);
- new_pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(new_pdev)) {
- ret = PTR_ERR(new_pdev);
- goto out;
- }
- new_pdev->dev.of_node = child;
- of_dma_configure(&new_pdev->dev, child, true);
- /*
- * It is assumed that calling of_msi_configure is safe on
- * platforms with or without MSI support.
- */
- of_msi_configure(&new_pdev->dev, child);
- }
-
- kfree(res);
-
- return ret;
-
-out:
- of_node_put(child);
- kfree(res);
-
- return ret;
-}
-#endif
-
-static int __init hidma_mgmt_init(void)
-{
-#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
- struct device_node *child;
-
- for_each_matching_node(child, hidma_mgmt_match) {
- /* device tree based firmware here */
- hidma_mgmt_of_populate_channels(child);
- }
-#endif
- /*
- * We do not check for return value here, as it is assumed that
- * platform_driver_register must not fail. The reason for this is that
- * the (potential) hidma_mgmt_of_populate_channels calls above are not
- * cleaned up if it does fail, and to do this work is quite
- * complicated. In particular, various calls of of_address_to_resource,
- * of_irq_to_resource, platform_device_register_full, of_dma_configure,
- * and of_msi_configure which then call other functions and so on, must
- * be cleaned up - this is not a trivial exercise.
- *
- * Currently, this module is not intended to be unloaded, and there is
- * no module_exit function defined which does the needed cleanup. For
- * this reason, we have to assume success here.
- */
- platform_driver_register(&hidma_mgmt_driver);
-
- return 0;
-}
-module_init(hidma_mgmt_init);
+module_platform_driver(hidma_mgmt_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 88547a23825b..3642508e88bb 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -746,6 +746,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
bytes_xfer = dma_desc->bytes_xfer +
sg_req[dma_desc->sg_idx].len - (wcount * 4);
+ if (dma_desc->bytes_req == bytes_xfer)
+ return 0;
+
residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
return residual;
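
Worked example of why the early return is needed: with bytes_req = 1024 and a fully transferred descriptor (bytes_xfer = 1024), the modulo expression gives 1024 - (1024 % 1024) = 1024, reporting a whole buffer still outstanding; the new check returns the correct residual of 0.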
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index e9f5250fbe4d..59d9eabc8b67 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -81,6 +81,8 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
*/
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
+ lockdep_assert_held(&vc->lock);
+
list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
return !list_empty(&vc->desc_issued);
}
@@ -96,6 +98,8 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
dma_cookie_t cookie;
+ lockdep_assert_held(&vc->lock);
+
cookie = vd->tx.cookie;
dma_cookie_complete(&vd->tx);
dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
@@ -146,6 +150,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+ lockdep_assert_held(&vc->lock);
+
list_add_tail(&vd->node, &vc->desc_terminated);
if (vc->cyclic == vd)
@@ -160,6 +166,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
*/
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
+ lockdep_assert_held(&vc->lock);
+
return list_first_entry_or_null(&vc->desc_issued,
struct virt_dma_desc, node);
}
@@ -177,6 +185,8 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
struct list_head *head)
{
+ lockdep_assert_held(&vc->lock);
+
list_splice_tail_init(&vc->desc_allocated, head);
list_splice_tail_init(&vc->desc_submitted, head);
list_splice_tail_init(&vc->desc_issued, head);
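
These helpers have always required the caller to hold vc->lock; the new lockdep_assert_held() annotations make a missing lock visible at runtime when lockdep is enabled. A hedged sketch of a conforming caller; the foo_* names are illustrative, not a real driver:

    static void foo_issue_pending(struct dma_chan *chan)
    {
        struct virt_dma_chan *vc = to_virt_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&vc->lock, flags);
        if (vchan_issue_pending(vc))
            foo_start_next_transfer(vc);    /* hypothetical helper */
        spin_unlock_irqrestore(&vc->lock, flags);
    }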
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index 98f5f6fb9ff9..6ad08878e938 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -117,6 +117,9 @@ struct xdma_hw_desc {
CHAN_CTRL_IE_WRITE_ERROR | \
CHAN_CTRL_IE_DESC_ERROR)
+/* bits of the channel status register */
+#define XDMA_CHAN_STATUS_BUSY BIT(0)
+
#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 170017ff2aad..e143a7330816 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -71,6 +71,8 @@ struct xdma_chan {
enum dma_transfer_direction dir;
struct dma_slave_config cfg;
u32 irq;
+ struct completion last_interrupt;
+ bool stop_requested;
};
/**
@@ -376,6 +378,8 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
return ret;
xchan->busy = true;
+ xchan->stop_requested = false;
+ reinit_completion(&xchan->last_interrupt);
return 0;
}
@@ -387,7 +391,6 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
int ret;
- u32 val;
struct xdma_device *xdev = xchan->xdev_hdl;
/* clear run stop bit to prevent any further auto-triggering */
@@ -395,13 +398,7 @@ static int xdma_xfer_stop(struct xdma_chan *xchan)
CHAN_CTRL_RUN_STOP);
if (ret)
return ret;
-
- /* Clear the channel status register */
- ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
- if (ret)
- return ret;
-
- return 0;
+ return ret;
}
/**
@@ -474,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev,
xchan->xdev_hdl = xdev;
xchan->base = base + i * XDMA_CHAN_STRIDE;
xchan->dir = dir;
+ xchan->stop_requested = false;
+ init_completion(&xchan->last_interrupt);
ret = xdma_channel_init(xchan);
if (ret)
@@ -521,6 +520,7 @@ static int xdma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
xdma_chan->busy = false;
+ xdma_chan->stop_requested = true;
vd = vchan_next_desc(&xdma_chan->vchan);
if (vd) {
list_del(&vd->node);
@@ -542,17 +542,26 @@ static int xdma_terminate_all(struct dma_chan *chan)
static void xdma_synchronize(struct dma_chan *chan)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ int st = 0;
+
+ /* If the engine continues running, wait for the last interrupt */
+ regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
+ if (st & XDMA_CHAN_STATUS_BUSY)
+ wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
vchan_synchronize(&xdma_chan->vchan);
}
/**
- * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
- * @sw_desc: tx descriptor state container
- * @src_addr: Value for a ->src_addr field of a first descriptor
- * @dst_addr: Value for a ->dst_addr field of a first descriptor
- * @size: Total size of a contiguous memory block
- * @filled_descs_num: Number of filled hardware descriptors for corresponding sw_desc
+ * xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
+ * More than one descriptor will be used if the size is bigger
+ * than XDMA_DESC_BLEN_MAX.
+ * @sw_desc: Descriptor container
+ * @src_addr: First value for the ->src_addr field
+ * @dst_addr: First value for the ->dst_addr field
+ * @size: Size of the contiguous memory block
+ * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
*/
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
u64 dst_addr, u32 size, u32 filled_descs_num)
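
Condensed, the stop/synchronize handshake added across the hunks in this file works as follows (fragments from the hunks above, not compilable as-is):

    init_completion(&xchan->last_interrupt);        /* channel setup */

    xchan->stop_requested = false;                  /* transfer start */
    reinit_completion(&xchan->last_interrupt);

    xdma_chan->stop_requested = true;               /* terminate_all */

    if (xchan->stop_requested)                      /* channel ISR */
        complete(&xchan->last_interrupt);

    if (st & XDMA_CHAN_STATUS_BUSY)                 /* synchronize */
        wait_for_completion_timeout(&xchan->last_interrupt,
                                    msecs_to_jiffies(1000));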
@@ -704,7 +713,7 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
desc_num = 0;
for (i = 0; i < periods; i++) {
desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
- addr += i * period_size;
+ addr += period_size;
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
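
The cyclic fix above corrects a quadratic address walk: the old increment addr += i * period_size advanced by 0, p, 2p, ... per pass, so with four periods the descriptors were built at offsets 0, 0, p and 3p instead of the intended 0, p, 2p and 3p.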
@@ -876,6 +885,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
u32 st;
bool repeat_tx;
+ if (xchan->stop_requested)
+ complete(&xchan->last_interrupt);
+
spin_lock(&xchan->vchan.lock);
/* get submitted request */
@@ -1295,6 +1307,7 @@ static const struct platform_device_id xdma_id_table[] = {
{ "xdma", 0},
{ },
};
+MODULE_DEVICE_TABLE(platform, xdma_id_table);
static struct platform_driver xdma_driver = {
.driver = {
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index b82815e64d24..36bd4825d389 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
- * @lock: lock to access struct xilinx_dpdma_chan
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
+ * @vchan.lock, if both are to be held.
* @desc_pool: descriptor allocation pool
* @err_task: error IRQ bottom half handler
* @desc: References to descriptors being processed
@@ -1042,9 +1043,8 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
struct xilinx_dpdma_tx_desc *active;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->lock);
xilinx_dpdma_debugfs_desc_done_irq(chan);
@@ -1056,7 +1056,7 @@ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
"chan%u: DONE IRQ with no active descriptor!\n",
chan->id);
- spin_unlock_irqrestore(&chan->lock, flags);
+ spin_unlock(&chan->lock);
}
/**
@@ -1071,10 +1071,9 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
{
struct xilinx_dpdma_tx_desc *pending;
struct xilinx_dpdma_sw_desc *sw_desc;
- unsigned long flags;
u32 desc_id;
- spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->lock);
pending = chan->desc.pending;
if (!chan->running || !pending)
@@ -1097,15 +1096,17 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
* Complete the active descriptor, if any, promote the pending
* descriptor to active, and queue the next transfer, if any.
*/
+ spin_lock(&chan->vchan.lock);
if (chan->desc.active)
vchan_cookie_complete(&chan->desc.active->vdesc);
chan->desc.active = pending;
chan->desc.pending = NULL;
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
out:
- spin_unlock_irqrestore(&chan->lock, flags);
+ spin_unlock(&chan->lock);
}
/**
@@ -1264,10 +1265,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
if (vchan_issue_pending(&chan->vchan))
xilinx_dpdma_chan_queue_transfer(chan);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock(&chan->vchan.lock);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1495,7 +1498,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
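
The updated @lock documentation and these hunks establish a single ordering rule for xilinx_dpdma_chan: chan->lock is always taken outside chan->vchan.lock. A sketch of the nesting every path now follows:

    spin_lock_irqsave(&chan->lock, flags);      /* outer: channel state */
    spin_lock(&chan->vchan.lock);               /* inner: descriptor lists */
    /* complete/promote descriptors, queue the next transfer */
    spin_unlock(&chan->vchan.lock);
    spin_unlock_irqrestore(&chan->lock, flags);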