author		Mohan Kumar <mkumard@nvidia.com>	2023-11-28 12:46:15 +0530
committer	Vinod Koul <vkoul@kernel.org>	2023-12-11 17:44:18 +0530
commit		25b636225a0816eac20b02fcb37daf6c722d0bed (patch)
tree		38b5bdbcc24e8705f4ed0c9aeb70afda8d213058
parent		d95fcb78e7f263f909ce492c3882a704067dc534 (diff)
dmaengine: tegra210-adma: Support dma-channel-mask property
To allow reserving specific DMA channels, add support for the dma-channel-mask property in the tegra210-adma driver.

Signed-off-by: Mohan Kumar <mkumard@nvidia.com>
Link: https://lore.kernel.org/r/20231128071615.31447-3-mkumard@nvidia.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
-rw-r--r--	drivers/dma/tegra210-adma.c	35
1 file changed, 33 insertions(+), 2 deletions(-)
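dma-channel-mask is the generic DMA binding property: set bits mark channels the driver may register with dmaengine, cleared bits mark channels reserved for other agents. As a rough illustration of that interpretation, here is a minimal sketch in plain user-space C (not the driver code; the channel count and mask value below are invented example inputs, whereas the driver takes the count from chip data and the mask from the device tree, defaulting to all channels enabled when the property is absent):

/*
 * Hedged sketch: how a dma-channel-mask value partitions channels into
 * usable and reserved sets. nr_channels and example_mask are made-up
 * example values, not taken from the driver.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int nr_channels = 8;	/* example channel count only */
	uint32_t example_mask = 0xF5;	/* bits 1 and 3 cleared -> reserved */

	for (unsigned int i = 0; i < nr_channels; i++) {
		if (example_mask & (1u << i))
			printf("channel %u: registered with dmaengine\n", i);
		else
			printf("channel %u: reserved, skipped by the driver\n", i);
	}
	return 0;
}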
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 7a0586633bf3..24ad7077c53b 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -153,6 +153,7 @@ struct tegra_adma {
void __iomem *base_addr;
struct clk *ahub_clk;
unsigned int nr_channels;
+ unsigned long *dma_chan_mask;
unsigned long rx_requests_reserved;
unsigned long tx_requests_reserved;
@@ -741,6 +742,10 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
for (i = 0; i < tdma->nr_channels; i++) {
tdc = &tdma->channels[i];
+ /* skip for reserved channels */
+ if (!tdc->tdma)
+ continue;
+
ch_reg = &tdc->ch_regs;
ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
/* skip if channel is not active */
@@ -779,6 +784,9 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
for (i = 0; i < tdma->nr_channels; i++) {
tdc = &tdma->channels[i];
+ /* skip for reserved channels */
+ if (!tdc->tdma)
+ continue;
ch_reg = &tdc->ch_regs;
/* skip if channel was not active earlier */
if (!ch_reg->cmd)
@@ -867,10 +875,31 @@ static int tegra_adma_probe(struct platform_device *pdev)
return PTR_ERR(tdma->ahub_clk);
}
+ tdma->dma_chan_mask = devm_kzalloc(&pdev->dev,
+ BITS_TO_LONGS(tdma->nr_channels) * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!tdma->dma_chan_mask)
+ return -ENOMEM;
+
+ /* Enable all channels by default */
+ bitmap_fill(tdma->dma_chan_mask, tdma->nr_channels);
+
+ ret = of_property_read_u32_array(pdev->dev.of_node, "dma-channel-mask",
+ (u32 *)tdma->dma_chan_mask,
+ BITS_TO_U32(tdma->nr_channels));
+ if (ret < 0 && (ret != -EINVAL)) {
+ dev_err(&pdev->dev, "dma-channel-mask is not complete.\n");
+ return ret;
+ }
+
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < tdma->nr_channels; i++) {
struct tegra_adma_chan *tdc = &tdma->channels[i];
+ /* skip for reserved channels */
+ if (!test_bit(i, tdma->dma_chan_mask))
+ continue;
+
tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
+ (cdata->ch_reg_size * i);
@@ -957,8 +986,10 @@ static void tegra_adma_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&tdma->dma_dev);
- for (i = 0; i < tdma->nr_channels; ++i)
- irq_dispose_mapping(tdma->channels[i].irq);
+ for (i = 0; i < tdma->nr_channels; ++i) {
+ if (tdma->channels[i].irq)
+ irq_dispose_mapping(tdma->channels[i].irq);
+ }
pm_runtime_disable(&pdev->dev);
}