Diffstat:
 drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 269 ++++++++++++++-------
 1 file changed, 199 insertions(+), 70 deletions(-)
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index 33baf1591a49..b23536645ff7 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
/*
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -35,7 +36,7 @@
/*
* The set of bus widths supported by the DMA controller. DW AXI DMAC supports
* master data bus width up to 512 bits (for both AXI master interfaces), but
- * it depends on IP block configurarion.
+ * it depends on IP block configuration.
*/
#define AXI_DMA_BUSWIDTHS \
(DMA_SLAVE_BUSWIDTH_1_BYTE | \
@@ -46,6 +47,10 @@
DMA_SLAVE_BUSWIDTH_32_BYTES | \
DMA_SLAVE_BUSWIDTH_64_BYTES)
+#define AXI_DMA_FLAG_HAS_APB_REGS BIT(0)
+#define AXI_DMA_FLAG_HAS_RESETS BIT(1)
+#define AXI_DMA_FLAG_USE_CFG2 BIT(2)
+
static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
@@ -58,6 +63,17 @@ static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
}
static inline void
+axi_dma_iowrite64(struct axi_dma_chip *chip, u32 reg, u64 val)
+{
+ iowrite64(val, chip->regs + reg);
+}
+
+static inline u64 axi_dma_ioread64(struct axi_dma_chip *chip, u32 reg)
+{
+ return ioread64(chip->regs + reg);
+}
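
A note on the new 64-bit accessors: on 32-bit builds, iowrite64()/ioread64() only exist when one of the io-64-nonatomic headers is pulled in (an assumption here; the include list is outside this hunk). A minimal sketch of the lo-hi split that fallback performs:

/*
 * Illustrative only -- roughly what <linux/io-64-nonatomic-lo-hi.h>
 * expands iowrite64() to on 32-bit builds: two 32-bit accesses, low
 * word first, so the 64-bit write is not atomic on the bus.
 */
static inline void axi_dma_iowrite64_lo_hi(struct axi_dma_chip *chip,
					   u32 reg, u64 val)
{
	iowrite32(lower_32_bits(val), chip->regs + reg);
	iowrite32(upper_32_bits(val), chip->regs + reg + 4);
}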
+
+static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
iowrite32(val, chan->chan_regs + reg);
@@ -86,7 +102,8 @@ static inline void axi_chan_config_write(struct axi_dma_chan *chan,
cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
- if (chan->chip->dw->hdata->reg_map_8_channels) {
+ if (chan->chip->dw->hdata->reg_map_8_channels &&
+ !chan->chip->dw->hdata->use_cfg2) {
cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
@@ -176,38 +193,73 @@ static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
- u32 val;
-
- val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
- val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
- if (chan->chip->dw->hdata->reg_map_8_channels)
- val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
- else
- val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
- axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+ u64 val;
+
+ if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
+ val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
+ if (chan->id >= DMAC_CHAN_16) {
+ val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
+ val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
+ } else {
+ val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
+ val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
+ }
+ axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
+ } else {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+ val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
+ if (chan->chip->dw->hdata->reg_map_8_channels)
+ val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+ else
+ val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
+ axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
+ }
}
static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
- u32 val;
-
- val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
- if (chan->chip->dw->hdata->reg_map_8_channels)
- val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
- BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
- else
- val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
+ u64 val;
+
+ if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
+ val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
+ if (chan->id >= DMAC_CHAN_16) {
+ val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
+ (u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
+ } else {
+ val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
- axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+ }
+ axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
+ } else {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+ if (chan->chip->dw->hdata->reg_map_8_channels) {
+ val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
+ BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+ } else {
+ val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
+ BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
+ }
+ axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
+ }
}
static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
- u32 val;
+ u64 val;
- val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+ if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16)
+ val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
+ else
+ val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
- return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
+ if (chan->id >= DMAC_CHAN_16)
+ return !!(val & ((u64)(BIT(chan->id) >> DMAC_CHAN_16) << DMAC_CHAN_BLOCK_SHIFT));
+ else
+ return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}
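
The channel-ID bit arithmetic above is dense, so here is a hypothetical helper that restates it, assuming the values this series uses in dw-axi-dmac.h (DMAC_CHAN_16 == 16, DMAC_CHAN_BLOCK_SHIFT == 32). For example, channel 18 gives BIT(18) >> 16 == BIT(2), which the extra 32-bit shift places in the upper word of the 64-bit DMAC_CHEN image:

/*
 * Sketch, not part of the patch: the enable bit for a channel, for
 * both register layouts. With DMAC_CHAN_EN_SHIFT == 0, channel 18
 * lands at bit 34 (bit 2 of the upper 32-bit word).
 */
static inline u64 axi_chan_en_bit(struct axi_dma_chan *chan)
{
	if (chan->id >= DMAC_CHAN_16)
		return (u64)(BIT(chan->id) >> DMAC_CHAN_16)
			<< (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT);

	return BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
}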
static void axi_dma_hw_init(struct axi_dma_chip *chip)
@@ -250,6 +302,7 @@ static struct axi_dma_desc *axi_desc_alloc(u32 num)
kfree(desc);
return NULL;
}
+ desc->nr_hw_descs = num;
return desc;
}
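
Why record the count at allocation time? Judging from the axi_desc_put() change in the next hunk, the point is that chan->descs_allocated is a channel-wide counter that keeps moving while this descriptor is in flight:

/*
 * axi_desc_put() below used to size its free loop with
 * atomic_read(&chan->descs_allocated), a *channel* total that can
 * change between submit and free; nr_hw_descs pins the loop to the
 * hw descriptors this descriptor actually owns.
 */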
@@ -276,7 +329,7 @@ static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
static void axi_desc_put(struct axi_dma_desc *desc)
{
struct axi_dma_chan *chan = desc->chan;
- int count = atomic_read(&chan->descs_allocated);
+ int count = desc->nr_hw_descs;
struct axi_dma_hw_desc *hw_desc;
int descs_put;
@@ -325,8 +378,6 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
completed_length = completed_blocks * len;
bytes = length - completed_length;
- } else {
- bytes = vd_to_axi_desc(vdesc)->length;
}
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -982,6 +1033,11 @@ static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
static void axi_chan_dump_lli(struct axi_dma_chan *chan,
struct axi_dma_hw_desc *desc)
{
+ if (!desc->lli) {
+ dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
+ return;
+ }
+
dev_err(dchan2dev(&chan->vc.chan),
"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
le64_to_cpu(desc->lli->sar),
@@ -1013,6 +1069,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
/* The bad descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+ axi_chan_name(chan));
+ goto out;
+ }
/* Remove the completed descriptor from issued list */
list_del(&vd->node);
@@ -1027,6 +1088,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
/* Try to restart the controller */
axi_chan_start_first_queued(chan);
+out:
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
@@ -1049,6 +1111,11 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* The completed descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+ axi_chan_name(chan));
+ goto out;
+ }
if (chan->cyclic) {
desc = vd_to_axi_desc(vd);
@@ -1073,11 +1140,9 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* Remove the completed descriptor from issued list before completing */
list_del(&vd->node);
vchan_cookie_complete(vd);
-
- /* Submit queued descriptors after processing the completed ones */
- axi_chan_start_first_queued(chan);
}
+out:
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
@@ -1089,10 +1154,10 @@ static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
u32 status, i;
- /* Disable DMAC inerrupts. We'll enable them after processing chanels */
+ /* Disable DMAC interrupts. We'll enable them after processing channels */
axi_dma_irq_disable(chip);
- /* Poll, clear and process every chanel interrupt status */
+ /* Poll, clear and process every channel interrupt status */
for (i = 0; i < dw->hdata->nr_channels; i++) {
chan = &dw->chan[i];
status = axi_chan_irq_read(chan);
@@ -1125,7 +1190,7 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
axi_chan_disable(chan);
ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
- !(val & chan_active), 1000, 10000);
+ !(val & chan_active), 1000, 50000);
if (ret == -ETIMEDOUT)
dev_warn(dchan2dev(dchan),
"%s failed to stop\n", axi_chan_name(chan));
@@ -1154,19 +1219,34 @@ static int dma_chan_pause(struct dma_chan *dchan)
struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
unsigned long flags;
unsigned int timeout = 20; /* timeout iterations */
- u32 val;
+ u64 val;
spin_lock_irqsave(&chan->vc.lock, flags);
- if (chan->chip->dw->hdata->reg_map_8_channels) {
- val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
- val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
- BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
- axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+ if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
+ val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
+ if (chan->id >= DMAC_CHAN_16) {
+ val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
+ (u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
+ } else {
+ val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
+ BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
+ }
+ axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
} else {
- val = BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
- BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
- axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
+ if (chan->chip->dw->hdata->reg_map_8_channels) {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+ val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
+ BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
+ axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
+ } else {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
+ val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
+ BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
+ axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
+ }
}
do {
@@ -1188,17 +1268,32 @@ static int dma_chan_pause(struct dma_chan *dchan)
/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
- u32 val;
-
- val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
- if (chan->chip->dw->hdata->reg_map_8_channels) {
- val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
- val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
- axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+ u64 val;
+
+ if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
+ val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
+ if (chan->id >= DMAC_CHAN_16) {
+ val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
+ val |= ((u64)(BIT(chan->id) >> DMAC_CHAN_16)
+ << (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
+ } else {
+ val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
+ val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
+ }
+ axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
} else {
- val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
- val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
- axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
+ if (chan->chip->dw->hdata->reg_map_8_channels) {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+ val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
+ val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
+ axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
+ } else {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
+ val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
+ val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
+ axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
+ }
}
chan->is_paused = false;
@@ -1348,13 +1443,31 @@ static int parse_device_properties(struct axi_dma_chip *chip)
return 0;
}
+static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip)
+{
+ int irq_count = platform_irq_count(pdev);
+ int ret;
+
+ for (int i = 0; i < irq_count; i++) {
+ chip->irq[i] = platform_get_irq(pdev, i);
+ if (chip->irq[i] < 0)
+ return chip->irq[i];
+ ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, chip);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
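
One thing the loop above relies on is that the platform never describes more interrupts than chip->irq[] can hold; the array appears to be sized DMAC_MAX_CHANNELS, judging by the dw_remove() loop later in this diff. A defensive variant, sketched under that assumption (not part of the patch):

static int axi_req_irqs_bounded(struct platform_device *pdev,
				struct axi_dma_chip *chip)
{
	int irq_count = platform_irq_count(pdev);
	int ret;

	if (irq_count < 0)
		return irq_count;

	/* Keep the request loop within the assumed chip->irq[] bound. */
	irq_count = min_t(int, irq_count, DMAC_MAX_CHANNELS);

	for (int i = 0; i < irq_count; i++) {
		chip->irq[i] = platform_get_irq(pdev, i);
		if (chip->irq[i] < 0)
			return chip->irq[i];

		ret = devm_request_irq(chip->dev, chip->irq[i],
				       dw_axi_dma_interrupt, IRQF_SHARED,
				       KBUILD_MODNAME, chip);
		if (ret < 0)
			return ret;
	}

	return 0;
}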
+
static int dw_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct axi_dma_chip *chip;
- struct resource *mem;
struct dw_axi_dma *dw;
struct dw_axi_dma_hcfg *hdata;
+ struct reset_control *resets;
+ unsigned int flags;
u32 i;
int ret;
@@ -1374,21 +1487,29 @@ static int dw_probe(struct platform_device *pdev)
chip->dev = &pdev->dev;
chip->dw->hdata = hdata;
- chip->irq = platform_get_irq(pdev, 0);
- if (chip->irq < 0)
- return chip->irq;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->regs = devm_ioremap_resource(chip->dev, mem);
+ chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
- if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
+ flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ if (flags & AXI_DMA_FLAG_HAS_APB_REGS) {
chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(chip->apb_regs))
return PTR_ERR(chip->apb_regs);
}
+ if (flags & AXI_DMA_FLAG_HAS_RESETS) {
+ resets = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(resets))
+ return PTR_ERR(resets);
+
+ ret = reset_control_deassert(resets);
+ if (ret)
+ return ret;
+ }
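
Note that nothing asserts the resets again on probe failure or unbind; whether that matters is platform-specific. A common pattern, sketched here (not something this patch does), is a devm action registered right after the deassert:

/* Hypothetical cleanup hook -- not part of this patch. */
static void dw_axi_dma_reset_assert(void *data)
{
	reset_control_assert(data);
}

	/* in dw_probe(), after reset_control_deassert() succeeds: */
	ret = devm_add_action_or_reset(&pdev->dev,
				       dw_axi_dma_reset_assert, resets);
	if (ret)
		return ret;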
+
+ chip->dw->hdata->use_cfg2 = !!(flags & AXI_DMA_FLAG_USE_CFG2);
+
chip->core_clk = devm_clk_get(chip->dev, "core-clk");
if (IS_ERR(chip->core_clk))
return PTR_ERR(chip->core_clk);
@@ -1406,8 +1527,7 @@ static int dw_probe(struct platform_device *pdev)
if (!dw->chan)
return -ENOMEM;
- ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
- IRQF_SHARED, KBUILD_MODNAME, chip);
+ ret = axi_req_irqs(pdev, chip);
if (ret)
return ret;
@@ -1430,7 +1550,6 @@ static int dw_probe(struct platform_device *pdev)
dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);
/* DMA capabilities */
- dw->dma.chancnt = hdata->nr_channels;
dw->dma.max_burst = hdata->axi_rw_burst_len;
dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
@@ -1501,7 +1620,7 @@ err_pm_disable:
return ret;
}
-static int dw_remove(struct platform_device *pdev)
+static void dw_remove(struct platform_device *pdev)
{
struct axi_dma_chip *chip = platform_get_drvdata(pdev);
struct dw_axi_dma *dw = chip->dw;
@@ -1521,7 +1640,9 @@ static int dw_remove(struct platform_device *pdev)
pm_runtime_disable(chip->dev);
axi_dma_suspend(chip);
- devm_free_irq(chip->dev, chip->irq, chip);
+ for (i = 0; i < DMAC_MAX_CHANNELS; i++)
+ if (chip->irq[i] > 0)
+ devm_free_irq(chip->dev, chip->irq[i], chip);
of_dma_controller_free(chip->dev->of_node);
@@ -1530,8 +1651,6 @@ static int dw_remove(struct platform_device *pdev)
list_del(&chan->vc.chan.device_node);
tasklet_kill(&chan->vc.task);
}
-
- return 0;
}
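
With dw_remove() now returning void, the driver is presumably registered through the void-returning remove callback of this kernel era; the platform_driver definition sits outside the hunks shown, so the wiring below is an assumption:

/*
 * Assumed wiring, circa Linux 6.x; the real definition is outside
 * this diff. Every identifier except dw_driver appears elsewhere in
 * the file.
 */
static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove_new	= dw_remove,
	.driver = {
		.name		= KBUILD_MODNAME,
		.of_match_table	= dw_dma_of_id_table,
		.pm		= &dw_axi_dma_pm_ops,
	},
};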
static const struct dev_pm_ops dw_axi_dma_pm_ops = {
@@ -1539,8 +1658,18 @@ static const struct dev_pm_ops dw_axi_dma_pm_ops = {
};
static const struct of_device_id dw_dma_of_id_table[] = {
- { .compatible = "snps,axi-dma-1.01a" },
- { .compatible = "intel,kmb-axi-dma" },
+ {
+ .compatible = "snps,axi-dma-1.01a"
+ }, {
+ .compatible = "intel,kmb-axi-dma",
+ .data = (void *)AXI_DMA_FLAG_HAS_APB_REGS,
+ }, {
+ .compatible = "starfive,jh7110-axi-dma",
+ .data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),
+ }, {
+ .compatible = "starfive,jh8100-axi-dma",
+ .data = (void *)AXI_DMA_FLAG_HAS_RESETS,
+ },
{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);