Diffstat (limited to 'drivers/dma/dma-jz4780.c')
| -rw-r--r-- | drivers/dma/dma-jz4780.c | 555 |
1 file changed, 393 insertions(+), 162 deletions(-)
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 7373b7a555ec..100057603fd4 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -1,17 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Ingenic JZ4780 DMA controller
  *
  * Copyright (c) 2015 Imagination Technologies
  * Author: Alex Smith <alex@alex-smith.me.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
  */

 #include <linux/clk.h>
 #include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -23,33 +20,35 @@
 #include "dmaengine.h"
 #include "virt-dma.h"

-#define JZ_DMA_NR_CHANNELS	32
-
 /* Global registers. */
-#define JZ_DMA_REG_DMAC		0x1000
-#define JZ_DMA_REG_DIRQP	0x1004
-#define JZ_DMA_REG_DDR		0x1008
-#define JZ_DMA_REG_DDRS		0x100c
-#define JZ_DMA_REG_DMACP	0x101c
-#define JZ_DMA_REG_DSIRQP	0x1020
-#define JZ_DMA_REG_DSIRQM	0x1024
-#define JZ_DMA_REG_DCIRQP	0x1028
-#define JZ_DMA_REG_DCIRQM	0x102c
+#define JZ_DMA_REG_DMAC		0x00
+#define JZ_DMA_REG_DIRQP	0x04
+#define JZ_DMA_REG_DDR		0x08
+#define JZ_DMA_REG_DDRS		0x0c
+#define JZ_DMA_REG_DCKE		0x10
+#define JZ_DMA_REG_DCKES	0x14
+#define JZ_DMA_REG_DCKEC	0x18
+#define JZ_DMA_REG_DMACP	0x1c
+#define JZ_DMA_REG_DSIRQP	0x20
+#define JZ_DMA_REG_DSIRQM	0x24
+#define JZ_DMA_REG_DCIRQP	0x28
+#define JZ_DMA_REG_DCIRQM	0x2c

 /* Per-channel registers. */
 #define JZ_DMA_REG_CHAN(n)	(n * 0x20)
-#define JZ_DMA_REG_DSA(n)	(0x00 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DTA(n)	(0x04 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DTC(n)	(0x08 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DRT(n)	(0x0c + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DCS(n)	(0x10 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DCM(n)	(0x14 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DDA(n)	(0x18 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DSD(n)	(0x1c + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DSA		0x00
+#define JZ_DMA_REG_DTA		0x04
+#define JZ_DMA_REG_DTC		0x08
+#define JZ_DMA_REG_DRT		0x0c
+#define JZ_DMA_REG_DCS		0x10
+#define JZ_DMA_REG_DCM		0x14
+#define JZ_DMA_REG_DDA		0x18
+#define JZ_DMA_REG_DSD		0x1c

 #define JZ_DMA_DMAC_DMAE	BIT(0)
 #define JZ_DMA_DMAC_AR		BIT(2)
 #define JZ_DMA_DMAC_HLT		BIT(3)
+#define JZ_DMA_DMAC_FAIC	BIT(27)
 #define JZ_DMA_DMAC_FMSC	BIT(31)

 #define JZ_DMA_DRT_AUTO		0x8
@@ -86,6 +85,15 @@
 		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
 		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

+#define JZ4780_DMA_CTRL_OFFSET	0x1000
+
+/* macros for use with jz4780_dma_soc_data.flags */
+#define JZ_SOC_DATA_ALLOW_LEGACY_DT	BIT(0)
+#define JZ_SOC_DATA_PROGRAMMABLE_DMA	BIT(1)
+#define JZ_SOC_DATA_PER_CHAN_PM		BIT(2)
+#define JZ_SOC_DATA_NO_DCKES_DCKEC	BIT(3)
+#define JZ_SOC_DATA_BREAK_LINKS		BIT(4)
+
 /**
  * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
  * @dcm: value for the DCM (channel command) register
@@ -94,17 +102,12 @@
  * @dtc: transfer count (number of blocks of the transfer size specified in DCM
  *	to transfer) in the low 24 bits, offset of the next descriptor from the
  *	descriptor base address in the upper 8 bits.
- * @sd: target/source stride difference (in stride transfer mode).
- * @drt: request type
  */
 struct jz4780_dma_hwdesc {
-	uint32_t dcm;
-	uint32_t dsa;
-	uint32_t dta;
-	uint32_t dtc;
-	uint32_t sd;
-	uint32_t drt;
-	uint32_t reserved[2];
+	u32 dcm;
+	u32 dsa;
+	u32 dta;
+	u32 dtc;
 };

 /* Size of allocations for hardware descriptor blocks. */
@@ -119,7 +122,8 @@ struct jz4780_dma_desc {
 	dma_addr_t desc_phys;
 	unsigned int count;
 	enum dma_transaction_type type;
-	uint32_t status;
+	u32 transfer_type;
+	u32 status;
 };

 struct jz4780_dma_chan {
@@ -127,27 +131,34 @@ struct jz4780_dma_chan {
 	unsigned int id;
 	struct dma_pool *desc_pool;

-	uint32_t transfer_type;
-	uint32_t transfer_shift;
+	u32 transfer_type_tx, transfer_type_rx;
+	u32 transfer_shift;
 	struct dma_slave_config	config;

 	struct jz4780_dma_desc *desc;
 	unsigned int curr_hwdesc;
 };

+struct jz4780_dma_soc_data {
+	unsigned int nb_channels;
+	unsigned int transfer_ord_max;
+	unsigned long flags;
+};
+
 struct jz4780_dma_dev {
 	struct dma_device dma_device;
-	void __iomem *base;
+	void __iomem *chn_base;
+	void __iomem *ctrl_base;
 	struct clk *clk;
 	unsigned int irq;
+	const struct jz4780_dma_soc_data *soc_data;

-	uint32_t chan_reserved;
-	struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
+	u32 chan_reserved;
+	struct jz4780_dma_chan chan[];
 };

 struct jz4780_dma_filter_data {
-	struct device_node *of_node;
-	uint32_t transfer_type;
+	u32 transfer_type_tx, transfer_type_rx;
 	int channel;
 };

@@ -169,21 +180,57 @@ static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
 			    dma_device);
 }

-static inline uint32_t jz4780_dma_readl(struct jz4780_dma_dev *jzdma,
+static inline u32 jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
+	unsigned int chn, unsigned int reg)
+{
+	return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
+}
+
+static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
+	unsigned int chn, unsigned int reg, u32 val)
+{
+	writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
+}
+
+static inline u32 jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
 	unsigned int reg)
 {
-	return readl(jzdma->base + reg);
+	return readl(jzdma->ctrl_base + reg);
 }

-static inline void jz4780_dma_writel(struct jz4780_dma_dev *jzdma,
-	unsigned int reg, uint32_t val)
+static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
+	unsigned int reg, u32 val)
 {
-	writel(val, jzdma->base + reg);
+	writel(val, jzdma->ctrl_base + reg);
 }

-static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
-	struct jz4780_dma_chan *jzchan, unsigned int count,
-	enum dma_transaction_type type)
+static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
+	unsigned int chn)
+{
+	if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
+		unsigned int reg;
+
+		if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
+			reg = JZ_DMA_REG_DCKE;
+		else
+			reg = JZ_DMA_REG_DCKES;
+
+		jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
+	}
+}
+
+static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
+	unsigned int chn)
+{
+	if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
+	    !(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
+		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
+}
+
+static struct jz4780_dma_desc *
+jz4780_dma_desc_alloc(struct jz4780_dma_chan *jzchan, unsigned int count,
+		      enum dma_transaction_type type,
+		      enum dma_transfer_direction direction)
 {
 	struct jz4780_dma_desc *desc;

@@ -203,6 +250,12 @@ static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
 	desc->count = count;
 	desc->type = type;
+
+	if (direction == DMA_DEV_TO_MEM)
+		desc->transfer_type = jzchan->transfer_type_rx;
+	else
+		desc->transfer_type = jzchan->transfer_type_tx;
+
 	return desc;
 }

@@ -215,8 +268,10 @@ static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
 	kfree(desc);
 }

-static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
+static u32 jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
+	unsigned long val, u32 *shift)
 {
+	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
 	int ord = ffs(val) - 1;

 	/*
@@ -228,8 +283,8 @@
 	 */
 	if (ord == 3)
 		ord = 2;
-	else if (ord > 7)
-		ord = 7;
+	else if (ord > jzdma->soc_data->transfer_ord_max)
+		ord = jzdma->soc_data->transfer_ord_max;

 	*shift = ord;
@@ -256,13 +311,12 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
 	enum dma_transfer_direction direction)
 {
 	struct dma_slave_config *config = &jzchan->config;
-	uint32_t width, maxburst, tsz;
+	u32 width, maxburst, tsz;

 	if (direction == DMA_MEM_TO_DEV) {
 		desc->dcm = JZ_DMA_DCM_SAI;
 		desc->dsa = addr;
 		desc->dta = config->dst_addr;
-		desc->drt = jzchan->transfer_type;

 		width = config->dst_addr_width;
 		maxburst = config->dst_maxburst;
@@ -270,7 +324,6 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
 		desc->dcm = JZ_DMA_DCM_DAI;
 		desc->dsa = config->src_addr;
 		desc->dta = addr;
-		desc->drt = jzchan->transfer_type;

 		width = config->src_addr_width;
 		maxburst = config->src_maxburst;
@@ -283,7 +336,7 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
 	 * divisible by the transfer size, and we must not use more than the
 	 * maximum burst specified by the user.
 	 */
-	tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst),
+	tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
 				       &jzchan->transfer_shift);

 	switch (width) {
@@ -311,11 +364,12 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
 	void *context)
 {
 	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
 	struct jz4780_dma_desc *desc;
 	unsigned int i;
 	int err;

-	desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
+	desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE, direction);
 	if (!desc)
 		return NULL;

@@ -331,8 +385,9 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
 		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;

-		if (i != (sg_len - 1)) {
-			/* Automatically proceeed to the next descriptor. */
+		if (i != (sg_len - 1) &&
+		    !(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
+			/* Automatically proceed to the next descriptor. */
 			desc->desc[i].dcm |= JZ_DMA_DCM_LINK;

 			/*
@@ -363,7 +418,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(

 	periods = buf_len / period_len;

-	desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
+	desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC, direction);
 	if (!desc)
 		return NULL;

@@ -406,18 +461,19 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
 {
 	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_desc *desc;
-	uint32_t tsz;
+	u32 tsz;

-	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
+	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY, 0);
 	if (!desc)
 		return NULL;

-	tsz = jz4780_dma_transfer_size(dest | src | len,
+	tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
 				       &jzchan->transfer_shift);

+	desc->transfer_type = JZ_DMA_DRT_AUTO;
+
 	desc->desc[0].dsa = src;
 	desc->desc[0].dta = dest;
-	desc->desc[0].drt = JZ_DMA_DRT_AUTO;
 	desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
 			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
 			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
@@ -472,18 +528,34 @@ static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
 			(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
 	}

-	/* Use 8-word descriptors. */
-	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), JZ_DMA_DCS_DES8);
+	/* Enable the channel's clock. */
+	jz4780_dma_chan_enable(jzdma, jzchan->id);
+
+	/* Use 4-word descriptors. */
+	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
+
+	/* Set transfer type. */
+	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
+			      jzchan->desc->transfer_type);
+
+	/*
+	 * Set the transfer count. This is redundant for a descriptor-driven
+	 * transfer. However, there can be a delay between the transfer start
+	 * time and when DTCn reg contains the new transfer count. Setting
+	 * it explicitly ensures residue is computed correctly at all times.
+	 */
+	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
+			      jzchan->desc->desc[jzchan->curr_hwdesc].dtc);

 	/* Write descriptor address and initiate descriptor fetch. */
 	desc_phys = jzchan->desc->desc_phys +
 		    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
-	jz4780_dma_writel(jzdma, JZ_DMA_REG_DDA(jzchan->id), desc_phys);
-	jz4780_dma_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
+	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
+	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));

 	/* Enable the channel. */
-	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id),
-			  JZ_DMA_DCS_DES8 | JZ_DMA_DCS_CTE);
+	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
+			      JZ_DMA_DCS_CTE);
 }

 static void jz4780_dma_issue_pending(struct dma_chan *chan)
@@ -509,12 +581,14 @@
 	spin_lock_irqsave(&jzchan->vchan.lock, flags);
 	/* Clear the DMA status and stop the transfer. */
-	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
 	if (jzchan->desc) {
-		jz4780_dma_desc_free(&jzchan->desc->vdesc);
+		vchan_terminate_vdesc(&jzchan->desc->vdesc);
 		jzchan->desc = NULL;
 	}

+	jz4780_dma_chan_disable(jzdma, jzchan->id);
+
 	vchan_get_all_descriptors(&jzchan->vchan, &head);

 	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
@@ -523,6 +597,15 @@
 	return 0;
 }

+static void jz4780_dma_synchronize(struct dma_chan *chan)
+{
+	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
+
+	vchan_synchronize(&jzchan->vchan);
+	jz4780_dma_chan_disable(jzdma, jzchan->id);
+}
+
 static int jz4780_dma_config(struct dma_chan *chan,
 	struct dma_slave_config *config)
 {
@@ -542,21 +625,17 @@ static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
 	struct jz4780_dma_desc *desc, unsigned int next_sg)
 {
 	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
-	unsigned int residue, count;
+	unsigned int count = 0;
 	unsigned int i;

-	residue = 0;
-
 	for (i = next_sg; i < desc->count; i++)
-		residue += desc->desc[i].dtc << jzchan->transfer_shift;
+		count += desc->desc[i].dtc & GENMASK(23, 0);

-	if (next_sg != 0) {
-		count = jz4780_dma_readl(jzdma,
-					 JZ_DMA_REG_DTC(jzchan->id));
-		residue += count << jzchan->transfer_shift;
-	}
+	if (next_sg != 0)
+		count += jz4780_dma_chn_readl(jzdma, jzchan->id,
+					      JZ_DMA_REG_DTC);

-	return residue;
+	return count << jzchan->transfer_shift;
 }

 static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
@@ -566,41 +645,46 @@
 	struct virt_dma_desc *vdesc;
 	enum dma_status status;
 	unsigned long flags;
+	unsigned long residue = 0;
+
+	spin_lock_irqsave(&jzchan->vchan.lock, flags);

 	status = dma_cookie_status(chan, cookie, txstate);
 	if ((status == DMA_COMPLETE) || (txstate == NULL))
-		return status;
-
-	spin_lock_irqsave(&jzchan->vchan.lock, flags);
+		goto out_unlock_irqrestore;

 	vdesc = vchan_find_desc(&jzchan->vchan, cookie);
 	if (vdesc) {
 		/* On the issued list, so hasn't been processed yet */
-		txstate->residue = jz4780_dma_desc_residue(jzchan,
+		residue = jz4780_dma_desc_residue(jzchan,
 					to_jz4780_dma_desc(vdesc), 0);
 	} else if (cookie == jzchan->desc->vdesc.tx.cookie) {
-		txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
-			  (jzchan->curr_hwdesc + 1) % jzchan->desc->count);
-	} else
-		txstate->residue = 0;
+		residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
+					jzchan->curr_hwdesc + 1);
+	}
+	dma_set_residue(txstate, residue);

 	if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
 	    && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
 		status = DMA_ERROR;

+out_unlock_irqrestore:
 	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
 	return status;
 }

-static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
-	struct jz4780_dma_chan *jzchan)
+static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
+				struct jz4780_dma_chan *jzchan)
 {
-	uint32_t dcs;
+	const unsigned int soc_flags = jzdma->soc_data->flags;
+	struct jz4780_dma_desc *desc = jzchan->desc;
+	u32 dcs;
+	bool ack = true;

 	spin_lock(&jzchan->vchan.lock);

-	dcs = jz4780_dma_readl(jzdma, JZ_DMA_REG_DCS(jzchan->id));
-	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+	dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
+	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
 	if (dcs & JZ_DMA_DCS_AR) {
 		dev_warn(&jzchan->vchan.chan.dev->device,
@@ -618,12 +702,23 @@
 	if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
 		if (jzchan->desc->type == DMA_CYCLIC) {
 			vchan_cyclic_callback(&jzchan->desc->vdesc);
+
+			jz4780_dma_begin(jzchan);
+		} else if (dcs & JZ_DMA_DCS_TT) {
+			if (!(soc_flags & JZ_SOC_DATA_BREAK_LINKS) ||
+			    (jzchan->curr_hwdesc + 1 == desc->count)) {
+				vchan_cookie_complete(&desc->vdesc);
+				jzchan->desc = NULL;
+			}
+
+			jz4780_dma_begin(jzchan);
 		} else {
-			vchan_cookie_complete(&jzchan->desc->vdesc);
-			jzchan->desc = NULL;
+			/* False positive - continue the transfer */
+			ack = false;
+			jz4780_dma_chn_writel(jzdma, jzchan->id,
+					      JZ_DMA_REG_DCS,
+					      JZ_DMA_DCS_CTE);
 		}
-
-		jz4780_dma_begin(jzchan);
 	}
 	} else {
 		dev_err(&jzchan->vchan.chan.dev->device,
@@ -631,30 +726,32 @@
 	}

 	spin_unlock(&jzchan->vchan.lock);
+
+	return ack;
 }

 static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
 {
 	struct jz4780_dma_dev *jzdma = data;
-	uint32_t pending, dmac;
+	unsigned int nb_channels = jzdma->soc_data->nb_channels;
+	unsigned long pending;
+	u32 dmac;
 	int i;

-	pending = jz4780_dma_readl(jzdma, JZ_DMA_REG_DIRQP);
-
-	for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
-		if (!(pending & (1<<i)))
-			continue;
+	pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);

-		jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
+	for_each_set_bit(i, &pending, nb_channels) {
+		if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
+			pending &= ~BIT(i);
 	}

 	/* Clear halt and address error status of all channels. */
-	dmac = jz4780_dma_readl(jzdma, JZ_DMA_REG_DMAC);
+	dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
 	dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
-	jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
+	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
 	/* Clear interrupt pending status. */
-	jz4780_dma_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
+	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);

 	return IRQ_HANDLED;
 }
@@ -691,8 +788,6 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
 	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
 	struct jz4780_dma_filter_data *data = param;

-	if (jzdma->dma_device.dev->of_node != data->of_node)
-		return false;

 	if (data->channel > -1) {
 		if (data->channel != jzchan->id)
@@ -701,7 +796,8 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
 			return false;
 	}

-	jzchan->transfer_type = data->transfer_type;
+	jzchan->transfer_type_tx = data->transfer_type_tx;
+	jzchan->transfer_type_rx = data->transfer_type_rx;

 	return true;
 }
@@ -713,15 +809,20 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
 	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
 	struct jz4780_dma_filter_data data;

-	if (dma_spec->args_count != 2)
+	if (dma_spec->args_count == 2) {
+		data.transfer_type_tx = dma_spec->args[0];
+		data.transfer_type_rx = dma_spec->args[0];
+		data.channel = dma_spec->args[1];
+	} else if (dma_spec->args_count == 3) {
+		data.transfer_type_tx = dma_spec->args[0];
+		data.transfer_type_rx = dma_spec->args[1];
+		data.channel = dma_spec->args[2];
+	} else {
 		return NULL;
-
-	data.of_node = ofdma->of_node;
-	data.transfer_type = dma_spec->args[0];
-	data.channel = dma_spec->args[1];
+	}

 	if (data.channel > -1) {
-		if (data.channel >= JZ_DMA_NR_CHANNELS) {
+		if (data.channel >= jzdma->soc_data->nb_channels) {
 			dev_err(jzdma->dma_device.dev,
 				"device requested non-existent channel %u\n",
 				data.channel);
@@ -736,60 +837,70 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
 			return NULL;
 		}

-		jzdma->chan[data.channel].transfer_type = data.transfer_type;
+		jzdma->chan[data.channel].transfer_type_tx = data.transfer_type_tx;
+		jzdma->chan[data.channel].transfer_type_rx = data.transfer_type_rx;

 		return dma_get_slave_channel(
 			&jzdma->chan[data.channel].vchan.chan);
 	} else {
-		return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
+		return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data,
+					     ofdma->of_node);
 	}
 }

 static int jz4780_dma_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	const struct jz4780_dma_soc_data *soc_data;
 	struct jz4780_dma_dev *jzdma;
 	struct jz4780_dma_chan *jzchan;
 	struct dma_device *dd;
 	struct resource *res;
 	int i, ret;

-	jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
+	if (!dev->of_node) {
+		dev_err(dev, "This driver must be probed from devicetree\n");
+		return -EINVAL;
+	}
+
+	soc_data = device_get_match_data(dev);
+	if (!soc_data)
+		return -EINVAL;
+
+	jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
+			     soc_data->nb_channels), GFP_KERNEL);
 	if (!jzdma)
 		return -ENOMEM;

+	jzdma->soc_data = soc_data;
 	platform_set_drvdata(pdev, jzdma);

-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
+	jzdma->chn_base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(jzdma->chn_base))
+		return PTR_ERR(jzdma->chn_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res) {
+		jzdma->ctrl_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(jzdma->ctrl_base))
+			return PTR_ERR(jzdma->ctrl_base);
+	} else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
+		/*
+		 * On JZ4780, if the second memory resource was not supplied,
+		 * assume we're using an old devicetree, and calculate the
+		 * offset to the control registers.
+		 */
+		jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
+	} else {
 		dev_err(dev, "failed to get I/O memory\n");
 		return -EINVAL;
 	}

-	jzdma->base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(jzdma->base))
-		return PTR_ERR(jzdma->base);
-
-	ret = platform_get_irq(pdev, 0);
-	if (ret < 0) {
-		dev_err(dev, "failed to get IRQ: %d\n", ret);
-		return ret;
-	}
-
-	jzdma->irq = ret;
-
-	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
-			  jzdma);
-	if (ret) {
-		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
-		return ret;
-	}
-
 	jzdma->clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(jzdma->clk)) {
 		dev_err(dev, "failed to get clock\n");
 		ret = PTR_ERR(jzdma->clk);
-		goto err_free_irq;
+		return ret;
 	}

 	clk_prepare_enable(jzdma->clk);
@@ -800,6 +911,14 @@ static int jz4780_dma_probe(struct platform_device *pdev)

 	dd = &jzdma->dma_device;

+	/*
+	 * The real segment size limit is dependent on the size unit selected
+	 * for the transfer. Because the size unit is selected automatically
+	 * and may be as small as 1 byte, use a safe limit of 2^24-1 bytes to
+	 * ensure the 24-bit transfer count in the descriptor cannot overflow.
+	 */
+	dma_set_max_seg_size(dev, 0xffffff);
+
 	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 	dma_cap_set(DMA_SLAVE, dd->cap_mask);
 	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
@@ -813,25 +932,29 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
 	dd->device_config = jz4780_dma_config;
 	dd->device_terminate_all = jz4780_dma_terminate_all;
+	dd->device_synchronize = jz4780_dma_synchronize;
 	dd->device_tx_status = jz4780_dma_tx_status;
 	dd->device_issue_pending = jz4780_dma_issue_pending;
 	dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
 	dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
 	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	dd->max_sg_burst = JZ_DMA_MAX_DESC;

 	/*
 	 * Enable DMA controller, mark all channels as not programmable.
 	 * Also set the FMSC bit - it increases MSC performance, so it makes
 	 * little sense not to enable it.
 	 */
-	jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC,
-			  JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC);
-	jz4780_dma_writel(jzdma, JZ_DMA_REG_DMACP, 0);
+	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
+			       JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);
+
+	if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
+		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);

 	INIT_LIST_HEAD(&dd->channels);

-	for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
+	for (i = 0; i < soc_data->nb_channels; i++) {
 		jzchan = &jzdma->chan[i];
 		jzchan->id = i;

@@ -839,10 +962,31 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 		jzchan->vchan.desc_free = jz4780_dma_desc_free;
 	}

-	ret = dma_async_device_register(dd);
+	/*
+	 * On JZ4760, chan0 won't enable properly the first time.
+	 * Enabling then disabling chan1 will magically make chan0 work
+	 * correctly.
+	 */
+	jz4780_dma_chan_enable(jzdma, 1);
+	jz4780_dma_chan_disable(jzdma, 1);
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0)
+		goto err_disable_clk;
+
+	jzdma->irq = ret;
+
+	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
+			  jzdma);
+	if (ret) {
+		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
+		goto err_disable_clk;
+	}
+
+	ret = dmaenginem_async_device_register(dd);
 	if (ret) {
 		dev_err(dev, "failed to register device\n");
-		goto err_disable_clk;
+		goto err_free_irq;
 	}
 	/* Register with OF DMA helpers. */
@@ -850,41 +994,128 @@
 					 jzdma);
 	if (ret) {
 		dev_err(dev, "failed to register OF DMA controller\n");
-		goto err_unregister_dev;
+		goto err_free_irq;
 	}

 	dev_info(dev, "JZ4780 DMA controller initialised\n");
 	return 0;

-err_unregister_dev:
-	dma_async_device_unregister(dd);
+err_free_irq:
+	free_irq(jzdma->irq, jzdma);

 err_disable_clk:
 	clk_disable_unprepare(jzdma->clk);
-
-err_free_irq:
-	free_irq(jzdma->irq, jzdma);
 	return ret;
 }

-static int jz4780_dma_remove(struct platform_device *pdev)
+static void jz4780_dma_remove(struct platform_device *pdev)
 {
 	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
 	int i;

 	of_dma_controller_free(pdev->dev.of_node);

+	clk_disable_unprepare(jzdma->clk);
 	free_irq(jzdma->irq, jzdma);

-	for (i = 0; i < JZ_DMA_NR_CHANNELS; i++)
+	for (i = 0; i < jzdma->soc_data->nb_channels; i++)
 		tasklet_kill(&jzdma->chan[i].vchan.task);
-
-	dma_async_device_unregister(&jzdma->dma_device);
-	return 0;
 }

+static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
+	.nb_channels = 6,
+	.transfer_ord_max = 5,
+	.flags = JZ_SOC_DATA_BREAK_LINKS,
+};
+
+static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
+	.nb_channels = 6,
+	.transfer_ord_max = 5,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
+		 JZ_SOC_DATA_BREAK_LINKS,
+};
+
+static const struct jz4780_dma_soc_data jz4755_dma_soc_data = {
+	.nb_channels = 4,
+	.transfer_ord_max = 5,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
+		 JZ_SOC_DATA_BREAK_LINKS,
+};
+
+static const struct jz4780_dma_soc_data jz4760_dma_soc_data = {
+	.nb_channels = 5,
+	.transfer_ord_max = 6,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
+static const struct jz4780_dma_soc_data jz4760_mdma_soc_data = {
+	.nb_channels = 2,
+	.transfer_ord_max = 6,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
+static const struct jz4780_dma_soc_data jz4760_bdma_soc_data = {
+	.nb_channels = 3,
+	.transfer_ord_max = 6,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
+static const struct jz4780_dma_soc_data jz4760b_dma_soc_data = {
+	.nb_channels = 5,
+	.transfer_ord_max = 6,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
+static const struct jz4780_dma_soc_data jz4760b_mdma_soc_data = {
+	.nb_channels = 2,
+	.transfer_ord_max = 6,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
+static const struct jz4780_dma_soc_data jz4760b_bdma_soc_data = {
+	.nb_channels = 3,
+	.transfer_ord_max = 6,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
+static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
+	.nb_channels = 6,
+	.transfer_ord_max = 6,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
+static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
+	.nb_channels = 32,
+	.transfer_ord_max = 7,
+	.flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
+static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
+	.nb_channels = 8,
+	.transfer_ord_max = 7,
+	.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
+static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
+	.nb_channels = 32,
+	.transfer_ord_max = 7,
+	.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
 static const struct of_device_id jz4780_dma_dt_match[] = {
-	{ .compatible = "ingenic,jz4780-dma", .data = NULL },
+	{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
+	{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
+	{ .compatible = "ingenic,jz4755-dma", .data = &jz4755_dma_soc_data },
+	{ .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data },
+	{ .compatible = "ingenic,jz4760-mdma", .data = &jz4760_mdma_soc_data },
+	{ .compatible = "ingenic,jz4760-bdma", .data = &jz4760_bdma_soc_data },
+	{ .compatible = "ingenic,jz4760b-dma", .data = &jz4760b_dma_soc_data },
+	{ .compatible = "ingenic,jz4760b-mdma", .data = &jz4760b_mdma_soc_data },
+	{ .compatible = "ingenic,jz4760b-bdma", .data = &jz4760b_bdma_soc_data },
+	{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
+	{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
+	{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
+	{ .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data },
 	{},
 };
 MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
@@ -894,7 +1125,7 @@ static struct platform_driver jz4780_dma_driver = {
 	.remove = jz4780_dma_remove,
 	.driver = {
 		.name = "jz4780-dma",
-		.of_match_table = of_match_ptr(jz4780_dma_dt_match),
+		.of_match_table = jz4780_dma_dt_match,
 	},
 };
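For context, client drivers do not call into this file directly; they reach jz4780_of_dma_xlate() and jz4780_dma_prep_slave_sg() above through the generic dmaengine API after looking up a channel from their devicetree "dmas"/"dma-names" properties. The following is a minimal, illustrative sketch of that consumer side, not part of the patch; the "rx" channel name, FIFO address, buffer and burst values are hypothetical.

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical client: receive 'len' bytes from a device FIFO into 'buf'. */
	static int example_start_rx(struct device *dev, dma_addr_t fifo_addr,
				    dma_addr_t buf, size_t len)
	{
		struct dma_slave_config cfg = {
			.src_addr = fifo_addr,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst = 32,
		};
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *chan;
		dma_cookie_t cookie;

		/* "rx" must match an entry in the client's dma-names property. */
		chan = dma_request_chan(dev, "rx");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		if (dmaengine_slave_config(chan, &cfg))
			goto err_release;

		tx = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!tx)
			goto err_release;

		cookie = dmaengine_submit(tx);
		if (dma_submit_error(cookie))
			goto err_release;

		/* Kick the transfer; ends up in jz4780_dma_issue_pending(). */
		dma_async_issue_pending(chan);
		return 0;

	err_release:
		dma_release_channel(chan);
		return -EIO;
	}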
