Diffstat (limited to 'drivers/dma/fsl-edma-common.c')
 drivers/dma/fsl-edma-common.c | 111 +++++++++++++++++++++++++++++----------
 1 file changed, 82 insertions(+), 29 deletions(-)
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b7f15ab96855..a59212758029 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -95,7 +95,7 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
 	}
 
 	val = edma_readl_chreg(fsl_chan, ch_csr);
-	val |= EDMA_V3_CH_CSR_ERQ;
+	val |= EDMA_V3_CH_CSR_ERQ | EDMA_V3_CH_CSR_EEI;
 	edma_writel_chreg(fsl_chan, val, ch_csr);
 }
 
@@ -206,15 +206,19 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
 		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
 }
 
-static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
+static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth src_addr_width,
+					  enum dma_slave_buswidth dst_addr_width)
 {
-	u32 val;
+	u32 src_val, dst_val;
 
-	if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
-		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+		src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+		dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
-	val = ffs(addr_width) - 1;
-	return val | (val << 8);
+	src_val = ffs(src_addr_width) - 1;
+	dst_val = ffs(dst_addr_width) - 1;
+	return dst_val | (src_val << 8);
 }
 
 void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
@@ -480,8 +484,8 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
 		       bool disable_req, bool enable_sg)
 {
 	struct dma_slave_config *cfg = &fsl_chan->cfg;
+	u32 burst = 0;
 	u16 csr = 0;
-	u32 burst;
 
 	/*
 	 * eDMA hardware SGs require the TCDs to be stored in little
@@ -496,16 +500,30 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
 
 	fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);
 
-	if (fsl_chan->is_multi_fifo) {
-		/* set mloff to support multiple fifo */
-		burst = cfg->direction == DMA_DEV_TO_MEM ?
-				cfg->src_maxburst : cfg->dst_maxburst;
-		nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
-		/* enable DMLOE/SMLOE */
-		if (cfg->direction == DMA_MEM_TO_DEV) {
+	/* If we expect to have either multi_fifo or a port window size,
+	 * we will use minor loop offset, meaning bits 29-10 will be used for
+	 * address offset, while bits 9-0 will be used to tell DMA how much
+	 * data to read from addr.
+	 * If we don't have either of those, will use a major loop reading from addr
+	 * nbytes (29bits).
+	 */
+	if (cfg->direction == DMA_MEM_TO_DEV) {
+		if (fsl_chan->is_multi_fifo)
+			burst = cfg->dst_maxburst * 4;
+		if (cfg->dst_port_window_size)
+			burst = cfg->dst_port_window_size * cfg->dst_addr_width;
+		if (burst) {
+			nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
 			nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
 			nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
-		} else {
+		}
+	} else {
+		if (fsl_chan->is_multi_fifo)
+			burst = cfg->src_maxburst * 4;
+		if (cfg->src_port_window_size)
+			burst = cfg->src_port_window_size * cfg->src_addr_width;
+		if (burst) {
+			nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
 			nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
 			nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
 		}
 	}
@@ -598,13 +616,19 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 	dma_buf_next = dma_addr;
 
 	if (direction == DMA_MEM_TO_DEV) {
+		if (!fsl_chan->cfg.src_addr_width)
+			fsl_chan->cfg.src_addr_width = fsl_chan->cfg.dst_addr_width;
 		fsl_chan->attr =
-			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+					      fsl_chan->cfg.dst_addr_width);
 		nbytes = fsl_chan->cfg.dst_addr_width *
 			fsl_chan->cfg.dst_maxburst;
 	} else {
+		if (!fsl_chan->cfg.dst_addr_width)
+			fsl_chan->cfg.dst_addr_width = fsl_chan->cfg.src_addr_width;
 		fsl_chan->attr =
-			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+					      fsl_chan->cfg.dst_addr_width);
 		nbytes = fsl_chan->cfg.src_addr_width *
 			fsl_chan->cfg.src_maxburst;
 	}
@@ -623,11 +647,15 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 			dst_addr = fsl_chan->dma_dev_addr;
 			soff = fsl_chan->cfg.dst_addr_width;
 			doff = fsl_chan->is_multi_fifo ? 4 : 0;
+			if (fsl_chan->cfg.dst_port_window_size)
+				doff = fsl_chan->cfg.dst_addr_width;
 		} else if (direction == DMA_DEV_TO_MEM) {
 			src_addr = fsl_chan->dma_dev_addr;
 			dst_addr = dma_buf_next;
 			soff = fsl_chan->is_multi_fifo ? 4 : 0;
 			doff = fsl_chan->cfg.src_addr_width;
+			if (fsl_chan->cfg.src_port_window_size)
+				soff = fsl_chan->cfg.src_addr_width;
 		} else {
 			/* DMA_DEV_TO_DEV */
 			src_addr = fsl_chan->cfg.src_addr;
@@ -671,13 +699,19 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 	fsl_desc->dirn = direction;
 
 	if (direction == DMA_MEM_TO_DEV) {
+		if (!fsl_chan->cfg.src_addr_width)
+			fsl_chan->cfg.src_addr_width = fsl_chan->cfg.dst_addr_width;
 		fsl_chan->attr =
-			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+					      fsl_chan->cfg.dst_addr_width);
 		nbytes = fsl_chan->cfg.dst_addr_width *
 			fsl_chan->cfg.dst_maxburst;
 	} else {
+		if (!fsl_chan->cfg.dst_addr_width)
+			fsl_chan->cfg.dst_addr_width = fsl_chan->cfg.src_addr_width;
 		fsl_chan->attr =
-			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+					      fsl_chan->cfg.dst_addr_width);
 		nbytes = fsl_chan->cfg.src_addr_width *
 			fsl_chan->cfg.src_maxburst;
 	}
@@ -748,6 +782,10 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
 {
 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
 	struct fsl_edma_desc *fsl_desc;
+	u32 src_bus_width, dst_bus_width;
+
+	src_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(dma_src) - 1));
+	dst_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(dma_dst) - 1));
 
 	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
 	if (!fsl_desc)
@@ -760,8 +798,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
 
 	/* To match with copy_align and max_seg_size so 1 tcd is enough */
 	fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
-			fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
-			32, len, 0, 1, 1, 32, 0, true, true, false);
+			fsl_edma_get_tcd_attr(src_bus_width, dst_bus_width),
+			src_bus_width, len, 0, 1, 1, dst_bus_width, 0, true,
+			true, false);
 
 	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
 }
@@ -803,7 +842,7 @@ void fsl_edma_issue_pending(struct dma_chan *chan)
 int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	int ret;
+	int ret = 0;
 
 	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
 		clk_prepare_enable(fsl_chan->clk);
@@ -813,17 +852,29 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
 				sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
 				32, 0);
 
-	if (fsl_chan->txirq) {
+	if (fsl_chan->txirq)
 		ret = request_irq(fsl_chan->txirq, fsl_chan->irq_handler, IRQF_SHARED,
 				 fsl_chan->chan_name, fsl_chan);
 
-		if (ret) {
-			dma_pool_destroy(fsl_chan->tcd_pool);
-			return ret;
-		}
-	}
+	if (ret)
+		goto err_txirq;
+
+	if (fsl_chan->errirq > 0)
+		ret = request_irq(fsl_chan->errirq, fsl_chan->errirq_handler, IRQF_SHARED,
+				  fsl_chan->errirq_name, fsl_chan);
+
+	if (ret)
+		goto err_errirq;
 
 	return 0;
+
+err_errirq:
+	if (fsl_chan->txirq)
+		free_irq(fsl_chan->txirq, fsl_chan);
+err_txirq:
+	dma_pool_destroy(fsl_chan->tcd_pool);
+
+	return ret;
 }
 
 void fsl_edma_free_chan_resources(struct dma_chan *chan)
@@ -844,6 +895,8 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
 
 	if (fsl_chan->txirq)
 		free_irq(fsl_chan->txirq, fsl_chan);
+	if (fsl_chan->errirq)
+		free_irq(fsl_chan->errirq, fsl_chan);
 
 	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
 	dma_pool_destroy(fsl_chan->tcd_pool);
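
The patched fsl_edma_get_tcd_attr() now encodes the source and destination bus widths separately. The standalone program below is an illustration only: it mirrors that packing (DSIZE in the low bits, SSIZE at bit 8, each stored as log2 of the width in bytes) using local stand-in constants rather than the kernel's definitions.

/* Standalone sketch, not kernel code: mirrors how the patched
 * fsl_edma_get_tcd_attr() packs the TCD ATTR value. DSIZE sits in the
 * low bits and SSIZE at bit 8, each encoded as log2 of the bus width.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define BUSWIDTH_UNDEFINED	0	/* stand-in for DMA_SLAVE_BUSWIDTH_UNDEFINED */
#define BUSWIDTH_4_BYTES	4	/* stand-in for DMA_SLAVE_BUSWIDTH_4_BYTES */

static unsigned int tcd_attr(unsigned int src_width, unsigned int dst_width)
{
	unsigned int src_val, dst_val;

	/* Undefined widths fall back to 32-bit, as in the patch. */
	if (src_width == BUSWIDTH_UNDEFINED)
		src_width = BUSWIDTH_4_BYTES;
	if (dst_width == BUSWIDTH_UNDEFINED)
		dst_width = BUSWIDTH_4_BYTES;

	src_val = ffs(src_width) - 1;	/* log2 of a power-of-two width */
	dst_val = ffs(dst_width) - 1;

	return dst_val | (src_val << 8);
}

int main(void)
{
	/* 2-byte source, 4-byte destination: SSIZE=1, DSIZE=2 -> 0x0102 */
	printf("attr = 0x%04x\n", tcd_attr(2, 4));
	return 0;
}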
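
The reworked minor-loop handling in fsl_edma_fill_tcd() derives a byte offset from either the multi-FIFO stride or the peripheral port window and stores its negative value in the NBYTES word. The standalone sketch below reproduces that arithmetic for the DMA_MEM_TO_DEV case, assuming the layout described in the patch comment (bits 29-10 for the signed offset, bits 9-0 for the transfer size) with SMLOE/DMLOE in the top two bits; the struct and macros are illustrative stand-ins, not the driver's.

/* Standalone sketch, not kernel code: the minor-loop offset math used by
 * the patched fsl_edma_fill_tcd() for DMA_MEM_TO_DEV. Assumed layout:
 * bit 31 SMLOE, bit 30 DMLOE, bits 29-10 signed offset applied after each
 * minor loop, bits 9-0 minor-loop byte count.
 */
#include <stdint.h>
#include <stdio.h>

#define NBYTES_SMLOE		(1u << 31)
#define NBYTES_DMLOE		(1u << 30)
#define NBYTES_MLOFF(off)	(((uint32_t)(off) & 0xfffff) << 10)

struct cfg {	/* stand-in for the dma_slave_config fields used here */
	uint32_t dst_maxburst;
	uint32_t dst_port_window_size;
	uint32_t dst_addr_width;
};

static uint32_t nbytes_mem_to_dev(const struct cfg *c, int is_multi_fifo, uint32_t nbytes)
{
	uint32_t burst = 0;

	if (is_multi_fifo)
		burst = c->dst_maxburst * 4;				/* FIFO stride */
	if (c->dst_port_window_size)
		burst = c->dst_port_window_size * c->dst_addr_width;	/* window span */

	if (burst) {
		/* Step back by 'burst' bytes after each minor loop, destination side only. */
		nbytes |= NBYTES_MLOFF(-(int32_t)burst);
		nbytes |= NBYTES_DMLOE;
		nbytes &= ~NBYTES_SMLOE;
	}
	return nbytes;
}

int main(void)
{
	/* A window of four 2-byte registers gives an offset of -8 bytes. */
	struct cfg c = { .dst_maxburst = 8, .dst_port_window_size = 4, .dst_addr_width = 2 };

	printf("nbytes = 0x%08x\n", (unsigned int)nbytes_mem_to_dev(&c, 0, 16));
	return 0;
}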
