From 63ab76dbbdb8657e24645b7311ec3911a41039b5 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 5 Sep 2017 10:16:37 +0200 Subject: dmaengine: axi-dmac: Only use hardware cyclic mode for single segment transfers In hardware cyclic mode the submitted segment is repeated. This means hardware cyclic mode can only be used if the transfer has a single segment. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 7f0b9aa15867..eb289aa187dd 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -220,9 +220,11 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) /* * If the hardware supports cyclic transfers and there is no callback to - * call, enable hw cyclic mode to avoid unnecessary interrupts. + * call and only a single segment, enable hw cyclic mode to avoid + * unnecessary interrupts. */ - if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) + if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback && + desc->num_sgs == 1) flags |= AXI_DMAC_FLAG_CYCLIC; axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1); -- cgit From 008913dbeb1775ba365daa39462ca68884bd926f Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 5 Sep 2017 10:16:38 +0200 Subject: dmaengine: axi-dmac: Fix software cyclic mode When running in software cyclic mode the driver currently does not go back to the first segment once the last segment has been reached. Effectively making the transfer non-cyclic. Fix this by going back to the first segment once the last segment has been reached for cyclic transfers. Special care need to be taken to avoid a segment from being submitted multiple times concurrently, which could happen for transfers with a number of segments that is smaller than the DMA controller's internal queue. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/dma-axi-dmac.c | 69 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 51 insertions(+), 18 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index eb289aa187dd..2419fe524daa 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -72,6 +72,9 @@ #define AXI_DMAC_FLAG_CYCLIC BIT(0) +/* The maximum ID allocated by the hardware is 31 */ +#define AXI_DMAC_SG_UNUSED 32U + struct axi_dmac_sg { dma_addr_t src_addr; dma_addr_t dest_addr; @@ -80,6 +83,7 @@ struct axi_dmac_sg { unsigned int dest_stride; unsigned int src_stride; unsigned int id; + bool schedule_when_free; }; struct axi_dmac_desc { @@ -200,11 +204,21 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) } sg = &desc->sg[desc->num_submitted]; + /* Already queued in cyclic mode. 
Wait for it to finish */ + if (sg->id != AXI_DMAC_SG_UNUSED) { + sg->schedule_when_free = true; + return; + } + desc->num_submitted++; - if (desc->num_submitted == desc->num_sgs) - chan->next_desc = NULL; - else + if (desc->num_submitted == desc->num_sgs) { + if (desc->cyclic) + desc->num_submitted = 0; /* Start again */ + else + chan->next_desc = NULL; + } else { chan->next_desc = desc; + } sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID); @@ -239,37 +253,52 @@ static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan) struct axi_dmac_desc, vdesc.node); } -static void axi_dmac_transfer_done(struct axi_dmac_chan *chan, +static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan, unsigned int completed_transfers) { struct axi_dmac_desc *active; struct axi_dmac_sg *sg; + bool start_next = false; active = axi_dmac_active_desc(chan); if (!active) - return; + return false; - if (active->cyclic) { - vchan_cyclic_callback(&active->vdesc); - } else { - do { - sg = &active->sg[active->num_completed]; - if (!(BIT(sg->id) & completed_transfers)) - break; - active->num_completed++; - if (active->num_completed == active->num_sgs) { + do { + sg = &active->sg[active->num_completed]; + if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */ + break; + if (!(BIT(sg->id) & completed_transfers)) + break; + active->num_completed++; + sg->id = AXI_DMAC_SG_UNUSED; + if (sg->schedule_when_free) { + sg->schedule_when_free = false; + start_next = true; + } + + if (active->cyclic) + vchan_cyclic_callback(&active->vdesc); + + if (active->num_completed == active->num_sgs) { + if (active->cyclic) { + active->num_completed = 0; /* wrap around */ + } else { list_del(&active->vdesc.node); vchan_cookie_complete(&active->vdesc); active = axi_dmac_active_desc(chan); } - } while (active); - } + } + } while (active); + + return start_next; } static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid) { struct axi_dmac *dmac = devid; unsigned int pending; + bool start_next = false; pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING); if (!pending) @@ -283,10 +312,10 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid) unsigned int completed; completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE); - axi_dmac_transfer_done(&dmac->chan, completed); + start_next = axi_dmac_transfer_done(&dmac->chan, completed); } /* Space has become available in the descriptor queue */ - if (pending & AXI_DMAC_IRQ_SOT) + if ((pending & AXI_DMAC_IRQ_SOT) || start_next) axi_dmac_start_transfer(&dmac->chan); spin_unlock(&dmac->chan.vchan.lock); @@ -336,12 +365,16 @@ static void axi_dmac_issue_pending(struct dma_chan *c) static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs) { struct axi_dmac_desc *desc; + unsigned int i; desc = kzalloc(sizeof(struct axi_dmac_desc) + sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT); if (!desc) return NULL; + for (i = 0; i < num_sgs; i++) + desc->sg[i].id = AXI_DMAC_SG_UNUSED; + desc->num_sgs = num_sgs; return desc; -- cgit From f3ae7d9155c79bb8f97ca3ff61ea979dec402952 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 5 Sep 2017 16:43:49 +0200 Subject: dmaengine: xilinx_dma: Move enum xdma_ip_type to driver file The enum xdma_ip_type is only used inside the Xilinx DMA driver and not exported to any consumers (nor should it be). So move it from the global header to driver file itself. 
Signed-off-by: Lars-Peter Clausen Acked-by: Michal Simek Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 8722bcba489d..5eef13380ca8 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -366,6 +366,20 @@ struct xilinx_dma_chan { u16 tdest; }; +/** + * enum xdma_ip_type: DMA IP type. + * + * XDMA_TYPE_AXIDMA: Axi dma ip. + * XDMA_TYPE_CDMA: Axi cdma ip. + * XDMA_TYPE_VDMA: Axi vdma ip. + * + */ +enum xdma_ip_type { + XDMA_TYPE_AXIDMA = 0, + XDMA_TYPE_CDMA, + XDMA_TYPE_VDMA, +}; + struct xilinx_dma_config { enum xdma_ip_type dmatype; int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, -- cgit From f9d4a398f121b00f581da1428bff9b93d955452d Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Thu, 14 Sep 2017 11:46:43 -0700 Subject: dmaengine: imx-sdma: Correct src_addr_widths and directions The driver already supports DMA_DEV_TO_DEV in sdma_config(), DMA_SLAVE_BUSWIDTH_2_BYTES and DMA_SLAVE_BUSWIDTH_1_BYTE in sdma_prep_slave_sg(). So this patch adds them to the lists. Signed-off-by: Nicolin Chen Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index a67ec1bdc4e0..2184881afe76 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -178,6 +178,14 @@ #define SDMA_WATERMARK_LEVEL_HWE BIT(29) #define SDMA_WATERMARK_LEVEL_CONT BIT(31) +#define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) + +#define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \ + BIT(DMA_MEM_TO_DEV) | \ + BIT(DMA_DEV_TO_DEV)) + /* * Mode/Count of data node descriptors - IPCv2 */ @@ -1851,9 +1859,9 @@ static int sdma_probe(struct platform_device *pdev) sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; sdma->dma_device.device_config = sdma_config; sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay; - sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); - sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); - sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; + sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; + sdma->dma_device.directions = SDMA_DMA_DIRECTIONS; sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; sdma->dma_device.device_issue_pending = sdma_issue_pending; sdma->dma_device.dev->dma_parms = &sdma->dma_parms; -- cgit From 8f3b00347bf075fb457f90ce76573615f567e7bc Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Wed, 20 Sep 2017 09:24:02 +0200 Subject: dmaengine: sun6i: use of_device_get_match_data The usage of of_device_get_match_data reduce the code size a bit. Furthermore, it prevents an improbable dereference when of_match_device() return NULL. 
Acked-by: Maxime Ripard
Signed-off-by: Corentin Labbe
Signed-off-by: Vinod Koul
---
 drivers/dma/sun6i-dma.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'drivers/dma')

diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index bcd496edc70f..584f4e82a9be 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -1064,7 +1064,6 @@ MODULE_DEVICE_TABLE(of, sun6i_dma_match);
 static int sun6i_dma_probe(struct platform_device *pdev)
 {
-	const struct of_device_id *device;
 	struct sun6i_dma_dev *sdc;
 	struct resource *res;
 	int ret, i;
@@ -1073,10 +1072,9 @@ static int sun6i_dma_probe(struct platform_device *pdev)
 	if (!sdc)
 		return -ENOMEM;
-	device = of_match_device(sun6i_dma_match, &pdev->dev);
-	if (!device)
+	sdc->cfg = of_device_get_match_data(&pdev->dev);
+	if (!sdc->cfg)
 		return -ENODEV;
-	sdc->cfg = device->data;
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	sdc->base = devm_ioremap_resource(&pdev->dev, res);
-- cgit

From 6b4faeac05bc0b91616b921191cb054d1376f3b4 Mon Sep 17 00:00:00 2001
From: Sricharan R
Date: Mon, 28 Aug 2017 20:30:24 +0530
Subject: dmaengine: qcom-bam: Process multiple pending descriptors

The bam dmaengine has a circular FIFO to which we add hw descriptors
that describe the transaction. The FIFO has space for about 4096 hw
descriptors.

Currently we add one descriptor, wait for it to complete with an
interrupt, and only then add the next pending descriptor. In this way
the FIFO is underutilized, since only one descriptor is processed at a
time even though there is space in the FIFO for the BAM to process more.

Instead, keep adding descriptors to the FIFO until it is full, which
allows the BAM to continue working on the next descriptor immediately
after signalling the completion interrupt for the previous descriptor.

Also, when the client has not set DMA_PREP_INTERRUPT for a descriptor,
do not configure the BAM to trigger an interrupt upon completion of
that descriptor. This way we get an interrupt only for the descriptor
for which DMA_PREP_INTERRUPT was requested, and then signal completion
of all the previously completed descriptors. So we still do callbacks
for all requested descriptors, only the number of interrupts is reduced.
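As an illustration of the client-side contract (a minimal sketch, not part
of this patch; chan, sgl, nents, nr_xfers, batch_done_cb and batch are
placeholder names), a batch can be queued through the standard dmaengine
API so that only the final descriptor requests an interrupt:

	struct dma_async_tx_descriptor *txd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_xfers; i++) {
		/* only the last transfer in the batch asks for an interrupt */
		flags = (i == nr_xfers - 1) ? DMA_PREP_INTERRUPT : 0;

		txd = dmaengine_prep_slave_sg(chan, sgl[i], nents[i],
					      DMA_MEM_TO_DEV, flags);
		if (!txd)
			break;

		if (flags & DMA_PREP_INTERRUPT) {
			/* one callback signals completion of the whole batch */
			txd->callback = batch_done_cb;
			txd->callback_param = batch;
		}
		dmaengine_submit(txd);
	}
	dma_async_issue_pending(chan);

The before/after pictures and numbers below show the effect on the
interrupt count: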
CURRENT: ------ ------- --------------- |DES 0| |DESC 1| |DESC 2 + INT | ------ ------- --------------- | | | | | | INTERRUPT: (INT) (INT) (INT) CALLBACK: (CB) (CB) (CB) MTD_SPEEDTEST READ PAGE: 3560 KiB/s MTD_SPEEDTEST WRITE PAGE: 2664 KiB/s IOZONE READ: 2456 KB/s IOZONE WRITE: 1230 KB/s bam dma interrupts (after tests): 96508 CHANGE: ------ ------- ------------- |DES 0| |DESC 1 |DESC 2 + INT | ------ ------- -------------- | | (INT) (CB for 0, 1, 2) MTD_SPEEDTEST READ PAGE: 3860 KiB/s MTD_SPEEDTEST WRITE PAGE: 2837 KiB/s IOZONE READ: 2677 KB/s IOZONE WRITE: 1308 KB/s bam dma interrupts (after tests): 58806 Signed-off-by: Sricharan R Reviewed-by: Andy Gross Tested-by: Abhishek Sahu Signed-off-by: Vinod Koul --- drivers/dma/qcom/bam_dma.c | 169 +++++++++++++++++++++++++++++---------------- 1 file changed, 109 insertions(+), 60 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 6d89fb6a6a92..d076940e0c69 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -78,6 +79,8 @@ struct bam_async_desc { struct bam_desc_hw *curr_desc; + /* list node for the desc in the bam_chan list of descriptors */ + struct list_head desc_node; enum dma_transfer_direction dir; size_t length; struct bam_desc_hw desc[0]; @@ -347,6 +350,8 @@ static const struct reg_offset_data bam_v1_7_reg_info[] = { #define BAM_DESC_FIFO_SIZE SZ_32K #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1) #define BAM_FIFO_SIZE (SZ_32K - 8) +#define IS_BUSY(chan) (CIRC_SPACE(bchan->tail, bchan->head,\ + MAX_DESCRIPTORS + 1) == 0) struct bam_chan { struct virt_dma_chan vc; @@ -356,8 +361,6 @@ struct bam_chan { /* configuration from device tree */ u32 id; - struct bam_async_desc *curr_txd; /* current running dma */ - /* runtime configuration */ struct dma_slave_config slave; @@ -372,6 +375,8 @@ struct bam_chan { unsigned int initialized; /* is the channel hw initialized? */ unsigned int paused; /* is the channel paused? */ unsigned int reconfigure; /* new slave config? 
*/ + /* list of descriptors currently processed */ + struct list_head desc_list; struct list_head node; }; @@ -539,7 +544,7 @@ static void bam_free_chan(struct dma_chan *chan) vchan_free_chan_resources(to_virt_chan(chan)); - if (bchan->curr_txd) { + if (!list_empty(&bchan->desc_list)) { dev_err(bchan->bdev->dev, "Cannot free busy channel\n"); goto err; } @@ -632,8 +637,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, if (flags & DMA_PREP_INTERRUPT) async_desc->flags |= DESC_FLAG_EOT; - else - async_desc->flags |= DESC_FLAG_INT; async_desc->num_desc = num_alloc; async_desc->curr_desc = async_desc->desc; @@ -684,14 +687,16 @@ err_out: static int bam_dma_terminate_all(struct dma_chan *chan) { struct bam_chan *bchan = to_bam_chan(chan); + struct bam_async_desc *async_desc, *tmp; unsigned long flag; LIST_HEAD(head); /* remove all transactions, including active transaction */ spin_lock_irqsave(&bchan->vc.lock, flag); - if (bchan->curr_txd) { - list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued); - bchan->curr_txd = NULL; + list_for_each_entry_safe(async_desc, tmp, + &bchan->desc_list, desc_node) { + list_add(&async_desc->vd.node, &bchan->vc.desc_issued); + list_del(&async_desc->desc_node); } vchan_get_all_descriptors(&bchan->vc, &head); @@ -763,9 +768,9 @@ static int bam_resume(struct dma_chan *chan) */ static u32 process_channel_irqs(struct bam_device *bdev) { - u32 i, srcs, pipe_stts; + u32 i, srcs, pipe_stts, offset, avail; unsigned long flags; - struct bam_async_desc *async_desc; + struct bam_async_desc *async_desc, *tmp; srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE)); @@ -785,27 +790,40 @@ static u32 process_channel_irqs(struct bam_device *bdev) writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR)); spin_lock_irqsave(&bchan->vc.lock, flags); - async_desc = bchan->curr_txd; - if (async_desc) { - async_desc->num_desc -= async_desc->xfer_len; - async_desc->curr_desc += async_desc->xfer_len; - bchan->curr_txd = NULL; + offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) & + P_SW_OFSTS_MASK; + offset /= sizeof(struct bam_desc_hw); + + /* Number of bytes available to read */ + avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1); + + list_for_each_entry_safe(async_desc, tmp, + &bchan->desc_list, desc_node) { + /* Not enough data to read */ + if (avail < async_desc->xfer_len) + break; /* manage FIFO */ bchan->head += async_desc->xfer_len; bchan->head %= MAX_DESCRIPTORS; + async_desc->num_desc -= async_desc->xfer_len; + async_desc->curr_desc += async_desc->xfer_len; + avail -= async_desc->xfer_len; + /* - * if complete, process cookie. Otherwise + * if complete, process cookie. 
Otherwise * push back to front of desc_issued so that * it gets restarted by the tasklet */ - if (!async_desc->num_desc) + if (!async_desc->num_desc) { vchan_cookie_complete(&async_desc->vd); - else + } else { list_add(&async_desc->vd.node, - &bchan->vc.desc_issued); + &bchan->vc.desc_issued); + } + list_del(&async_desc->desc_node); } spin_unlock_irqrestore(&bchan->vc.lock, flags); @@ -867,6 +885,7 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct bam_chan *bchan = to_bam_chan(chan); + struct bam_async_desc *async_desc; struct virt_dma_desc *vd; int ret; size_t residue = 0; @@ -882,11 +901,17 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie, spin_lock_irqsave(&bchan->vc.lock, flags); vd = vchan_find_desc(&bchan->vc, cookie); - if (vd) + if (vd) { residue = container_of(vd, struct bam_async_desc, vd)->length; - else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie) - for (i = 0; i < bchan->curr_txd->num_desc; i++) - residue += bchan->curr_txd->curr_desc[i].size; + } else { + list_for_each_entry(async_desc, &bchan->desc_list, desc_node) { + if (async_desc->vd.tx.cookie != cookie) + continue; + + for (i = 0; i < async_desc->num_desc; i++) + residue += async_desc->curr_desc[i].size; + } + } spin_unlock_irqrestore(&bchan->vc.lock, flags); @@ -927,63 +952,86 @@ static void bam_start_dma(struct bam_chan *bchan) { struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc); struct bam_device *bdev = bchan->bdev; - struct bam_async_desc *async_desc; + struct bam_async_desc *async_desc = NULL; struct bam_desc_hw *desc; struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt, sizeof(struct bam_desc_hw)); int ret; + unsigned int avail; + struct dmaengine_desc_callback cb; lockdep_assert_held(&bchan->vc.lock); if (!vd) return; - list_del(&vd->node); - - async_desc = container_of(vd, struct bam_async_desc, vd); - bchan->curr_txd = async_desc; - ret = pm_runtime_get_sync(bdev->dev); if (ret < 0) return; - /* on first use, initialize the channel hardware */ - if (!bchan->initialized) - bam_chan_init_hw(bchan, async_desc->dir); + while (vd && !IS_BUSY(bchan)) { + list_del(&vd->node); - /* apply new slave config changes, if necessary */ - if (bchan->reconfigure) - bam_apply_new_config(bchan, async_desc->dir); + async_desc = container_of(vd, struct bam_async_desc, vd); - desc = bchan->curr_txd->curr_desc; + /* on first use, initialize the channel hardware */ + if (!bchan->initialized) + bam_chan_init_hw(bchan, async_desc->dir); - if (async_desc->num_desc > MAX_DESCRIPTORS) - async_desc->xfer_len = MAX_DESCRIPTORS; - else - async_desc->xfer_len = async_desc->num_desc; + /* apply new slave config changes, if necessary */ + if (bchan->reconfigure) + bam_apply_new_config(bchan, async_desc->dir); - /* set any special flags on the last descriptor */ - if (async_desc->num_desc == async_desc->xfer_len) - desc[async_desc->xfer_len - 1].flags |= - cpu_to_le16(async_desc->flags); - else - desc[async_desc->xfer_len - 1].flags |= - cpu_to_le16(DESC_FLAG_INT); + desc = async_desc->curr_desc; + avail = CIRC_SPACE(bchan->tail, bchan->head, + MAX_DESCRIPTORS + 1); + + if (async_desc->num_desc > avail) + async_desc->xfer_len = avail; + else + async_desc->xfer_len = async_desc->num_desc; + + /* set any special flags on the last descriptor */ + if (async_desc->num_desc == async_desc->xfer_len) + desc[async_desc->xfer_len - 1].flags |= + cpu_to_le16(async_desc->flags); - if (bchan->tail + async_desc->xfer_len > 
MAX_DESCRIPTORS) { - u32 partial = MAX_DESCRIPTORS - bchan->tail; + vd = vchan_next_desc(&bchan->vc); - memcpy(&fifo[bchan->tail], desc, - partial * sizeof(struct bam_desc_hw)); - memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) * + dmaengine_desc_get_callback(&async_desc->vd.tx, &cb); + + /* + * An interrupt is generated at this desc, if + * - FIFO is FULL. + * - No more descriptors to add. + * - If a callback completion was requested for this DESC, + * In this case, BAM will deliver the completion callback + * for this desc and continue processing the next desc. + */ + if (((avail <= async_desc->xfer_len) || !vd || + dmaengine_desc_callback_valid(&cb)) && + !(async_desc->flags & DESC_FLAG_EOT)) + desc[async_desc->xfer_len - 1].flags |= + cpu_to_le16(DESC_FLAG_INT); + + if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { + u32 partial = MAX_DESCRIPTORS - bchan->tail; + + memcpy(&fifo[bchan->tail], desc, + partial * sizeof(struct bam_desc_hw)); + memcpy(fifo, &desc[partial], + (async_desc->xfer_len - partial) * sizeof(struct bam_desc_hw)); - } else { - memcpy(&fifo[bchan->tail], desc, - async_desc->xfer_len * sizeof(struct bam_desc_hw)); - } + } else { + memcpy(&fifo[bchan->tail], desc, + async_desc->xfer_len * + sizeof(struct bam_desc_hw)); + } - bchan->tail += async_desc->xfer_len; - bchan->tail %= MAX_DESCRIPTORS; + bchan->tail += async_desc->xfer_len; + bchan->tail %= MAX_DESCRIPTORS; + list_add_tail(&async_desc->desc_node, &bchan->desc_list); + } /* ensure descriptor writes and dma start not reordered */ wmb(); @@ -1012,7 +1060,7 @@ static void dma_tasklet(unsigned long data) bchan = &bdev->channels[i]; spin_lock_irqsave(&bchan->vc.lock, flags); - if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd) + if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan)) bam_start_dma(bchan); spin_unlock_irqrestore(&bchan->vc.lock, flags); } @@ -1033,7 +1081,7 @@ static void bam_issue_pending(struct dma_chan *chan) spin_lock_irqsave(&bchan->vc.lock, flags); /* if work pending and idle, start a transaction */ - if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd) + if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan)) bam_start_dma(bchan); spin_unlock_irqrestore(&bchan->vc.lock, flags); @@ -1133,6 +1181,7 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan, vchan_init(&bchan->vc, &bdev->common); bchan->vc.desc_free = bam_dma_free_desc; + INIT_LIST_HEAD(&bchan->desc_list); } static const struct of_device_id bam_of_match[] = { -- cgit From df7e762db5f6c8dbd9e480f1c9ef9851de346657 Mon Sep 17 00:00:00 2001 From: Pierre-Yves MORDRET Date: Fri, 22 Sep 2017 09:31:30 +0200 Subject: dmaengine: Add STM32 DMAMUX driver This patch implements the STM32 DMAMUX driver. The DMAMUX request multiplexer allows routing a DMA request line between the peripherals and the DMA controllers of the product. The routing function is ensured by a programmable multi-channel DMA request line multiplexer. Each channel selects a unique DMA request line, unconditionally or synchronously with events from its DMAMUX synchronization inputs. 
The DMAMUX may also be used as a DMA request generator from programmable events on its input trigger signals Signed-off-by: M'boumba Cedric Madianga Signed-off-by: Pierre-Yves MORDRET Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 9 ++ drivers/dma/Makefile | 1 + drivers/dma/stm32-dmamux.c | 327 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 337 insertions(+) create mode 100644 drivers/dma/stm32-dmamux.c (limited to 'drivers/dma') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index fadc4d8783bd..04e381b522b4 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -483,6 +483,15 @@ config STM32_DMA If you have a board based on such a MCU and wish to use DMA say Y here. +config STM32_DMAMUX + bool "STMicroelectronics STM32 dma multiplexer support" + depends on STM32_DMA || COMPILE_TEST + help + Enable support for the on-chip DMA multiplexer on STMicroelectronics + STM32 MCUs. + If you have a board based on such a MCU and wish to use DMAMUX say Y + here. + config S3C24XX_DMAC bool "Samsung S3C24XX DMA support" depends on ARCH_S3C24XX || COMPILE_TEST diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index f08f8de1b567..a145ad1426bc 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_RENESAS_DMA) += sh/ obj-$(CONFIG_SIRF_DMA) += sirf-dma.o obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o obj-$(CONFIG_STM32_DMA) += stm32-dma.o +obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c new file mode 100644 index 000000000000..22812e7a953b --- /dev/null +++ b/drivers/dma/stm32-dmamux.c @@ -0,0 +1,327 @@ +/* + * + * Copyright (C) STMicroelectronics SA 2017 + * Author(s): M'boumba Cedric Madianga + * Pierre-Yves Mordret + * + * License terms: GPL V2.0. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * DMA Router driver for STM32 DMA MUX + * + * Based on TI DMA Crossbar driver + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define STM32_DMAMUX_CCR(x) (0x4 * (x)) +#define STM32_DMAMUX_MAX_DMA_REQUESTS 32 +#define STM32_DMAMUX_MAX_REQUESTS 255 + +struct stm32_dmamux { + u32 master; + u32 request; + u32 chan_id; +}; + +struct stm32_dmamux_data { + struct dma_router dmarouter; + struct clk *clk; + struct reset_control *rst; + void __iomem *iomem; + u32 dma_requests; /* Number of DMA requests connected to DMAMUX */ + u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */ + spinlock_t lock; /* Protects register access */ + unsigned long *dma_inuse; /* Used DMA channel */ + u32 dma_reqs[]; /* Number of DMA Request per DMA masters. + * [0] holds number of DMA Masters. 
+ * To be kept at very end end of this structure + */ +}; + +static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg) +{ + return readl_relaxed(iomem + reg); +} + +static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val) +{ + writel_relaxed(val, iomem + reg); +} + +static void stm32_dmamux_free(struct device *dev, void *route_data) +{ + struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev); + struct stm32_dmamux *mux = route_data; + unsigned long flags; + + /* Clear dma request */ + spin_lock_irqsave(&dmamux->lock, flags); + + stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0); + clear_bit(mux->chan_id, dmamux->dma_inuse); + + if (!IS_ERR(dmamux->clk)) + clk_disable(dmamux->clk); + + spin_unlock_irqrestore(&dmamux->lock, flags); + + dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n", + mux->request, mux->master, mux->chan_id); + + kfree(mux); +} + +static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); + struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev); + struct stm32_dmamux *mux; + u32 i, min, max; + int ret; + unsigned long flags; + + if (dma_spec->args_count != 3) { + dev_err(&pdev->dev, "invalid number of dma mux args\n"); + return ERR_PTR(-EINVAL); + } + + if (dma_spec->args[0] > dmamux->dmamux_requests) { + dev_err(&pdev->dev, "invalid mux request number: %d\n", + dma_spec->args[0]); + return ERR_PTR(-EINVAL); + } + + mux = kzalloc(sizeof(*mux), GFP_KERNEL); + if (!mux) + return ERR_PTR(-ENOMEM); + + spin_lock_irqsave(&dmamux->lock, flags); + mux->chan_id = find_first_zero_bit(dmamux->dma_inuse, + dmamux->dma_requests); + set_bit(mux->chan_id, dmamux->dma_inuse); + spin_unlock_irqrestore(&dmamux->lock, flags); + + if (mux->chan_id == dmamux->dma_requests) { + dev_err(&pdev->dev, "Run out of free DMA requests\n"); + ret = -ENOMEM; + goto error; + } + + /* Look for DMA Master */ + for (i = 1, min = 0, max = dmamux->dma_reqs[i]; + i <= dmamux->dma_reqs[0]; + min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i]) + if (mux->chan_id < max) + break; + mux->master = i - 1; + + /* The of_node_put() will be done in of_dma_router_xlate function */ + dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1); + if (!dma_spec->np) { + dev_err(&pdev->dev, "can't get dma master\n"); + ret = -EINVAL; + goto error; + } + + /* Set dma request */ + spin_lock_irqsave(&dmamux->lock, flags); + if (!IS_ERR(dmamux->clk)) { + ret = clk_enable(dmamux->clk); + if (ret < 0) { + spin_unlock_irqrestore(&dmamux->lock, flags); + dev_err(&pdev->dev, "clk_prep_enable issue: %d\n", ret); + goto error; + } + } + spin_unlock_irqrestore(&dmamux->lock, flags); + + mux->request = dma_spec->args[0]; + + /* craft DMA spec */ + dma_spec->args[3] = dma_spec->args[2]; + dma_spec->args[2] = dma_spec->args[1]; + dma_spec->args[1] = 0; + dma_spec->args[0] = mux->chan_id - min; + dma_spec->args_count = 4; + + stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), + mux->request); + dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n", + mux->request, mux->master, mux->chan_id); + + return mux; + +error: + clear_bit(mux->chan_id, dmamux->dma_inuse); + kfree(mux); + return ERR_PTR(ret); +} + +static const struct of_device_id stm32_stm32dma_master_match[] = { + { .compatible = "st,stm32-dma", }, + {}, +}; + +static int stm32_dmamux_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + const 
struct of_device_id *match; + struct device_node *dma_node; + struct stm32_dmamux_data *stm32_dmamux; + struct resource *res; + void __iomem *iomem; + int i, count, ret; + u32 dma_req; + + if (!node) + return -ENODEV; + + count = device_property_read_u32_array(&pdev->dev, "dma-masters", + NULL, 0); + if (count < 0) { + dev_err(&pdev->dev, "Can't get DMA master(s) node\n"); + return -ENODEV; + } + + stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) + + sizeof(u32) * (count + 1), GFP_KERNEL); + if (!stm32_dmamux) + return -ENOMEM; + + dma_req = 0; + for (i = 1; i <= count; i++) { + dma_node = of_parse_phandle(node, "dma-masters", i - 1); + + match = of_match_node(stm32_stm32dma_master_match, dma_node); + if (!match) { + dev_err(&pdev->dev, "DMA master is not supported\n"); + of_node_put(dma_node); + return -EINVAL; + } + + if (of_property_read_u32(dma_node, "dma-requests", + &stm32_dmamux->dma_reqs[i])) { + dev_info(&pdev->dev, + "Missing MUX output information, using %u.\n", + STM32_DMAMUX_MAX_DMA_REQUESTS); + stm32_dmamux->dma_reqs[i] = + STM32_DMAMUX_MAX_DMA_REQUESTS; + } + dma_req += stm32_dmamux->dma_reqs[i]; + of_node_put(dma_node); + } + + if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) { + dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n"); + return -ENODEV; + } + + stm32_dmamux->dma_requests = dma_req; + stm32_dmamux->dma_reqs[0] = count; + stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev, + BITS_TO_LONGS(dma_req), + sizeof(unsigned long), + GFP_KERNEL); + if (!stm32_dmamux->dma_inuse) + return -ENOMEM; + + if (device_property_read_u32(&pdev->dev, "dma-requests", + &stm32_dmamux->dmamux_requests)) { + stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS; + dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n", + stm32_dmamux->dmamux_requests); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + iomem = devm_ioremap_resource(&pdev->dev, res); + if (!iomem) + return -ENOMEM; + + spin_lock_init(&stm32_dmamux->lock); + + stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(stm32_dmamux->clk)) { + ret = PTR_ERR(stm32_dmamux->clk); + if (ret == -EPROBE_DEFER) + dev_info(&pdev->dev, "Missing controller clock\n"); + return ret; + } + + stm32_dmamux->rst = devm_reset_control_get(&pdev->dev, NULL); + if (!IS_ERR(stm32_dmamux->rst)) { + reset_control_assert(stm32_dmamux->rst); + udelay(2); + reset_control_deassert(stm32_dmamux->rst); + } + + stm32_dmamux->iomem = iomem; + stm32_dmamux->dmarouter.dev = &pdev->dev; + stm32_dmamux->dmarouter.route_free = stm32_dmamux_free; + + platform_set_drvdata(pdev, stm32_dmamux); + + if (!IS_ERR(stm32_dmamux->clk)) { + ret = clk_prepare_enable(stm32_dmamux->clk); + if (ret < 0) { + dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret); + return ret; + } + } + + /* Reset the dmamux */ + for (i = 0; i < stm32_dmamux->dma_requests; i++) + stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0); + + if (!IS_ERR(stm32_dmamux->clk)) + clk_disable(stm32_dmamux->clk); + + return of_dma_router_register(node, stm32_dmamux_route_allocate, + &stm32_dmamux->dmarouter); +} + +static const struct of_device_id stm32_dmamux_match[] = { + { .compatible = "st,stm32h7-dmamux" }, + {}, +}; + +static struct platform_driver stm32_dmamux_driver = { + .probe = stm32_dmamux_probe, + .driver = { + .name = "stm32-dmamux", + .of_match_table = stm32_dmamux_match, + }, +}; + +static int __init stm32_dmamux_init(void) +{ + return platform_driver_register(&stm32_dmamux_driver); +} 
+arch_initcall(stm32_dmamux_init); + +MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX"); +MODULE_AUTHOR("M'boumba Cedric Madianga "); +MODULE_AUTHOR("Pierre-Yves Mordret "); +MODULE_LICENSE("GPL v2"); -- cgit From 73d2a3cef4bb6d93cb83faedf71d31ca3af0078c Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 26 Sep 2017 15:10:02 +0100 Subject: dmaengine: sa11x0: add DMA filters Add DMA filters for the sa11x0 DMA channels. This will allow us to migrate away from directly using the DMA filter function in drivers. Signed-off-by: Russell King Signed-off-by: Vinod Koul --- drivers/dma/sa11x0-dma.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 1adeb3265085..c7a89c22890e 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -823,6 +823,13 @@ static const struct sa11x0_dma_channel_desc chan_desc[] = { CD(Ser4SSPRc, DDAR_RW), }; +static const struct dma_slave_map sa11x0_dma_map[] = { + { "sa11x0-ir", "tx", "Ser2ICPTr" }, + { "sa11x0-ir", "rx", "Ser2ICPRc" }, + { "sa11x0-ssp", "tx", "Ser4SSPTr" }, + { "sa11x0-ssp", "rx", "Ser4SSPRc" }, +}; + static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, struct device *dev) { @@ -909,6 +916,10 @@ static int sa11x0_dma_probe(struct platform_device *pdev) spin_lock_init(&d->lock); INIT_LIST_HEAD(&d->chan_pending); + d->slave.filter.fn = sa11x0_dma_filter_fn; + d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map); + d->slave.filter.map = sa11x0_dma_map; + d->base = ioremap(res->start, resource_size(res)); if (!d->base) { ret = -ENOMEM; -- cgit From a4ffb13c8946abc4b92621275de6718e19db860d Mon Sep 17 00:00:00 2001 From: Pierre-Yves MORDRET Date: Thu, 28 Sep 2017 17:36:41 +0200 Subject: dmaengine: Add STM32 MDMA driver This patch adds the driver for the STM32 MDMA controller. Master Direct memory access (MDMA) is used in order to provide high-speed data transfer between memory and memory or between peripherals and memory. MDMA controller provides a master AXI interface for main memory and peripheral registers access (system access port) and a master AHB interface only for Cortex-M7 TCM memory access (TCM access port). MDMA works in conjunction with the standard DMA controllers (DMA1 or DMA2). It offers up to 64 channels, each dedicated to managing memory access requests from one of the DMA stream memory buffer or other peripherals (w/ integrated FIFO). Signed-off-by: M'boumba Cedric Madianga Signed-off-by: Pierre-Yves MORDRET Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 12 + drivers/dma/Makefile | 1 + drivers/dma/stm32-mdma.c | 1666 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1679 insertions(+) create mode 100644 drivers/dma/stm32-mdma.c (limited to 'drivers/dma') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 04e381b522b4..73446622ccc9 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -492,6 +492,18 @@ config STM32_DMAMUX If you have a board based on such a MCU and wish to use DMAMUX say Y here. +config STM32_MDMA + bool "STMicroelectronics STM32 master dma support" + depends on ARCH_STM32 || COMPILE_TEST + select DMA_ENGINE + select DMA_OF + select DMA_VIRTUAL_CHANNELS + help + Enable support for the on-chip MDMA controller on STMicroelectronics + STM32 platforms. + If you have a board based on STM32 SoC and wish to use the master DMA + say Y here. 
+ config S3C24XX_DMAC bool "Samsung S3C24XX DMA support" depends on ARCH_S3C24XX || COMPILE_TEST diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index a145ad1426bc..4d2376fdea01 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -60,6 +60,7 @@ obj-$(CONFIG_SIRF_DMA) += sirf-dma.o obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o obj-$(CONFIG_STM32_DMA) += stm32-dma.o obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o +obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c new file mode 100644 index 000000000000..a9cb341c8ee0 --- /dev/null +++ b/drivers/dma/stm32-mdma.c @@ -0,0 +1,1666 @@ +/* + * + * Copyright (C) STMicroelectronics SA 2017 + * Author(s): M'boumba Cedric Madianga + * Pierre-Yves Mordret + * + * License terms: GPL V2.0. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * Driver for STM32 MDMA controller + * + * Inspired by stm32-dma.c and dma-jz4780.c + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "virt-dma.h" + +/* MDMA Generic getter/setter */ +#define STM32_MDMA_SHIFT(n) (ffs(n) - 1) +#define STM32_MDMA_SET(n, mask) (((n) << STM32_MDMA_SHIFT(mask)) & \ + (mask)) +#define STM32_MDMA_GET(n, mask) (((n) & (mask)) >> \ + STM32_MDMA_SHIFT(mask)) + +#define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */ +#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */ + +/* MDMA Channel x interrupt/status register */ +#define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */ +#define STM32_MDMA_CISR_CRQA BIT(16) +#define STM32_MDMA_CISR_TCIF BIT(4) +#define STM32_MDMA_CISR_BTIF BIT(3) +#define STM32_MDMA_CISR_BRTIF BIT(2) +#define STM32_MDMA_CISR_CTCIF BIT(1) +#define STM32_MDMA_CISR_TEIF BIT(0) + +/* MDMA Channel x interrupt flag clear register */ +#define STM32_MDMA_CIFCR(x) (0x44 + 0x40 * (x)) +#define STM32_MDMA_CIFCR_CLTCIF BIT(4) +#define STM32_MDMA_CIFCR_CBTIF BIT(3) +#define STM32_MDMA_CIFCR_CBRTIF BIT(2) +#define STM32_MDMA_CIFCR_CCTCIF BIT(1) +#define STM32_MDMA_CIFCR_CTEIF BIT(0) +#define STM32_MDMA_CIFCR_CLEAR_ALL (STM32_MDMA_CIFCR_CLTCIF \ + | STM32_MDMA_CIFCR_CBTIF \ + | STM32_MDMA_CIFCR_CBRTIF \ + | STM32_MDMA_CIFCR_CCTCIF \ + | STM32_MDMA_CIFCR_CTEIF) + +/* MDMA Channel x error status register */ +#define STM32_MDMA_CESR(x) (0x48 + 0x40 * (x)) +#define STM32_MDMA_CESR_BSE BIT(11) +#define STM32_MDMA_CESR_ASR BIT(10) +#define STM32_MDMA_CESR_TEMD BIT(9) +#define STM32_MDMA_CESR_TELD BIT(8) +#define STM32_MDMA_CESR_TED BIT(7) +#define STM32_MDMA_CESR_TEA_MASK GENMASK(6, 0) + +/* MDMA Channel x control register */ +#define STM32_MDMA_CCR(x) (0x4C + 0x40 * (x)) +#define STM32_MDMA_CCR_SWRQ BIT(16) +#define STM32_MDMA_CCR_WEX BIT(14) +#define STM32_MDMA_CCR_HEX BIT(13) +#define STM32_MDMA_CCR_BEX BIT(12) +#define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6) +#define STM32_MDMA_CCR_PL(n) STM32_MDMA_SET(n, \ + 
STM32_MDMA_CCR_PL_MASK) +#define STM32_MDMA_CCR_TCIE BIT(5) +#define STM32_MDMA_CCR_BTIE BIT(4) +#define STM32_MDMA_CCR_BRTIE BIT(3) +#define STM32_MDMA_CCR_CTCIE BIT(2) +#define STM32_MDMA_CCR_TEIE BIT(1) +#define STM32_MDMA_CCR_EN BIT(0) +#define STM32_MDMA_CCR_IRQ_MASK (STM32_MDMA_CCR_TCIE \ + | STM32_MDMA_CCR_BTIE \ + | STM32_MDMA_CCR_BRTIE \ + | STM32_MDMA_CCR_CTCIE \ + | STM32_MDMA_CCR_TEIE) + +/* MDMA Channel x transfer configuration register */ +#define STM32_MDMA_CTCR(x) (0x50 + 0x40 * (x)) +#define STM32_MDMA_CTCR_BWM BIT(31) +#define STM32_MDMA_CTCR_SWRM BIT(30) +#define STM32_MDMA_CTCR_TRGM_MSK GENMASK(29, 28) +#define STM32_MDMA_CTCR_TRGM(n) STM32_MDMA_SET((n), \ + STM32_MDMA_CTCR_TRGM_MSK) +#define STM32_MDMA_CTCR_TRGM_GET(n) STM32_MDMA_GET((n), \ + STM32_MDMA_CTCR_TRGM_MSK) +#define STM32_MDMA_CTCR_PAM_MASK GENMASK(27, 26) +#define STM32_MDMA_CTCR_PAM(n) STM32_MDMA_SET(n, \ + STM32_MDMA_CTCR_PAM_MASK) +#define STM32_MDMA_CTCR_PKE BIT(25) +#define STM32_MDMA_CTCR_TLEN_MSK GENMASK(24, 18) +#define STM32_MDMA_CTCR_TLEN(n) STM32_MDMA_SET((n), \ + STM32_MDMA_CTCR_TLEN_MSK) +#define STM32_MDMA_CTCR_TLEN_GET(n) STM32_MDMA_GET((n), \ + STM32_MDMA_CTCR_TLEN_MSK) +#define STM32_MDMA_CTCR_LEN2_MSK GENMASK(25, 18) +#define STM32_MDMA_CTCR_LEN2(n) STM32_MDMA_SET((n), \ + STM32_MDMA_CTCR_LEN2_MSK) +#define STM32_MDMA_CTCR_LEN2_GET(n) STM32_MDMA_GET((n), \ + STM32_MDMA_CTCR_LEN2_MSK) +#define STM32_MDMA_CTCR_DBURST_MASK GENMASK(17, 15) +#define STM32_MDMA_CTCR_DBURST(n) STM32_MDMA_SET(n, \ + STM32_MDMA_CTCR_DBURST_MASK) +#define STM32_MDMA_CTCR_SBURST_MASK GENMASK(14, 12) +#define STM32_MDMA_CTCR_SBURST(n) STM32_MDMA_SET(n, \ + STM32_MDMA_CTCR_SBURST_MASK) +#define STM32_MDMA_CTCR_DINCOS_MASK GENMASK(11, 10) +#define STM32_MDMA_CTCR_DINCOS(n) STM32_MDMA_SET((n), \ + STM32_MDMA_CTCR_DINCOS_MASK) +#define STM32_MDMA_CTCR_SINCOS_MASK GENMASK(9, 8) +#define STM32_MDMA_CTCR_SINCOS(n) STM32_MDMA_SET((n), \ + STM32_MDMA_CTCR_SINCOS_MASK) +#define STM32_MDMA_CTCR_DSIZE_MASK GENMASK(7, 6) +#define STM32_MDMA_CTCR_DSIZE(n) STM32_MDMA_SET(n, \ + STM32_MDMA_CTCR_DSIZE_MASK) +#define STM32_MDMA_CTCR_SSIZE_MASK GENMASK(5, 4) +#define STM32_MDMA_CTCR_SSIZE(n) STM32_MDMA_SET(n, \ + STM32_MDMA_CTCR_SSIZE_MASK) +#define STM32_MDMA_CTCR_DINC_MASK GENMASK(3, 2) +#define STM32_MDMA_CTCR_DINC(n) STM32_MDMA_SET((n), \ + STM32_MDMA_CTCR_DINC_MASK) +#define STM32_MDMA_CTCR_SINC_MASK GENMASK(1, 0) +#define STM32_MDMA_CTCR_SINC(n) STM32_MDMA_SET((n), \ + STM32_MDMA_CTCR_SINC_MASK) +#define STM32_MDMA_CTCR_CFG_MASK (STM32_MDMA_CTCR_SINC_MASK \ + | STM32_MDMA_CTCR_DINC_MASK \ + | STM32_MDMA_CTCR_SINCOS_MASK \ + | STM32_MDMA_CTCR_DINCOS_MASK \ + | STM32_MDMA_CTCR_LEN2_MSK \ + | STM32_MDMA_CTCR_TRGM_MSK) + +/* MDMA Channel x block number of data register */ +#define STM32_MDMA_CBNDTR(x) (0x54 + 0x40 * (x)) +#define STM32_MDMA_CBNDTR_BRC_MK GENMASK(31, 20) +#define STM32_MDMA_CBNDTR_BRC(n) STM32_MDMA_SET(n, \ + STM32_MDMA_CBNDTR_BRC_MK) +#define STM32_MDMA_CBNDTR_BRC_GET(n) STM32_MDMA_GET((n), \ + STM32_MDMA_CBNDTR_BRC_MK) + +#define STM32_MDMA_CBNDTR_BRDUM BIT(19) +#define STM32_MDMA_CBNDTR_BRSUM BIT(18) +#define STM32_MDMA_CBNDTR_BNDT_MASK GENMASK(16, 0) +#define STM32_MDMA_CBNDTR_BNDT(n) STM32_MDMA_SET(n, \ + STM32_MDMA_CBNDTR_BNDT_MASK) + +/* MDMA Channel x source address register */ +#define STM32_MDMA_CSAR(x) (0x58 + 0x40 * (x)) + +/* MDMA Channel x destination address register */ +#define STM32_MDMA_CDAR(x) (0x5C + 0x40 * (x)) + +/* MDMA Channel x block repeat address update register */ +#define 
STM32_MDMA_CBRUR(x) (0x60 + 0x40 * (x)) +#define STM32_MDMA_CBRUR_DUV_MASK GENMASK(31, 16) +#define STM32_MDMA_CBRUR_DUV(n) STM32_MDMA_SET(n, \ + STM32_MDMA_CBRUR_DUV_MASK) +#define STM32_MDMA_CBRUR_SUV_MASK GENMASK(15, 0) +#define STM32_MDMA_CBRUR_SUV(n) STM32_MDMA_SET(n, \ + STM32_MDMA_CBRUR_SUV_MASK) + +/* MDMA Channel x link address register */ +#define STM32_MDMA_CLAR(x) (0x64 + 0x40 * (x)) + +/* MDMA Channel x trigger and bus selection register */ +#define STM32_MDMA_CTBR(x) (0x68 + 0x40 * (x)) +#define STM32_MDMA_CTBR_DBUS BIT(17) +#define STM32_MDMA_CTBR_SBUS BIT(16) +#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(7, 0) +#define STM32_MDMA_CTBR_TSEL(n) STM32_MDMA_SET(n, \ + STM32_MDMA_CTBR_TSEL_MASK) + +/* MDMA Channel x mask address register */ +#define STM32_MDMA_CMAR(x) (0x70 + 0x40 * (x)) + +/* MDMA Channel x mask data register */ +#define STM32_MDMA_CMDR(x) (0x74 + 0x40 * (x)) + +#define STM32_MDMA_MAX_BUF_LEN 128 +#define STM32_MDMA_MAX_BLOCK_LEN 65536 +#define STM32_MDMA_MAX_CHANNELS 63 +#define STM32_MDMA_MAX_REQUESTS 256 +#define STM32_MDMA_MAX_BURST 128 +#define STM32_MDMA_VERY_HIGH_PRIORITY 0x11 + +enum stm32_mdma_trigger_mode { + STM32_MDMA_BUFFER, + STM32_MDMA_BLOCK, + STM32_MDMA_BLOCK_REP, + STM32_MDMA_LINKED_LIST, +}; + +enum stm32_mdma_width { + STM32_MDMA_BYTE, + STM32_MDMA_HALF_WORD, + STM32_MDMA_WORD, + STM32_MDMA_DOUBLE_WORD, +}; + +enum stm32_mdma_inc_mode { + STM32_MDMA_FIXED = 0, + STM32_MDMA_INC = 2, + STM32_MDMA_DEC = 3, +}; + +struct stm32_mdma_chan_config { + u32 request; + u32 priority_level; + u32 transfer_config; + u32 mask_addr; + u32 mask_data; +}; + +struct stm32_mdma_hwdesc { + u32 ctcr; + u32 cbndtr; + u32 csar; + u32 cdar; + u32 cbrur; + u32 clar; + u32 ctbr; + u32 dummy; + u32 cmar; + u32 cmdr; +} __aligned(64); + +struct stm32_mdma_desc { + struct virt_dma_desc vdesc; + u32 ccr; + struct stm32_mdma_hwdesc *hwdesc; + dma_addr_t hwdesc_phys; + bool cyclic; + u32 count; +}; + +struct stm32_mdma_chan { + struct virt_dma_chan vchan; + struct dma_pool *desc_pool; + u32 id; + struct stm32_mdma_desc *desc; + u32 curr_hwdesc; + struct dma_slave_config dma_config; + struct stm32_mdma_chan_config chan_config; + bool busy; + u32 mem_burst; + u32 mem_width; +}; + +struct stm32_mdma_device { + struct dma_device ddev; + void __iomem *base; + struct clk *clk; + int irq; + struct reset_control *rst; + u32 nr_channels; + u32 nr_requests; + u32 nr_ahb_addr_masks; + struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS]; + u32 ahb_addr_masks[]; +}; + +static struct stm32_mdma_device *stm32_mdma_get_dev( + struct stm32_mdma_chan *chan) +{ + return container_of(chan->vchan.chan.device, struct stm32_mdma_device, + ddev); +} + +static struct stm32_mdma_chan *to_stm32_mdma_chan(struct dma_chan *c) +{ + return container_of(c, struct stm32_mdma_chan, vchan.chan); +} + +static struct stm32_mdma_desc *to_stm32_mdma_desc(struct virt_dma_desc *vdesc) +{ + return container_of(vdesc, struct stm32_mdma_desc, vdesc); +} + +static struct device *chan2dev(struct stm32_mdma_chan *chan) +{ + return &chan->vchan.chan.dev->device; +} + +static struct device *mdma2dev(struct stm32_mdma_device *mdma_dev) +{ + return mdma_dev->ddev.dev; +} + +static u32 stm32_mdma_read(struct stm32_mdma_device *dmadev, u32 reg) +{ + return readl_relaxed(dmadev->base + reg); +} + +static void stm32_mdma_write(struct stm32_mdma_device *dmadev, u32 reg, u32 val) +{ + writel_relaxed(val, dmadev->base + reg); +} + +static void stm32_mdma_set_bits(struct stm32_mdma_device *dmadev, u32 reg, + u32 mask) +{ + void 
__iomem *addr = dmadev->base + reg; + + writel_relaxed(readl_relaxed(addr) | mask, addr); +} + +static void stm32_mdma_clr_bits(struct stm32_mdma_device *dmadev, u32 reg, + u32 mask) +{ + void __iomem *addr = dmadev->base + reg; + + writel_relaxed(readl_relaxed(addr) & ~mask, addr); +} + +static struct stm32_mdma_desc *stm32_mdma_alloc_desc( + struct stm32_mdma_chan *chan, u32 count) +{ + struct stm32_mdma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (!desc) + return NULL; + + desc->hwdesc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, + &desc->hwdesc_phys); + if (!desc->hwdesc) { + dev_err(chan2dev(chan), "Failed to allocate descriptor\n"); + kfree(desc); + return NULL; + } + + desc->count = count; + + return desc; +} + +static void stm32_mdma_desc_free(struct virt_dma_desc *vdesc) +{ + struct stm32_mdma_desc *desc = to_stm32_mdma_desc(vdesc); + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(vdesc->tx.chan); + + dma_pool_free(chan->desc_pool, desc->hwdesc, desc->hwdesc_phys); + kfree(desc); +} + +static int stm32_mdma_get_width(struct stm32_mdma_chan *chan, + enum dma_slave_buswidth width) +{ + switch (width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + case DMA_SLAVE_BUSWIDTH_2_BYTES: + case DMA_SLAVE_BUSWIDTH_4_BYTES: + case DMA_SLAVE_BUSWIDTH_8_BYTES: + return ffs(width) - 1; + default: + dev_err(chan2dev(chan), "Dma bus width %i not supported\n", + width); + return -EINVAL; + } +} + +static enum dma_slave_buswidth stm32_mdma_get_max_width(u32 buf_len, u32 tlen) +{ + enum dma_slave_buswidth max_width = DMA_SLAVE_BUSWIDTH_8_BYTES; + + for (max_width = DMA_SLAVE_BUSWIDTH_8_BYTES; + max_width > DMA_SLAVE_BUSWIDTH_1_BYTE; + max_width >>= 1) { + if (((buf_len % max_width) == 0) && (tlen >= max_width)) + break; + } + + return max_width; +} + +static u32 stm32_mdma_get_best_burst(u32 buf_len, u32 tlen, u32 max_burst, + enum dma_slave_buswidth width) +{ + u32 best_burst = max_burst; + u32 burst_len = best_burst * width; + + while ((burst_len > 0) && (tlen % burst_len)) { + best_burst = best_burst >> 1; + burst_len = best_burst * width; + } + + return (best_burst > 0) ? 
best_burst : 1; +} + +static int stm32_mdma_disable_chan(struct stm32_mdma_chan *chan) +{ + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + u32 ccr, cisr, id, reg; + int ret; + + id = chan->id; + reg = STM32_MDMA_CCR(id); + + /* Disable interrupts */ + stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_IRQ_MASK); + + ccr = stm32_mdma_read(dmadev, reg); + if (ccr & STM32_MDMA_CCR_EN) { + stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_EN); + + /* Ensure that any ongoing transfer has been completed */ + ret = readl_relaxed_poll_timeout_atomic( + dmadev->base + STM32_MDMA_CISR(id), cisr, + (cisr & STM32_MDMA_CISR_CTCIF), 10, 1000); + if (ret) { + dev_err(chan2dev(chan), "%s: timeout!\n", __func__); + return -EBUSY; + } + } + + return 0; +} + +static void stm32_mdma_stop(struct stm32_mdma_chan *chan) +{ + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + u32 status; + int ret; + + /* Disable DMA */ + ret = stm32_mdma_disable_chan(chan); + if (ret < 0) + return; + + /* Clear interrupt status if it is there */ + status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id)); + if (status) { + dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n", + __func__, status); + stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status); + } + + chan->busy = false; +} + +static void stm32_mdma_set_bus(struct stm32_mdma_device *dmadev, u32 *ctbr, + u32 ctbr_mask, u32 src_addr) +{ + u32 mask; + int i; + + /* Check if memory device is on AHB or AXI */ + *ctbr &= ~ctbr_mask; + mask = src_addr & 0xF0000000; + for (i = 0; i < dmadev->nr_ahb_addr_masks; i++) { + if (mask == dmadev->ahb_addr_masks[i]) { + *ctbr |= ctbr_mask; + break; + } + } +} + +static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan, + enum dma_transfer_direction direction, + u32 *mdma_ccr, u32 *mdma_ctcr, + u32 *mdma_ctbr, u32 buf_len) +{ + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + struct stm32_mdma_chan_config *chan_config = &chan->chan_config; + enum dma_slave_buswidth src_addr_width, dst_addr_width; + phys_addr_t src_addr, dst_addr; + int src_bus_width, dst_bus_width; + u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst; + u32 ccr, ctcr, ctbr, tlen; + + src_addr_width = chan->dma_config.src_addr_width; + dst_addr_width = chan->dma_config.dst_addr_width; + src_maxburst = chan->dma_config.src_maxburst; + dst_maxburst = chan->dma_config.dst_maxburst; + + ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)); + ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)); + ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)); + + /* Enable HW request mode */ + ctcr &= ~STM32_MDMA_CTCR_SWRM; + + /* Set DINC, SINC, DINCOS, SINCOS, TRGM and TLEN retrieve from DT */ + ctcr &= ~STM32_MDMA_CTCR_CFG_MASK; + ctcr |= chan_config->transfer_config & STM32_MDMA_CTCR_CFG_MASK; + + /* + * For buffer transfer length (TLEN) we have to set + * the number of bytes - 1 in CTCR register + */ + tlen = STM32_MDMA_CTCR_LEN2_GET(ctcr); + ctcr &= ~STM32_MDMA_CTCR_LEN2_MSK; + ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1)); + + /* Check burst size constraints */ + if (src_maxburst * src_addr_width > STM32_MDMA_MAX_BURST || + dst_maxburst * dst_addr_width > STM32_MDMA_MAX_BURST) { + dev_err(chan2dev(chan), + "burst size * bus width higher than %d bytes\n", + STM32_MDMA_MAX_BURST); + return -EINVAL; + } + + if ((!is_power_of_2(src_maxburst) && src_maxburst > 0) || + (!is_power_of_2(dst_maxburst) && dst_maxburst > 0)) { + dev_err(chan2dev(chan), "burst size must be a power of 2\n"); + return 
-EINVAL; + } + + /* + * Configure channel control: + * - Clear SW request as in this case this is a HW one + * - Clear WEX, HEX and BEX bits + * - Set priority level + */ + ccr &= ~(STM32_MDMA_CCR_SWRQ | STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX | + STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK); + ccr |= STM32_MDMA_CCR_PL(chan_config->priority_level); + + /* Configure Trigger selection */ + ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK; + ctbr |= STM32_MDMA_CTBR_TSEL(chan_config->request); + + switch (direction) { + case DMA_MEM_TO_DEV: + /* Set device data size */ + dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width); + if (dst_bus_width < 0) + return dst_bus_width; + ctcr &= ~STM32_MDMA_CTCR_DSIZE_MASK; + ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width); + + /* Set device burst value */ + dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen, + dst_maxburst, + dst_addr_width); + chan->mem_burst = dst_best_burst; + ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK; + ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst))); + + /* Set memory data size */ + src_addr_width = stm32_mdma_get_max_width(buf_len, tlen); + chan->mem_width = src_addr_width; + src_bus_width = stm32_mdma_get_width(chan, src_addr_width); + if (src_bus_width < 0) + return src_bus_width; + ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK | + STM32_MDMA_CTCR_SINCOS_MASK; + ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width) | + STM32_MDMA_CTCR_SINCOS(src_bus_width); + + /* Set memory burst value */ + src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width; + src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen, + src_maxburst, + src_addr_width); + chan->mem_burst = src_best_burst; + ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK; + ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst))); + + /* Select bus */ + dst_addr = chan->dma_config.dst_addr; + stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, + dst_addr); + + /* Set destination address */ + stm32_mdma_write(dmadev, STM32_MDMA_CDAR(chan->id), dst_addr); + break; + + case DMA_DEV_TO_MEM: + /* Set device data size */ + src_bus_width = stm32_mdma_get_width(chan, src_addr_width); + if (src_bus_width < 0) + return src_bus_width; + ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK; + ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width); + + /* Set device burst value */ + src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen, + src_maxburst, + src_addr_width); + ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK; + ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst))); + + /* Set memory data size */ + dst_addr_width = stm32_mdma_get_max_width(buf_len, tlen); + chan->mem_width = dst_addr_width; + dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width); + if (dst_bus_width < 0) + return dst_bus_width; + ctcr &= ~(STM32_MDMA_CTCR_DSIZE_MASK | + STM32_MDMA_CTCR_DINCOS_MASK); + ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width) | + STM32_MDMA_CTCR_DINCOS(dst_bus_width); + + /* Set memory burst value */ + dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width; + dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen, + dst_maxburst, + dst_addr_width); + ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK; + ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst))); + + /* Select bus */ + src_addr = chan->dma_config.src_addr; + stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, + src_addr); + + /* Set source address */ + stm32_mdma_write(dmadev, STM32_MDMA_CSAR(chan->id), src_addr); + break; + + default: + dev_err(chan2dev(chan), "Dma direction is not supported\n"); + return -EINVAL; + } + + *mdma_ccr = ccr; + *mdma_ctcr = ctcr; + *mdma_ctbr = ctbr; + + return 0; 
+} + +static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan, + struct stm32_mdma_hwdesc *hwdesc) +{ + dev_dbg(chan2dev(chan), "hwdesc: 0x%08x\n", (unsigned int)hwdesc); + dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", hwdesc->ctcr); + dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", hwdesc->cbndtr); + dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", hwdesc->csar); + dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", hwdesc->cdar); + dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", hwdesc->cbrur); + dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", hwdesc->clar); + dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", hwdesc->ctbr); + dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", hwdesc->cmar); + dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n\n", hwdesc->cmdr); +} + +static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan, + struct stm32_mdma_desc *desc, + enum dma_transfer_direction dir, u32 count, + dma_addr_t src_addr, dma_addr_t dst_addr, + u32 len, u32 ctcr, u32 ctbr, bool is_last, + bool is_first, bool is_cyclic) +{ + struct stm32_mdma_chan_config *config = &chan->chan_config; + struct stm32_mdma_hwdesc *hwdesc; + u32 next = count + 1; + + hwdesc = &desc->hwdesc[count]; + hwdesc->ctcr = ctcr; + hwdesc->cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK | + STM32_MDMA_CBNDTR_BRDUM | + STM32_MDMA_CBNDTR_BRSUM | + STM32_MDMA_CBNDTR_BNDT_MASK); + hwdesc->cbndtr |= STM32_MDMA_CBNDTR_BNDT(len); + hwdesc->csar = src_addr; + hwdesc->cdar = dst_addr; + hwdesc->cbrur = 0; + hwdesc->clar = desc->hwdesc_phys + next * sizeof(*hwdesc); + hwdesc->ctbr = ctbr; + hwdesc->cmar = config->mask_addr; + hwdesc->cmdr = config->mask_data; + + if (is_last) { + if (is_cyclic) + hwdesc->clar = desc->hwdesc_phys; + else + hwdesc->clar = 0; + } + + stm32_mdma_dump_hwdesc(chan, hwdesc); +} + +static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan, + struct stm32_mdma_desc *desc, + struct scatterlist *sgl, u32 sg_len, + enum dma_transfer_direction direction) +{ + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + struct dma_slave_config *dma_config = &chan->dma_config; + struct scatterlist *sg; + dma_addr_t src_addr, dst_addr; + u32 ccr, ctcr, ctbr; + int i, ret = 0; + + for_each_sg(sgl, sg, sg_len, i) { + if (sg_dma_len(sg) > STM32_MDMA_MAX_BLOCK_LEN) { + dev_err(chan2dev(chan), "Invalid block len\n"); + return -EINVAL; + } + + ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr, + &ctbr, sg_dma_len(sg)); + if (ret < 0) + return ret; + + if (direction == DMA_MEM_TO_DEV) { + src_addr = sg_dma_address(sg); + dst_addr = dma_config->dst_addr; + stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, + src_addr); + } else { + src_addr = dma_config->src_addr; + dst_addr = sg_dma_address(sg); + stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, + dst_addr); + } + + stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr, + dst_addr, sg_dma_len(sg), ctcr, ctbr, + i == sg_len - 1, i == 0, false); + } + + /* Enable interrupts */ + ccr &= ~STM32_MDMA_CCR_IRQ_MASK; + ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE; + if (sg_len > 1) + ccr |= STM32_MDMA_CCR_BTIE; + desc->ccr = ccr; + + return 0; +} + +static struct dma_async_tx_descriptor * +stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl, + u32 sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); + struct stm32_mdma_desc *desc; + int ret; + + /* + * Once DMA is in setup cyclic mode the channel we cannot assign this + * channel anymore. 
The DMA channel needs to be aborted or terminated + * for allowing another request. + */ + if (chan->desc && chan->desc->cyclic) { + dev_err(chan2dev(chan), + "Request not allowed when dma in cyclic mode\n"); + return NULL; + } + + desc = stm32_mdma_alloc_desc(chan, sg_len); + if (!desc) + return NULL; + + ret = stm32_mdma_setup_xfer(chan, desc, sgl, sg_len, direction); + if (ret < 0) + goto xfer_setup_err; + + desc->cyclic = false; + + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); + +xfer_setup_err: + dma_pool_free(chan->desc_pool, &desc->hwdesc, desc->hwdesc_phys); + kfree(desc); + return NULL; +} + +static struct dma_async_tx_descriptor * +stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction direction, + unsigned long flags) +{ + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + struct dma_slave_config *dma_config = &chan->dma_config; + struct stm32_mdma_desc *desc; + dma_addr_t src_addr, dst_addr; + u32 ccr, ctcr, ctbr, count; + int i, ret; + + /* + * Once DMA is in setup cyclic mode the channel we cannot assign this + * channel anymore. The DMA channel needs to be aborted or terminated + * for allowing another request. + */ + if (chan->desc && chan->desc->cyclic) { + dev_err(chan2dev(chan), + "Request not allowed when dma in cyclic mode\n"); + return NULL; + } + + if (!buf_len || !period_len || period_len > STM32_MDMA_MAX_BLOCK_LEN) { + dev_err(chan2dev(chan), "Invalid buffer/period len\n"); + return NULL; + } + + if (buf_len % period_len) { + dev_err(chan2dev(chan), "buf_len not multiple of period_len\n"); + return NULL; + } + + count = buf_len / period_len; + + desc = stm32_mdma_alloc_desc(chan, count); + if (!desc) + return NULL; + + ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr, &ctbr, + period_len); + if (ret < 0) + goto xfer_setup_err; + + /* Enable interrupts */ + ccr &= ~STM32_MDMA_CCR_IRQ_MASK; + ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE | STM32_MDMA_CCR_BTIE; + desc->ccr = ccr; + + /* Select bus */ + if (direction == DMA_MEM_TO_DEV) { + src_addr = buf_addr; + stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, + src_addr); + } else { + dst_addr = buf_addr; + stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, + dst_addr); + } + + /* Configure hwdesc list */ + for (i = 0; i < count; i++) { + if (direction == DMA_MEM_TO_DEV) { + src_addr = buf_addr + i * period_len; + dst_addr = dma_config->dst_addr; + } else { + src_addr = dma_config->src_addr; + dst_addr = buf_addr + i * period_len; + } + + stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr, + dst_addr, period_len, ctcr, ctbr, + i == count - 1, i == 0, true); + } + + desc->cyclic = true; + + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); + +xfer_setup_err: + dma_pool_free(chan->desc_pool, &desc->hwdesc, desc->hwdesc_phys); + kfree(desc); + return NULL; +} + +static struct dma_async_tx_descriptor * +stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + enum dma_slave_buswidth max_width; + struct stm32_mdma_desc *desc; + struct stm32_mdma_hwdesc *hwdesc; + u32 ccr, ctcr, ctbr, cbndtr, count, max_burst, mdma_burst; + u32 best_burst, tlen; + size_t xfer_count, offset; + int src_bus_width, dst_bus_width; + int i; + + /* + * Once DMA is in 
setup cyclic mode the channel we cannot assign this + * channel anymore. The DMA channel needs to be aborted or terminated + * to allow another request + */ + if (chan->desc && chan->desc->cyclic) { + dev_err(chan2dev(chan), + "Request not allowed when dma in cyclic mode\n"); + return NULL; + } + + count = DIV_ROUND_UP(len, STM32_MDMA_MAX_BLOCK_LEN); + desc = stm32_mdma_alloc_desc(chan, count); + if (!desc) + return NULL; + + ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)); + ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)); + ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)); + cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)); + + /* Enable sw req, some interrupts and clear other bits */ + ccr &= ~(STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX | + STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK | + STM32_MDMA_CCR_IRQ_MASK); + ccr |= STM32_MDMA_CCR_TEIE; + + /* Enable SW request mode, dest/src inc and clear other bits */ + ctcr &= ~(STM32_MDMA_CTCR_BWM | STM32_MDMA_CTCR_TRGM_MSK | + STM32_MDMA_CTCR_PAM_MASK | STM32_MDMA_CTCR_PKE | + STM32_MDMA_CTCR_TLEN_MSK | STM32_MDMA_CTCR_DBURST_MASK | + STM32_MDMA_CTCR_SBURST_MASK | STM32_MDMA_CTCR_DINCOS_MASK | + STM32_MDMA_CTCR_SINCOS_MASK | STM32_MDMA_CTCR_DSIZE_MASK | + STM32_MDMA_CTCR_SSIZE_MASK | STM32_MDMA_CTCR_DINC_MASK | + STM32_MDMA_CTCR_SINC_MASK); + ctcr |= STM32_MDMA_CTCR_SWRM | STM32_MDMA_CTCR_SINC(STM32_MDMA_INC) | + STM32_MDMA_CTCR_DINC(STM32_MDMA_INC); + + /* Reset HW request */ + ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK; + + /* Select bus */ + stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, src); + stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, dest); + + /* Clear CBNDTR registers */ + cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK | STM32_MDMA_CBNDTR_BRDUM | + STM32_MDMA_CBNDTR_BRSUM | STM32_MDMA_CBNDTR_BNDT_MASK); + + if (len <= STM32_MDMA_MAX_BLOCK_LEN) { + cbndtr |= STM32_MDMA_CBNDTR_BNDT(len); + if (len <= STM32_MDMA_MAX_BUF_LEN) { + /* Setup a buffer transfer */ + ccr |= STM32_MDMA_CCR_TCIE | STM32_MDMA_CCR_CTCIE; + ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BUFFER); + } else { + /* Setup a block transfer */ + ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE; + ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BLOCK); + } + + tlen = STM32_MDMA_MAX_BUF_LEN; + ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1)); + + /* Set source best burst size */ + max_width = stm32_mdma_get_max_width(len, tlen); + if (src % max_width) + max_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + src_bus_width = stm32_mdma_get_width(chan, max_width); + + max_burst = tlen / max_width; + best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst, + max_width); + mdma_burst = ilog2(best_burst); + + ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) | + STM32_MDMA_CTCR_SSIZE(src_bus_width) | + STM32_MDMA_CTCR_SINCOS(src_bus_width); + + /* Set destination best burst size */ + max_width = stm32_mdma_get_max_width(len, tlen); + if (dest % max_width) + max_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dst_bus_width = stm32_mdma_get_width(chan, max_width); + + max_burst = tlen / max_width; + best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst, + max_width); + mdma_burst = ilog2(best_burst); + + ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) | + STM32_MDMA_CTCR_DSIZE(dst_bus_width) | + STM32_MDMA_CTCR_DINCOS(dst_bus_width); + + if (dst_bus_width != src_bus_width) + ctcr |= STM32_MDMA_CTCR_PKE; + + /* Prepare hardware descriptor */ + hwdesc = desc->hwdesc; + hwdesc->ctcr = ctcr; + hwdesc->cbndtr = cbndtr; + hwdesc->csar = src; + hwdesc->cdar = dest; + hwdesc->cbrur = 0; + 
hwdesc->clar = 0; + hwdesc->ctbr = ctbr; + hwdesc->cmar = 0; + hwdesc->cmdr = 0; + + stm32_mdma_dump_hwdesc(chan, hwdesc); + } else { + /* Setup a LLI transfer */ + ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_LINKED_LIST) | + STM32_MDMA_CTCR_TLEN((STM32_MDMA_MAX_BUF_LEN - 1)); + ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE; + tlen = STM32_MDMA_MAX_BUF_LEN; + + for (i = 0, offset = 0; offset < len; + i++, offset += xfer_count) { + xfer_count = min_t(size_t, len - offset, + STM32_MDMA_MAX_BLOCK_LEN); + + /* Set source best burst size */ + max_width = stm32_mdma_get_max_width(len, tlen); + if (src % max_width) + max_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + src_bus_width = stm32_mdma_get_width(chan, max_width); + + max_burst = tlen / max_width; + best_burst = stm32_mdma_get_best_burst(len, tlen, + max_burst, + max_width); + mdma_burst = ilog2(best_burst); + + ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) | + STM32_MDMA_CTCR_SSIZE(src_bus_width) | + STM32_MDMA_CTCR_SINCOS(src_bus_width); + + /* Set destination best burst size */ + max_width = stm32_mdma_get_max_width(len, tlen); + if (dest % max_width) + max_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dst_bus_width = stm32_mdma_get_width(chan, max_width); + + max_burst = tlen / max_width; + best_burst = stm32_mdma_get_best_burst(len, tlen, + max_burst, + max_width); + mdma_burst = ilog2(best_burst); + + ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) | + STM32_MDMA_CTCR_DSIZE(dst_bus_width) | + STM32_MDMA_CTCR_DINCOS(dst_bus_width); + + if (dst_bus_width != src_bus_width) + ctcr |= STM32_MDMA_CTCR_PKE; + + /* Prepare hardware descriptor */ + stm32_mdma_setup_hwdesc(chan, desc, DMA_MEM_TO_MEM, i, + src + offset, dest + offset, + xfer_count, ctcr, ctbr, + i == count - 1, i == 0, false); + } + } + + desc->ccr = ccr; + + desc->cyclic = false; + + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); +} + +static void stm32_mdma_dump_reg(struct stm32_mdma_chan *chan) +{ + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + + dev_dbg(chan2dev(chan), "CCR: 0x%08x\n", + stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id))); + dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", + stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id))); + dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", + stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id))); + dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", + stm32_mdma_read(dmadev, STM32_MDMA_CSAR(chan->id))); + dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", + stm32_mdma_read(dmadev, STM32_MDMA_CDAR(chan->id))); + dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", + stm32_mdma_read(dmadev, STM32_MDMA_CBRUR(chan->id))); + dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", + stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id))); + dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", + stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id))); + dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", + stm32_mdma_read(dmadev, STM32_MDMA_CMAR(chan->id))); + dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n", + stm32_mdma_read(dmadev, STM32_MDMA_CMDR(chan->id))); +} + +static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan) +{ + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + struct virt_dma_desc *vdesc; + struct stm32_mdma_hwdesc *hwdesc; + u32 id = chan->id; + u32 status, reg; + + vdesc = vchan_next_desc(&chan->vchan); + if (!vdesc) { + chan->desc = NULL; + return; + } + + chan->desc = to_stm32_mdma_desc(vdesc); + hwdesc = chan->desc->hwdesc; + chan->curr_hwdesc = 0; + + stm32_mdma_write(dmadev, STM32_MDMA_CCR(id), chan->desc->ccr); + stm32_mdma_write(dmadev, STM32_MDMA_CTCR(id), 
hwdesc->ctcr); + stm32_mdma_write(dmadev, STM32_MDMA_CBNDTR(id), hwdesc->cbndtr); + stm32_mdma_write(dmadev, STM32_MDMA_CSAR(id), hwdesc->csar); + stm32_mdma_write(dmadev, STM32_MDMA_CDAR(id), hwdesc->cdar); + stm32_mdma_write(dmadev, STM32_MDMA_CBRUR(id), hwdesc->cbrur); + stm32_mdma_write(dmadev, STM32_MDMA_CLAR(id), hwdesc->clar); + stm32_mdma_write(dmadev, STM32_MDMA_CTBR(id), hwdesc->ctbr); + stm32_mdma_write(dmadev, STM32_MDMA_CMAR(id), hwdesc->cmar); + stm32_mdma_write(dmadev, STM32_MDMA_CMDR(id), hwdesc->cmdr); + + /* Clear interrupt status if it is there */ + status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id)); + if (status) + stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(id), status); + + stm32_mdma_dump_reg(chan); + + /* Start DMA */ + stm32_mdma_set_bits(dmadev, STM32_MDMA_CCR(id), STM32_MDMA_CCR_EN); + + /* Set SW request in case of MEM2MEM transfer */ + if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM) { + reg = STM32_MDMA_CCR(id); + stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ); + } + + chan->busy = true; + + dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan); +} + +static void stm32_mdma_issue_pending(struct dma_chan *c) +{ + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); + unsigned long flags; + + spin_lock_irqsave(&chan->vchan.lock, flags); + + if (!vchan_issue_pending(&chan->vchan)) + goto end; + + dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan); + + if (!chan->desc && !chan->busy) + stm32_mdma_start_transfer(chan); + +end: + spin_unlock_irqrestore(&chan->vchan.lock, flags); +} + +static int stm32_mdma_pause(struct dma_chan *c) +{ + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); + unsigned long flags; + int ret; + + spin_lock_irqsave(&chan->vchan.lock, flags); + ret = stm32_mdma_disable_chan(chan); + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + if (!ret) + dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan); + + return ret; +} + +static int stm32_mdma_resume(struct dma_chan *c) +{ + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + struct stm32_mdma_hwdesc *hwdesc; + unsigned long flags; + u32 status, reg; + + hwdesc = &chan->desc->hwdesc[chan->curr_hwdesc]; + + spin_lock_irqsave(&chan->vchan.lock, flags); + + /* Re-configure control register */ + stm32_mdma_write(dmadev, STM32_MDMA_CCR(chan->id), chan->desc->ccr); + + /* Clear interrupt status if it is there */ + status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id)); + if (status) + stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status); + + stm32_mdma_dump_reg(chan); + + /* Re-start DMA */ + reg = STM32_MDMA_CCR(chan->id); + stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_EN); + + /* Set SW request in case of MEM2MEM transfer */ + if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM) + stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ); + + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan); + + return 0; +} + +static int stm32_mdma_terminate_all(struct dma_chan *c) +{ + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&chan->vchan.lock, flags); + if (chan->busy) { + stm32_mdma_stop(chan); + chan->desc = NULL; + } + vchan_get_all_descriptors(&chan->vchan, &head); + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + vchan_dma_desc_free_list(&chan->vchan, &head); + + return 0; +} + +static void stm32_mdma_synchronize(struct dma_chan *c) +{ + struct stm32_mdma_chan 
*chan = to_stm32_mdma_chan(c); + + vchan_synchronize(&chan->vchan); +} + +static int stm32_mdma_slave_config(struct dma_chan *c, + struct dma_slave_config *config) +{ + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); + + memcpy(&chan->dma_config, config, sizeof(*config)); + + return 0; +} + +static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan, + struct stm32_mdma_desc *desc, + u32 curr_hwdesc) +{ + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + struct stm32_mdma_hwdesc *hwdesc = desc->hwdesc; + u32 cbndtr, residue, modulo, burst_size; + int i; + + residue = 0; + for (i = curr_hwdesc + 1; i < desc->count; i++) { + hwdesc = &desc->hwdesc[i]; + residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr); + } + cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)); + residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK; + + if (!chan->mem_burst) + return residue; + + burst_size = chan->mem_burst * chan->mem_width; + modulo = residue % burst_size; + if (modulo) + residue = residue - modulo + burst_size; + + return residue; +} + +static enum dma_status stm32_mdma_tx_status(struct dma_chan *c, + dma_cookie_t cookie, + struct dma_tx_state *state) +{ + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); + struct virt_dma_desc *vdesc; + enum dma_status status; + unsigned long flags; + u32 residue = 0; + + status = dma_cookie_status(c, cookie, state); + if ((status == DMA_COMPLETE) || (!state)) + return status; + + spin_lock_irqsave(&chan->vchan.lock, flags); + + vdesc = vchan_find_desc(&chan->vchan, cookie); + if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) + residue = stm32_mdma_desc_residue(chan, chan->desc, + chan->curr_hwdesc); + else if (vdesc) + residue = stm32_mdma_desc_residue(chan, + to_stm32_mdma_desc(vdesc), 0); + dma_set_residue(state, residue); + + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + return status; +} + +static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan) +{ + list_del(&chan->desc->vdesc.node); + vchan_cookie_complete(&chan->desc->vdesc); + chan->desc = NULL; + chan->busy = false; + + /* Start the next transfer if this driver has a next desc */ + stm32_mdma_start_transfer(chan); +} + +static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) +{ + struct stm32_mdma_device *dmadev = devid; + struct stm32_mdma_chan *chan = devid; + u32 reg, id, ien, status, flag; + + /* Find out which channel generates the interrupt */ + status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0); + if (status) { + id = __ffs(status); + } else { + status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1); + if (!status) { + dev_dbg(mdma2dev(dmadev), "spurious it\n"); + return IRQ_NONE; + } + id = __ffs(status); + /* + * As GISR0 provides status for channel id from 0 to 31, + * so GISR1 provides status for channel id from 32 to 62 + */ + id += 32; + } + + chan = &dmadev->chan[id]; + if (!chan) { + dev_err(chan2dev(chan), "MDMA channel not initialized\n"); + goto exit; + } + + /* Handle interrupt for the channel */ + spin_lock(&chan->vchan.lock); + status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id)); + ien = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)); + ien &= STM32_MDMA_CCR_IRQ_MASK; + ien >>= 1; + + if (!(status & ien)) { + spin_unlock(&chan->vchan.lock); + dev_dbg(chan2dev(chan), + "spurious it (status=0x%04x, ien=0x%04x)\n", + status, ien); + return IRQ_NONE; + } + + flag = __ffs(status & ien); + reg = STM32_MDMA_CIFCR(chan->id); + + switch (1 << flag) { + case STM32_MDMA_CISR_TEIF: + id = chan->id; + status = 
readl_relaxed(dmadev->base + STM32_MDMA_CESR(id)); + dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n", status); + stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF); + break; + + case STM32_MDMA_CISR_CTCIF: + stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF); + stm32_mdma_xfer_end(chan); + break; + + case STM32_MDMA_CISR_BRTIF: + stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF); + break; + + case STM32_MDMA_CISR_BTIF: + stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF); + chan->curr_hwdesc++; + if (chan->desc && chan->desc->cyclic) { + if (chan->curr_hwdesc == chan->desc->count) + chan->curr_hwdesc = 0; + vchan_cyclic_callback(&chan->desc->vdesc); + } + break; + + case STM32_MDMA_CISR_TCIF: + stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF); + break; + + default: + dev_err(chan2dev(chan), "it %d unhandled (status=0x%04x)\n", + 1 << flag, status); + } + + spin_unlock(&chan->vchan.lock); + +exit: + return IRQ_HANDLED; +} + +static int stm32_mdma_alloc_chan_resources(struct dma_chan *c) +{ + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + int ret; + + chan->desc_pool = dmam_pool_create(dev_name(&c->dev->device), + c->device->dev, + sizeof(struct stm32_mdma_hwdesc), + __alignof__(struct stm32_mdma_hwdesc), + 0); + if (!chan->desc_pool) { + dev_err(chan2dev(chan), "failed to allocate descriptor pool\n"); + return -ENOMEM; + } + + ret = clk_prepare_enable(dmadev->clk); + if (ret < 0) { + dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret); + return ret; + } + + ret = stm32_mdma_disable_chan(chan); + if (ret < 0) + clk_disable_unprepare(dmadev->clk); + + return ret; +} + +static void stm32_mdma_free_chan_resources(struct dma_chan *c) +{ + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + unsigned long flags; + + dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id); + + if (chan->busy) { + spin_lock_irqsave(&chan->vchan.lock, flags); + stm32_mdma_stop(chan); + chan->desc = NULL; + spin_unlock_irqrestore(&chan->vchan.lock, flags); + } + + clk_disable_unprepare(dmadev->clk); + vchan_free_chan_resources(to_virt_chan(c)); + dmam_pool_destroy(chan->desc_pool); + chan->desc_pool = NULL; +} + +static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct stm32_mdma_device *dmadev = ofdma->of_dma_data; + struct stm32_mdma_chan *chan; + struct dma_chan *c; + struct stm32_mdma_chan_config config; + + if (dma_spec->args_count < 5) { + dev_err(mdma2dev(dmadev), "Bad number of args\n"); + return NULL; + } + + config.request = dma_spec->args[0]; + config.priority_level = dma_spec->args[1]; + config.transfer_config = dma_spec->args[2]; + config.mask_addr = dma_spec->args[3]; + config.mask_data = dma_spec->args[4]; + + if (config.request >= dmadev->nr_requests) { + dev_err(mdma2dev(dmadev), "Bad request line\n"); + return NULL; + } + + if (config.priority_level > STM32_MDMA_VERY_HIGH_PRIORITY) { + dev_err(mdma2dev(dmadev), "Priority level not supported\n"); + return NULL; + } + + c = dma_get_any_slave_channel(&dmadev->ddev); + if (!c) { + dev_err(mdma2dev(dmadev), "No more channel avalaible\n"); + return NULL; + } + + chan = to_stm32_mdma_chan(c); + chan->chan_config = config; + + return c; +} + +static const struct of_device_id stm32_mdma_of_match[] = { + { .compatible = "st,stm32h7-mdma", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, stm32_mdma_of_match); 
+ +static int stm32_mdma_probe(struct platform_device *pdev) +{ + struct stm32_mdma_chan *chan; + struct stm32_mdma_device *dmadev; + struct dma_device *dd; + struct device_node *of_node; + struct resource *res; + u32 nr_channels, nr_requests; + int i, count, ret; + + of_node = pdev->dev.of_node; + if (!of_node) + return -ENODEV; + + ret = device_property_read_u32(&pdev->dev, "dma-channels", + &nr_channels); + if (ret) { + nr_channels = STM32_MDMA_MAX_CHANNELS; + dev_warn(&pdev->dev, "MDMA defaulting on %i channels\n", + nr_channels); + } + + ret = device_property_read_u32(&pdev->dev, "dma-requests", + &nr_requests); + if (ret) { + nr_requests = STM32_MDMA_MAX_REQUESTS; + dev_warn(&pdev->dev, "MDMA defaulting on %i request lines\n", + nr_requests); + } + + count = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", + NULL, 0); + if (count < 0) + count = 0; + + dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev) + sizeof(u32) * count, + GFP_KERNEL); + if (!dmadev) + return -ENOMEM; + + dmadev->nr_channels = nr_channels; + dmadev->nr_requests = nr_requests; + device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", + dmadev->ahb_addr_masks, + count); + dmadev->nr_ahb_addr_masks = count; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dmadev->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dmadev->base)) + return PTR_ERR(dmadev->base); + + dmadev->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(dmadev->clk)) { + ret = PTR_ERR(dmadev->clk); + if (ret == -EPROBE_DEFER) + dev_info(&pdev->dev, "Missing controller clock\n"); + return ret; + } + + dmadev->rst = devm_reset_control_get(&pdev->dev, NULL); + if (!IS_ERR(dmadev->rst)) { + reset_control_assert(dmadev->rst); + udelay(2); + reset_control_deassert(dmadev->rst); + } + + dd = &dmadev->ddev; + dma_cap_set(DMA_SLAVE, dd->cap_mask); + dma_cap_set(DMA_PRIVATE, dd->cap_mask); + dma_cap_set(DMA_CYCLIC, dd->cap_mask); + dma_cap_set(DMA_MEMCPY, dd->cap_mask); + dd->device_alloc_chan_resources = stm32_mdma_alloc_chan_resources; + dd->device_free_chan_resources = stm32_mdma_free_chan_resources; + dd->device_tx_status = stm32_mdma_tx_status; + dd->device_issue_pending = stm32_mdma_issue_pending; + dd->device_prep_slave_sg = stm32_mdma_prep_slave_sg; + dd->device_prep_dma_cyclic = stm32_mdma_prep_dma_cyclic; + dd->device_prep_dma_memcpy = stm32_mdma_prep_dma_memcpy; + dd->device_config = stm32_mdma_slave_config; + dd->device_pause = stm32_mdma_pause; + dd->device_resume = stm32_mdma_resume; + dd->device_terminate_all = stm32_mdma_terminate_all; + dd->device_synchronize = stm32_mdma_synchronize; + dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); + dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); + dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | + BIT(DMA_MEM_TO_MEM); + dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + dd->max_burst = STM32_MDMA_MAX_BURST; + dd->dev = &pdev->dev; + INIT_LIST_HEAD(&dd->channels); + + for (i = 0; i < dmadev->nr_channels; i++) { + chan = &dmadev->chan[i]; + chan->id = i; + chan->vchan.desc_free = stm32_mdma_desc_free; + vchan_init(&chan->vchan, dd); + } + + dmadev->irq = platform_get_irq(pdev, 0); + if (dmadev->irq < 0) { + dev_err(&pdev->dev, "failed to get IRQ\n"); + return dmadev->irq; + } + + ret = devm_request_irq(&pdev->dev, 
dmadev->irq, stm32_mdma_irq_handler, + 0, dev_name(&pdev->dev), dmadev); + if (ret) { + dev_err(&pdev->dev, "failed to request IRQ\n"); + return ret; + } + + ret = dma_async_device_register(dd); + if (ret) + return ret; + + ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev); + if (ret < 0) { + dev_err(&pdev->dev, + "STM32 MDMA DMA OF registration failed %d\n", ret); + goto err_unregister; + } + + platform_set_drvdata(pdev, dmadev); + + dev_info(&pdev->dev, "STM32 MDMA driver registered\n"); + + return 0; + +err_unregister: + dma_async_device_unregister(dd); + + return ret; +} + +static struct platform_driver stm32_mdma_driver = { + .probe = stm32_mdma_probe, + .driver = { + .name = "stm32-mdma", + .of_match_table = stm32_mdma_of_match, + }, +}; + +static int __init stm32_mdma_init(void) +{ + return platform_driver_register(&stm32_mdma_driver); +} + +subsys_initcall(stm32_mdma_init); + +MODULE_DESCRIPTION("Driver for STM32 MDMA controller"); +MODULE_AUTHOR("M'boumba Cedric Madianga "); +MODULE_AUTHOR("Pierre-Yves Mordret "); +MODULE_LICENSE("GPL v2"); -- cgit From 4219ff33b26dbaa08c728ee4ad1a3a5aae300e1a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 3 Oct 2017 13:54:50 +0300 Subject: dmaengine: stm32-dmamux: Fix a NULL vs IS_ERR() check in probe devm_ioremap_resource() doesn't return NULL, it returns error pointers. Fixes: df7e762db5f6 ("dmaengine: Add STM32 DMAMUX driver") Signed-off-by: Dan Carpenter Acked-by: Pierre-Yves MORDRET Signed-off-by: Vinod Koul --- drivers/dma/stm32-dmamux.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index 22812e7a953b..d5db0f6e1ff8 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -257,8 +257,8 @@ static int stm32_dmamux_probe(struct platform_device *pdev) return -ENODEV; iomem = devm_ioremap_resource(&pdev->dev, res); - if (!iomem) - return -ENOMEM; + if (IS_ERR(iomem)) + return PTR_ERR(iomem); spin_lock_init(&stm32_dmamux->lock); -- cgit From 38502f232eafa3d3a1072ead3678090e11bdbecb Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Sun, 8 Oct 2017 20:28:15 +0530 Subject: dmaengine: stm32: use %p format specfier for pointer Pointer print was using explict cast and printing as %x which causes below warn on some arch's so print using %p format specfier. 
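For reference, a minimal before/after sketch of the affected dev_dbg() call (variable names as in the driver; the cast truncates the pointer on 64-bit builds and triggers a compiler warning):

	/* before: truncating cast, warns on 64-bit */
	dev_dbg(chan2dev(chan), "hwdesc: 0x%08x\n", (unsigned int)hwdesc);

	/* after: %p prints the pointer portably */
	dev_dbg(chan2dev(chan), "hwdesc: 0x%p\n", hwdesc);
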
Reported-by: Fengguang Wu Signed-off-by: Vinod Koul --- drivers/dma/stm32-mdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index a9cb341c8ee0..0db59a7e80e0 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -653,7 +653,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan, static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan, struct stm32_mdma_hwdesc *hwdesc) { - dev_dbg(chan2dev(chan), "hwdesc: 0x%08x\n", (unsigned int)hwdesc); + dev_dbg(chan2dev(chan), "hwdesc: 0x%p\n", hwdesc); dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", hwdesc->ctcr); dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", hwdesc->cbndtr); dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", hwdesc->csar); -- cgit From ea62e2ccbb18e8adc51a6591682e7efa7818478a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 11 Oct 2017 16:00:04 +0200 Subject: dmaengine: stm32_mdma: add CONFIG_OF dependency Without CONFIG_OF we get a build warning: warning: (STM32_MDMA) selects DMA_OF which has unmet direct dependencies (DMADEVICES && OF) This adds a dependency on CONFIG_OF. Since this means we no longer need to select 'DMA_OF', I'm dropping that line as well. Fixes: a4ffb13c8946 ("dmaengine: Add STM32 MDMA driver") Signed-off-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 73446622ccc9..303940b32f47 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -495,8 +495,8 @@ config STM32_DMAMUX config STM32_MDMA bool "STMicroelectronics STM32 master dma support" depends on ARCH_STM32 || COMPILE_TEST + depends on OF select DMA_ENGINE - select DMA_OF select DMA_VIRTUAL_CHANNELS help Enable support for the on-chip MDMA controller on STMicroelectronics -- cgit From f2fd4d9f323d8979fe77e673bb555199b531fa1a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 11 Oct 2017 11:28:22 +0100 Subject: dmaengine: stm32: remove redundant initialization of hwdesc hwdesc is being initialized to desc->hwdesc but this is never read as hwdesc is overwritten in a for-loop. Remove the redundant initialization and move the declaration of hwdesc into the for-loop. 
Cleans up clang warning: Value stored to 'hwdesc' during its initialization is never read Signed-off-by: Colin Ian King Acked-by: Pierre-Yves MORDRET Signed-off-by: Vinod Koul --- drivers/dma/stm32-mdma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 0db59a7e80e0..d3be6bffdf12 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -1252,13 +1252,13 @@ static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan, u32 curr_hwdesc) { struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); - struct stm32_mdma_hwdesc *hwdesc = desc->hwdesc; u32 cbndtr, residue, modulo, burst_size; int i; residue = 0; for (i = curr_hwdesc + 1; i < desc->count; i++) { - hwdesc = &desc->hwdesc[i]; + struct stm32_mdma_hwdesc *hwdesc = &desc->hwdesc[i]; + residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr); } cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)); -- cgit From ea09ea51ddb9c5026e3f2090598049302c194427 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Tue, 3 Oct 2017 11:35:37 +0300 Subject: dmaengine: edma: Implement protection for invalid max_burst the device's max_burst to 32767 (CIDX is 16bit signed value) so clients can take this into consideration when setting up the transfer. During slave transfer preparation check if the requested maxburst is valid. Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/edma.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 3879f80a4815..6970355abdc9 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -891,6 +891,10 @@ static int edma_slave_config(struct dma_chan *chan, cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) return -EINVAL; + if (cfg->src_maxburst > chan->device->max_burst || + cfg->dst_maxburst > chan->device->max_burst) + return -EINVAL; + memcpy(&echan->cfg, cfg, sizeof(echan->cfg)); return 0; @@ -1855,6 +1859,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV)); s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */ s_ddev->dev = ecc->dev; INIT_LIST_HEAD(&s_ddev->channels); -- cgit From 05ec62a106a3b358cdc6ac41025b320dc2dbcb3f Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Tue, 3 Oct 2017 11:35:38 +0300 Subject: dmaengine: omap-dma: Implement protection for invalid max_burst the device's max_burst to 16777215 (EN is 24bit unsigned value) so clients can take this into consideration when setting up the transfer. During slave transfer preparation check if the requested maxburst is valid. 
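For illustration of how a client can honour the newly advertised max_burst (a minimal sketch, not part of either patch; chan and requested_burst are placeholder variables), the capabilities are queried and the burst request clamped before configuring the channel:

	struct dma_slave_caps caps;
	struct dma_slave_config cfg = { };
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;

	/* stay within the controller's advertised limit, e.g. 32767 for eDMA */
	cfg.src_maxburst = min_t(u32, requested_burst, caps.max_burst);
	cfg.dst_maxburst = cfg.src_maxburst;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret; /* the driver now rejects out-of-range bursts with -EINVAL */
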
Signed-off-by: Peter Ujfalusi Cc: Russell King Signed-off-by: Vinod Koul --- drivers/dma/omap-dma.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 8c1665c8fe33..f6dd849159d8 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -1288,6 +1288,10 @@ static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) return -EINVAL; + if (cfg->src_maxburst > chan->device->max_burst || + cfg->dst_maxburst > chan->device->max_burst) + return -EINVAL; + memcpy(&c->cfg, cfg, sizeof(c->cfg)); return 0; @@ -1482,6 +1486,7 @@ static int omap_dma_probe(struct platform_device *pdev) od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */ od->ddev.dev = &pdev->dev; INIT_LIST_HEAD(&od->ddev.channels); spin_lock_init(&od->lock); -- cgit From f47a4133ea6529ecb7ea11047a1a2fcb214e2f97 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 4 Oct 2017 14:15:23 +0200 Subject: dmaengine: nbpfaxi: Use of_device_get_match_data() helper Use the of_device_get_match_data() helper instead of open coding. Note that when used with DT, there's always a valid match. Signed-off-by: Geert Uytterhoeven Signed-off-by: Vinod Koul --- drivers/dma/nbpfaxi.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index d3f918a9ee76..50559338239b 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1286,7 +1286,6 @@ MODULE_DEVICE_TABLE(of, nbpf_match); static int nbpf_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - const struct of_device_id *of_id = of_match_device(nbpf_match, dev); struct device_node *np = dev->of_node; struct nbpf_device *nbpf; struct dma_device *dma_dev; @@ -1300,10 +1299,10 @@ static int nbpf_probe(struct platform_device *pdev) BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE); /* DT only */ - if (!np || !of_id || !of_id->data) + if (!np) return -ENODEV; - cfg = of_id->data; + cfg = of_device_get_match_data(dev); num_channels = cfg->num_channels; nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels * -- cgit From fd9f22ae15b97997c76cdadf2bed23053881509a Mon Sep 17 00:00:00 2001 From: Ed Blake Date: Mon, 9 Oct 2017 15:40:26 +0100 Subject: dmaengine: img-mdc: Add suspend / resume handling Add suspend / resume handling using suspend_late and resume_early, and check that all channels are idle before suspending. DMA drivers should use suspend_late / resume_early to ensure that all DMA client devices are suspended before the DMA device itself, and that client devices are resumed after the DMA device. This avoids suspending the DMA device while transactions are still active. It is the responsibility of client drivers to terminate all DMA transactions in their suspend handlers, so there should be no active transactions by the time suspend_late is called. There's no need to save and restore registers for MDC during suspend / resume, as all transactions will be terminated as a result of the suspend, and all required registers are programmed anyway at the start of any new transactions following resume. 
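To illustrate the ordering this relies on (a sketch only; client_priv and client_suspend are hypothetical names), a DMA client terminates its transfers in its regular suspend callback, so that every channel is already idle when the controller's suspend_late handler runs:

	static int client_suspend(struct device *dev)
	{
		struct client_priv *priv = dev_get_drvdata(dev);

		/*
		 * Stop and wait for any in-flight DMA here; the MDC's
		 * suspend_late handler only checks that channels are idle.
		 */
		dmaengine_terminate_sync(priv->dma_chan);

		return 0;
	}
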
Signed-off-by: Ed Blake Signed-off-by: Vinod Koul --- drivers/dma/img-mdc-dma.c | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c index 54db1411ce73..c4d8d5ad2876 100644 --- a/drivers/dma/img-mdc-dma.c +++ b/drivers/dma/img-mdc-dma.c @@ -1009,9 +1009,42 @@ static int mdc_dma_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int img_mdc_suspend_late(struct device *dev) +{ + struct mdc_dma *mdma = dev_get_drvdata(dev); + int i; + + /* Check that all channels are idle */ + for (i = 0; i < mdma->nr_channels; i++) { + struct mdc_chan *mchan = &mdma->channels[i]; + + if (unlikely(mchan->desc)) + return -EBUSY; + } + + clk_disable_unprepare(mdma->clk); + + return 0; +} + +static int img_mdc_resume_early(struct device *dev) +{ + struct mdc_dma *mdma = dev_get_drvdata(dev); + + return clk_prepare_enable(mdma->clk); +} +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops img_mdc_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(img_mdc_suspend_late, + img_mdc_resume_early) +}; + static struct platform_driver mdc_dma_driver = { .driver = { .name = "img-mdc-dma", + .pm = &img_mdc_pm_ops, .of_match_table = of_match_ptr(mdc_dma_of_match), }, .probe = mdc_dma_probe, -- cgit From 56d355e6f586539e5f87280376eed09fb8801f42 Mon Sep 17 00:00:00 2001 From: Ed Blake Date: Mon, 9 Oct 2017 15:40:27 +0100 Subject: dmaengine: img-mdc: Add runtime PM Add runtime PM support to disable the clock when the h/w is not in use. The existing clock_prepare_enable is removed from probe() as the clock is no longer permanently enabled. Signed-off-by: Ed Blake Signed-off-by: Vinod Koul --- drivers/dma/img-mdc-dma.c | 77 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 53 insertions(+), 24 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c index c4d8d5ad2876..0391f930aecc 100644 --- a/drivers/dma/img-mdc-dma.c +++ b/drivers/dma/img-mdc-dma.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -730,14 +731,23 @@ static int mdc_slave_config(struct dma_chan *chan, return 0; } +static int mdc_alloc_chan_resources(struct dma_chan *chan) +{ + struct mdc_chan *mchan = to_mdc_chan(chan); + struct device *dev = mdma2dev(mchan->mdma); + + return pm_runtime_get_sync(dev); +} + static void mdc_free_chan_resources(struct dma_chan *chan) { struct mdc_chan *mchan = to_mdc_chan(chan); struct mdc_dma *mdma = mchan->mdma; + struct device *dev = mdma2dev(mdma); mdc_terminate_all(chan); - mdma->soc->disable_chan(mchan); + pm_runtime_put(dev); } static irqreturn_t mdc_chan_irq(int irq, void *dev_id) @@ -854,6 +864,22 @@ static const struct of_device_id mdc_dma_of_match[] = { }; MODULE_DEVICE_TABLE(of, mdc_dma_of_match); +static int img_mdc_runtime_suspend(struct device *dev) +{ + struct mdc_dma *mdma = dev_get_drvdata(dev); + + clk_disable_unprepare(mdma->clk); + + return 0; +} + +static int img_mdc_runtime_resume(struct device *dev) +{ + struct mdc_dma *mdma = dev_get_drvdata(dev); + + return clk_prepare_enable(mdma->clk); +} + static int mdc_dma_probe(struct platform_device *pdev) { struct mdc_dma *mdma; @@ -883,10 +909,6 @@ static int mdc_dma_probe(struct platform_device *pdev) if (IS_ERR(mdma->clk)) return PTR_ERR(mdma->clk); - ret = clk_prepare_enable(mdma->clk); - if (ret) - return ret; - dma_cap_zero(mdma->dma_dev.cap_mask); dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask); 
dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask); @@ -919,12 +941,13 @@ static int mdc_dma_probe(struct platform_device *pdev) "img,max-burst-multiplier", &mdma->max_burst_mult); if (ret) - goto disable_clk; + return ret; mdma->dma_dev.dev = &pdev->dev; mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg; mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic; mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy; + mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources; mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources; mdma->dma_dev.device_tx_status = mdc_tx_status; mdma->dma_dev.device_issue_pending = mdc_issue_pending; @@ -945,15 +968,14 @@ static int mdc_dma_probe(struct platform_device *pdev) mchan->mdma = mdma; mchan->chan_nr = i; mchan->irq = platform_get_irq(pdev, i); - if (mchan->irq < 0) { - ret = mchan->irq; - goto disable_clk; - } + if (mchan->irq < 0) + return mchan->irq; + ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq, IRQ_TYPE_LEVEL_HIGH, dev_name(&pdev->dev), mchan); if (ret < 0) - goto disable_clk; + return ret; mchan->vc.desc_free = mdc_desc_free; vchan_init(&mchan->vc, &mdma->dma_dev); @@ -962,14 +984,19 @@ static int mdc_dma_probe(struct platform_device *pdev) mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, sizeof(struct mdc_hw_list_desc), 4, 0); - if (!mdma->desc_pool) { - ret = -ENOMEM; - goto disable_clk; + if (!mdma->desc_pool) + return -ENOMEM; + + pm_runtime_enable(&pdev->dev); + if (!pm_runtime_enabled(&pdev->dev)) { + ret = img_mdc_runtime_resume(&pdev->dev); + if (ret) + return ret; } ret = dma_async_device_register(&mdma->dma_dev); if (ret) - goto disable_clk; + goto suspend; ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma); if (ret) @@ -982,8 +1009,10 @@ static int mdc_dma_probe(struct platform_device *pdev) unregister: dma_async_device_unregister(&mdma->dma_dev); -disable_clk: - clk_disable_unprepare(mdma->clk); +suspend: + if (!pm_runtime_enabled(&pdev->dev)) + img_mdc_runtime_suspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); return ret; } @@ -1004,7 +1033,9 @@ static int mdc_dma_remove(struct platform_device *pdev) tasklet_kill(&mchan->vc.task); } - clk_disable_unprepare(mdma->clk); + pm_runtime_disable(&pdev->dev); + if (!pm_runtime_status_suspended(&pdev->dev)) + img_mdc_runtime_suspend(&pdev->dev); return 0; } @@ -1023,20 +1054,18 @@ static int img_mdc_suspend_late(struct device *dev) return -EBUSY; } - clk_disable_unprepare(mdma->clk); - - return 0; + return pm_runtime_force_suspend(dev); } static int img_mdc_resume_early(struct device *dev) { - struct mdc_dma *mdma = dev_get_drvdata(dev); - - return clk_prepare_enable(mdma->clk); + return pm_runtime_force_resume(dev); } #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops img_mdc_pm_ops = { + SET_RUNTIME_PM_OPS(img_mdc_runtime_suspend, + img_mdc_runtime_resume, NULL) SET_LATE_SYSTEM_SLEEP_PM_OPS(img_mdc_suspend_late, img_mdc_resume_early) }; -- cgit From 50b12497547b5bff49bf90d54c2ca6a77c0bbe02 Mon Sep 17 00:00:00 2001 From: Stefan Brüns Date: Thu, 28 Sep 2017 03:49:18 +0200 Subject: dmaengine: sun6i: Correct setting of clock autogating register for A83T/H3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The H83T uses a compatible string different from the A23, but requires the same clock autogating register setting. The H3 also requires setting the clock autogating register, but has the register at a different offset. 
Add three suitable callbacks for the existing controller generations and set it in the controller config structure. Signed-off-by: Stefan Brüns Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 584f4e82a9be..1c0c2ec08fec 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -48,6 +48,9 @@ #define SUN8I_DMA_GATE 0x20 #define SUN8I_DMA_GATE_ENABLE 0x4 +#define SUNXI_H3_SECURE_REG 0x20 +#define SUNXI_H3_DMA_GATE 0x28 +#define SUNXI_H3_DMA_GATE_ENABLE 0x4 /* * Channels specific registers */ @@ -90,6 +93,9 @@ #define NORMAL_WAIT 8 #define DRQ_SDRAM 1 +/* forward declaration */ +struct sun6i_dma_dev; + /* * Hardware channels / ports representation * @@ -111,7 +117,7 @@ struct sun6i_dma_config { * however these SoCs really have and need this bit, as seen in the * BSP kernel source code. */ - bool gate_needed; + void (*clock_autogate_enable)(struct sun6i_dma_dev *); }; /* @@ -267,6 +273,16 @@ static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width) return addr_width >> 1; } +static void sun6i_enable_clock_autogate_a23(struct sun6i_dma_dev *sdev) +{ + writel(SUN8I_DMA_GATE_ENABLE, sdev->base + SUN8I_DMA_GATE); +} + +static void sun6i_enable_clock_autogate_h3(struct sun6i_dma_dev *sdev) +{ + writel(SUNXI_H3_DMA_GATE_ENABLE, sdev->base + SUNXI_H3_DMA_GATE); +} + static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan) { struct sun6i_desc *txd = pchan->desc; @@ -1020,13 +1036,14 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { .nr_max_channels = 8, .nr_max_requests = 24, .nr_max_vchans = 37, - .gate_needed = true, + .clock_autogate_enable = sun6i_enable_clock_autogate_a23, }; static struct sun6i_dma_config sun8i_a83t_dma_cfg = { .nr_max_channels = 8, .nr_max_requests = 28, .nr_max_vchans = 39, + .clock_autogate_enable = sun6i_enable_clock_autogate_a23, }; /* @@ -1038,6 +1055,7 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { .nr_max_channels = 12, .nr_max_requests = 27, .nr_max_vchans = 34, + .clock_autogate_enable = sun6i_enable_clock_autogate_h3, }; /* @@ -1049,7 +1067,7 @@ static struct sun6i_dma_config sun8i_v3s_dma_cfg = { .nr_max_channels = 8, .nr_max_requests = 23, .nr_max_vchans = 24, - .gate_needed = true, + .clock_autogate_enable = sun6i_enable_clock_autogate_a23, }; static const struct of_device_id sun6i_dma_match[] = { @@ -1197,8 +1215,8 @@ static int sun6i_dma_probe(struct platform_device *pdev) goto err_dma_unregister; } - if (sdc->cfg->gate_needed) - writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE); + if (sdc->cfg->clock_autogate_enable) + sdc->cfg->clock_autogate_enable(sdc); return 0; -- cgit From 5a6a6202fa9ab63bce1726663ff5573741bfeab3 Mon Sep 17 00:00:00 2001 From: Stefan Brüns Date: Thu, 28 Sep 2017 03:49:19 +0200 Subject: dmaengine: sun6i: Correct burst length field offsets for H3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For the H3, the burst lengths field offsets in the channel configuration register differs from earlier SoC generations. Using the A31 register macros actually configured the H3 controller do to bursts of length 1 always, which although working leads to higher bus utilisation. 
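To make the offset difference concrete (worked values only, using the macros introduced below): a burst of 8 converts to the register value 2, which ends up in different bit positions on the two generations:

	/* convert_burst(8) == 2 */
	DMA_CHAN_CFG_SRC_BURST_A31(2)	/* (2 & 0x3) << 7 == 0x100, bits [8:7] */
	DMA_CHAN_CFG_SRC_BURST_H3(2)	/* (2 & 0x3) << 6 == 0x080, bits [7:6] */
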
Signed-off-by: Stefan Brüns Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 1c0c2ec08fec..03d8b9afb36f 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -68,13 +68,15 @@ #define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & 0x1f) #define DMA_CHAN_CFG_SRC_IO_MODE BIT(5) #define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5) -#define DMA_CHAN_CFG_SRC_BURST(x) (((x) & 0x3) << 7) +#define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7) +#define DMA_CHAN_CFG_SRC_BURST_H3(x) (((x) & 0x3) << 6) #define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) #define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16) #define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16) #define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16) -#define DMA_CHAN_CFG_DST_BURST(x) (DMA_CHAN_CFG_SRC_BURST(x) << 16) +#define DMA_CHAN_CFG_DST_BURST_A31(x) (DMA_CHAN_CFG_SRC_BURST_A31(x) << 16) +#define DMA_CHAN_CFG_DST_BURST_H3(x) (DMA_CHAN_CFG_SRC_BURST_H3(x) << 16) #define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16) #define DMA_CHAN_CUR_SRC 0x10 @@ -118,6 +120,7 @@ struct sun6i_dma_config { * BSP kernel source code. */ void (*clock_autogate_enable)(struct sun6i_dma_dev *); + void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst); }; /* @@ -283,6 +286,18 @@ static void sun6i_enable_clock_autogate_h3(struct sun6i_dma_dev *sdev) writel(SUNXI_H3_DMA_GATE_ENABLE, sdev->base + SUNXI_H3_DMA_GATE); } +static void sun6i_set_burst_length_a31(u32 *p_cfg, s8 src_burst, s8 dst_burst) +{ + *p_cfg |= DMA_CHAN_CFG_SRC_BURST_A31(src_burst) | + DMA_CHAN_CFG_DST_BURST_A31(dst_burst); +} + +static void sun6i_set_burst_length_h3(u32 *p_cfg, s8 src_burst, s8 dst_burst) +{ + *p_cfg |= DMA_CHAN_CFG_SRC_BURST_H3(src_burst) | + DMA_CHAN_CFG_DST_BURST_H3(dst_burst); +} + static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan) { struct sun6i_desc *txd = pchan->desc; @@ -562,11 +577,11 @@ static int set_config(struct sun6i_dma_dev *sdev, if (dst_width < 0) return dst_width; - *p_cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) | - DMA_CHAN_CFG_SRC_WIDTH(src_width) | - DMA_CHAN_CFG_DST_BURST(dst_burst) | + *p_cfg = DMA_CHAN_CFG_SRC_WIDTH(src_width) | DMA_CHAN_CFG_DST_WIDTH(dst_width); + sdev->cfg->set_burst_length(p_cfg, src_burst, dst_burst); + return 0; } @@ -609,11 +624,11 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | DMA_CHAN_CFG_DST_LINEAR_MODE | DMA_CHAN_CFG_SRC_LINEAR_MODE | - DMA_CHAN_CFG_SRC_BURST(burst) | DMA_CHAN_CFG_SRC_WIDTH(width) | - DMA_CHAN_CFG_DST_BURST(burst) | DMA_CHAN_CFG_DST_WIDTH(width); + sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst); + sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); sun6i_dma_dump_lli(vchan, v_lli); @@ -1025,6 +1040,7 @@ static struct sun6i_dma_config sun6i_a31_dma_cfg = { .nr_max_channels = 16, .nr_max_requests = 30, .nr_max_vchans = 53, + .set_burst_length = sun6i_set_burst_length_a31, }; /* @@ -1037,6 +1053,7 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { .nr_max_requests = 24, .nr_max_vchans = 37, .clock_autogate_enable = sun6i_enable_clock_autogate_a23, + .set_burst_length = sun6i_set_burst_length_a31, }; static struct sun6i_dma_config sun8i_a83t_dma_cfg = { @@ -1044,6 +1061,7 @@ static struct sun6i_dma_config sun8i_a83t_dma_cfg = { .nr_max_requests = 28, .nr_max_vchans = 39, 
.clock_autogate_enable = sun6i_enable_clock_autogate_a23, + .set_burst_length = sun6i_set_burst_length_a31, }; /* @@ -1056,6 +1074,7 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { .nr_max_requests = 27, .nr_max_vchans = 34, .clock_autogate_enable = sun6i_enable_clock_autogate_h3, + .set_burst_length = sun6i_set_burst_length_h3, }; /* @@ -1068,6 +1087,7 @@ static struct sun6i_dma_config sun8i_v3s_dma_cfg = { .nr_max_requests = 23, .nr_max_vchans = 24, .clock_autogate_enable = sun6i_enable_clock_autogate_a23, + .set_burst_length = sun6i_set_burst_length_a31, }; static const struct of_device_id sun6i_dma_match[] = { -- cgit From 88d8622c0071def657a7eb6c7fd97471808ea9bc Mon Sep 17 00:00:00 2001 From: Stefan Brüns Date: Thu, 28 Sep 2017 03:49:20 +0200 Subject: dmaengine: sun6i: Restructure code to allow extension for new SoCs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current code mixes three distinct operations when transforming the slave config to register settings: 1. special handling of DMA_SLAVE_BUSWIDTH_UNDEFINED, maxburst == 0 2. range checking 3. conversion of raw to register values As the range checks depend on the specific SoC, move these out of the conversion to distinct operations. Signed-off-by: Stefan Brüns Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 66 ++++++++++++++++++++++++++++--------------------- 1 file changed, 38 insertions(+), 28 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 03d8b9afb36f..c6906a8fe8f1 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -121,6 +121,8 @@ struct sun6i_dma_config { */ void (*clock_autogate_enable)(struct sun6i_dma_dev *); void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst); + u32 src_burst_lengths; + u32 dst_burst_lengths; }; /* @@ -269,10 +271,6 @@ static inline s8 convert_burst(u32 maxburst) static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width) { - if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) || - (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) - return -EINVAL; - return addr_width >> 1; } @@ -541,41 +539,43 @@ static int set_config(struct sun6i_dma_dev *sdev, enum dma_transfer_direction direction, u32 *p_cfg) { + enum dma_slave_buswidth src_addr_width, dst_addr_width; + u32 src_maxburst, dst_maxburst; s8 src_width, dst_width, src_burst, dst_burst; + src_addr_width = sconfig->src_addr_width; + dst_addr_width = sconfig->dst_addr_width; + src_maxburst = sconfig->src_maxburst; + dst_maxburst = sconfig->dst_maxburst; + switch (direction) { case DMA_MEM_TO_DEV: - src_burst = convert_burst(sconfig->src_maxburst ? - sconfig->src_maxburst : 8); - src_width = convert_buswidth(sconfig->src_addr_width != - DMA_SLAVE_BUSWIDTH_UNDEFINED ? - sconfig->src_addr_width : - DMA_SLAVE_BUSWIDTH_4_BYTES); - dst_burst = convert_burst(sconfig->dst_maxburst); - dst_width = convert_buswidth(sconfig->dst_addr_width); + if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + src_maxburst = src_maxburst ? src_maxburst : 8; break; case DMA_DEV_TO_MEM: - src_burst = convert_burst(sconfig->src_maxburst); - src_width = convert_buswidth(sconfig->src_addr_width); - dst_burst = convert_burst(sconfig->dst_maxburst ? - sconfig->dst_maxburst : 8); - dst_width = convert_buswidth(sconfig->dst_addr_width != - DMA_SLAVE_BUSWIDTH_UNDEFINED ? 
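A quick check of the conversion (worked values only): the old addr_width >> 1 mapping coincides with the log2 encoding for 1-, 2- and 4-byte widths but breaks for 8 bytes, which ilog2() handles correctly:

	/* addr_width >> 1  : 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 4 (wrong)   */
	/* ilog2(addr_width): 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3 (correct) */
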
- sconfig->dst_addr_width : - DMA_SLAVE_BUSWIDTH_4_BYTES); + if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dst_maxburst = dst_maxburst ? dst_maxburst : 8; break; default: return -EINVAL; } - if (src_burst < 0) - return src_burst; - if (src_width < 0) - return src_width; - if (dst_burst < 0) - return dst_burst; - if (dst_width < 0) - return dst_width; + if (!(BIT(src_addr_width) & sdev->slave.src_addr_widths)) + return -EINVAL; + if (!(BIT(dst_addr_width) & sdev->slave.dst_addr_widths)) + return -EINVAL; + if (!(BIT(src_maxburst) & sdev->cfg->src_burst_lengths)) + return -EINVAL; + if (!(BIT(dst_maxburst) & sdev->cfg->dst_burst_lengths)) + return -EINVAL; + + src_width = convert_buswidth(src_addr_width); + dst_width = convert_buswidth(dst_addr_width); + dst_burst = convert_burst(dst_maxburst); + src_burst = convert_burst(src_maxburst); *p_cfg = DMA_CHAN_CFG_SRC_WIDTH(src_width) | DMA_CHAN_CFG_DST_WIDTH(dst_width); @@ -1041,6 +1041,8 @@ static struct sun6i_dma_config sun6i_a31_dma_cfg = { .nr_max_requests = 30, .nr_max_vchans = 53, .set_burst_length = sun6i_set_burst_length_a31, + .src_burst_lengths = BIT(1) | BIT(8), + .dst_burst_lengths = BIT(1) | BIT(8), }; /* @@ -1054,6 +1056,8 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { .nr_max_vchans = 37, .clock_autogate_enable = sun6i_enable_clock_autogate_a23, .set_burst_length = sun6i_set_burst_length_a31, + .src_burst_lengths = BIT(1) | BIT(8), + .dst_burst_lengths = BIT(1) | BIT(8), }; static struct sun6i_dma_config sun8i_a83t_dma_cfg = { @@ -1062,6 +1066,8 @@ static struct sun6i_dma_config sun8i_a83t_dma_cfg = { .nr_max_vchans = 39, .clock_autogate_enable = sun6i_enable_clock_autogate_a23, .set_burst_length = sun6i_set_burst_length_a31, + .src_burst_lengths = BIT(1) | BIT(8), + .dst_burst_lengths = BIT(1) | BIT(8), }; /* @@ -1075,6 +1081,8 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { .nr_max_vchans = 34, .clock_autogate_enable = sun6i_enable_clock_autogate_h3, .set_burst_length = sun6i_set_burst_length_h3, + .src_burst_lengths = BIT(1) | BIT(8), + .dst_burst_lengths = BIT(1) | BIT(8), }; /* @@ -1088,6 +1096,8 @@ static struct sun6i_dma_config sun8i_v3s_dma_cfg = { .nr_max_vchans = 24, .clock_autogate_enable = sun6i_enable_clock_autogate_a23, .set_burst_length = sun6i_set_burst_length_a31, + .src_burst_lengths = BIT(1) | BIT(8), + .dst_burst_lengths = BIT(1) | BIT(8), }; static const struct of_device_id sun6i_dma_match[] = { -- cgit From d5f6d8cf31a89045fe5d8fe283ae428763e243a7 Mon Sep 17 00:00:00 2001 From: Stefan Brüns Date: Thu, 28 Sep 2017 03:49:21 +0200 Subject: dmaengine: sun6i: Enable additional burst lengths/widths on H3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The H3 supports bursts lengths of 1, 4, 8 and 16 transfers, each with a width of 1, 2, 4 or 8 bytes. The register value for the the width is log2-encoded, change the conversion function to provide the correct value for width == 8. 
Signed-off-by: Stefan Brüns Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 54 ++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 45 insertions(+), 9 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index c6906a8fe8f1..55c915f490e0 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -123,6 +123,8 @@ struct sun6i_dma_config { void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst); u32 src_burst_lengths; u32 dst_burst_lengths; + u32 src_addr_widths; + u32 dst_addr_widths; }; /* @@ -262,8 +264,12 @@ static inline s8 convert_burst(u32 maxburst) switch (maxburst) { case 1: return 0; + case 4: + return 1; case 8: return 2; + case 16: + return 3; default: return -EINVAL; } @@ -271,7 +277,7 @@ static inline s8 convert_burst(u32 maxburst) static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width) { - return addr_width >> 1; + return ilog2(addr_width); } static void sun6i_enable_clock_autogate_a23(struct sun6i_dma_dev *sdev) @@ -1043,6 +1049,12 @@ static struct sun6i_dma_config sun6i_a31_dma_cfg = { .set_burst_length = sun6i_set_burst_length_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), + .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), + .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), }; /* @@ -1058,6 +1070,12 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { .set_burst_length = sun6i_set_burst_length_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), + .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), + .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), }; static struct sun6i_dma_config sun8i_a83t_dma_cfg = { @@ -1068,11 +1086,19 @@ static struct sun6i_dma_config sun8i_a83t_dma_cfg = { .set_burst_length = sun6i_set_burst_length_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), + .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), + .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), }; /* * The H3 has 12 physical channels, a maximum DRQ port id of 27, * and a total of 34 usable source and destination endpoints. + * It also supports additional burst lengths and bus widths, + * and the burst length fields have different offsets. 
*/ static struct sun6i_dma_config sun8i_h3_dma_cfg = { @@ -1081,8 +1107,16 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { .nr_max_vchans = 34, .clock_autogate_enable = sun6i_enable_clock_autogate_h3, .set_burst_length = sun6i_set_burst_length_h3, - .src_burst_lengths = BIT(1) | BIT(8), - .dst_burst_lengths = BIT(1) | BIT(8), + .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), + .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), + .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), + .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), }; /* @@ -1098,6 +1132,12 @@ static struct sun6i_dma_config sun8i_v3s_dma_cfg = { .set_burst_length = sun6i_set_burst_length_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), + .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), + .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), }; static const struct of_device_id sun6i_dma_match[] = { @@ -1175,12 +1215,8 @@ static int sun6i_dma_probe(struct platform_device *pdev) sdc->slave.device_pause = sun6i_dma_pause; sdc->slave.device_resume = sun6i_dma_resume; sdc->slave.device_terminate_all = sun6i_dma_terminate_all; - sdc->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | - BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | - BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); - sdc->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | - BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | - BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + sdc->slave.src_addr_widths = sdc->cfg->src_addr_widths; + sdc->slave.dst_addr_widths = sdc->cfg->dst_addr_widths; sdc->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); sdc->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; -- cgit From 500fa9e76bbc40c7dbb65c7daccdc8bc49684429 Mon Sep 17 00:00:00 2001 From: Stefan Brüns Date: Thu, 28 Sep 2017 03:49:22 +0200 Subject: dmaengine: sun6i: Move number of pchans/vchans/request to device struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Preparatory patch: If the same compatible is used for different SoCs which have a common register layout, but different number of channels, the channel count can no longer be stored in the config. Store it in the device structure instead. 
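The intent, sketched with the field names used by the driver (the exact probe changes are in the diff below): the channel counts become runtime fields of the device structure, seeded from the per-compatible config, so that a later change can override them from the devicetree.

    /* sketch: per-device copies instead of reading cfg->nr_max_* everywhere */
    sdc->num_pchans  = sdc->cfg->nr_max_channels;
    sdc->num_vchans  = sdc->cfg->nr_max_vchans;
    sdc->max_request = sdc->cfg->nr_max_requests;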
Signed-off-by: Stefan Brüns Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 55c915f490e0..f27b126dd6cd 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -188,6 +188,9 @@ struct sun6i_dma_dev { struct sun6i_pchan *pchans; struct sun6i_vchan *vchans; const struct sun6i_dma_config *cfg; + u32 num_pchans; + u32 num_vchans; + u32 max_request; }; static struct device *chan2dev(struct dma_chan *chan) @@ -434,7 +437,6 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) static void sun6i_dma_tasklet(unsigned long data) { struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data; - const struct sun6i_dma_config *cfg = sdev->cfg; struct sun6i_vchan *vchan; struct sun6i_pchan *pchan; unsigned int pchan_alloc = 0; @@ -462,7 +464,7 @@ static void sun6i_dma_tasklet(unsigned long data) } spin_lock_irq(&sdev->lock); - for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) { + for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) { pchan = &sdev->pchans[pchan_idx]; if (pchan->vchan || list_empty(&sdev->pending)) @@ -483,7 +485,7 @@ static void sun6i_dma_tasklet(unsigned long data) } spin_unlock_irq(&sdev->lock); - for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) { + for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) { if (!(pchan_alloc & BIT(pchan_idx))) continue; @@ -505,7 +507,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id) int i, j, ret = IRQ_NONE; u32 status; - for (i = 0; i < sdev->cfg->nr_max_channels / DMA_IRQ_CHAN_NR; i++) { + for (i = 0; i < sdev->num_pchans / DMA_IRQ_CHAN_NR; i++) { status = readl(sdev->base + DMA_IRQ_STAT(i)); if (!status) continue; @@ -985,7 +987,7 @@ static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec, struct dma_chan *chan; u8 port = dma_spec->args[0]; - if (port > sdev->cfg->nr_max_requests) + if (port > sdev->max_request) return NULL; chan = dma_get_any_slave_channel(&sdev->slave); @@ -1018,7 +1020,7 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev) { int i; - for (i = 0; i < sdev->cfg->nr_max_vchans; i++) { + for (i = 0; i < sdev->num_vchans; i++) { struct sun6i_vchan *vchan = &sdev->vchans[i]; list_del(&vchan->vc.chan.device_node); @@ -1222,26 +1224,30 @@ static int sun6i_dma_probe(struct platform_device *pdev) sdc->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; sdc->slave.dev = &pdev->dev; - sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels, + sdc->num_pchans = sdc->cfg->nr_max_channels; + sdc->num_vchans = sdc->cfg->nr_max_vchans; + sdc->max_request = sdc->cfg->nr_max_requests; + + sdc->pchans = devm_kcalloc(&pdev->dev, sdc->num_pchans, sizeof(struct sun6i_pchan), GFP_KERNEL); if (!sdc->pchans) return -ENOMEM; - sdc->vchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_vchans, + sdc->vchans = devm_kcalloc(&pdev->dev, sdc->num_vchans, sizeof(struct sun6i_vchan), GFP_KERNEL); if (!sdc->vchans) return -ENOMEM; tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc); - for (i = 0; i < sdc->cfg->nr_max_channels; i++) { + for (i = 0; i < sdc->num_pchans; i++) { struct sun6i_pchan *pchan = &sdc->pchans[i]; pchan->idx = i; pchan->base = sdc->base + 0x100 + i * 0x40; } - for (i = 0; i < sdc->cfg->nr_max_vchans; i++) { + for (i = 0; i < sdc->num_vchans; i++) { struct sun6i_vchan *vchan = &sdc->vchans[i]; 
INIT_LIST_HEAD(&vchan->node); -- cgit From 12e0177055ee4b5ab656414b32054e64568160d8 Mon Sep 17 00:00:00 2001 From: Stefan Brüns Date: Thu, 28 Sep 2017 03:49:25 +0200 Subject: dmaengine: sun6i: Add support for Allwinner A64 and compatibles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The A64 SoC has the same dma engine as the H3 (sun8i), with a reduced amount of physical channels. To allow future reuse of the compatible, leave the channel count etc. in the config data blank and retrieve it from the devicetree. Signed-off-by: Stefan Brüns Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index f27b126dd6cd..0cabb48b0fb5 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -1121,6 +1121,25 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), }; +/* + * The A64 binding uses the number of dma channels from the + * device tree node. + */ +static struct sun6i_dma_config sun50i_a64_dma_cfg = { + .clock_autogate_enable = sun6i_enable_clock_autogate_h3, + .set_burst_length = sun6i_set_burst_length_h3, + .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), + .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), + .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), + .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), +}; + /* * The V3s have only 8 physical channels, a maximum DRQ port id of 23, * and a total of 24 usable source and destination endpoints. @@ -1148,6 +1167,7 @@ static const struct of_device_id sun6i_dma_match[] = { { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg }, { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, + { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sun6i_dma_match); -- cgit From 847449f23dcbff68234525f90dd53c7c7db18cad Mon Sep 17 00:00:00 2001 From: Hiroyuki Yokoyama Date: Thu, 19 Oct 2017 01:15:13 +0000 Subject: dmaengine: rcar-dmac: use TCRB instead of TCR for residue SYS/RT/Audio DMAC includes independent data buffers for reading and writing. Therefore, the read transfer counter and write transfer counter have different values. TCR indicates read counter, and TCRB indicates write counter. The relationship is like below. TCR TCRB [SOURCE] -> [DMAC] -> [SINK] In the MEM_TO_DEV direction, what really matters is how much data has been written to the device. If the DMA is interrupted between read and write, then, the data doesn't end up in the destination, so shouldn't be counted. TCRB is thus the register we should use in this cases. In the DEV_TO_MEM direction, the situation is more complex. Both the read and write side are important. What matters from a data consumer point of view is how much data has been written to memory. On the other hand, if the transfer is interrupted between read and write, we'll end up losing data. It can also be important to report. In the MEM_TO_MEM direction, what matters is of course how much data has been written to memory from data consumer point of view. 
Here, because read and write have independent data buffers, it will take a while for TCR and TCRB to become equal. Thus we should check TCRB in this case, too. Thus, all cases we should check TCRB instead of TCR. Without this patch, Sound Capture has noise after PluseAudio support (= 07b7acb51d2 ("ASoC: rsnd: update pointer more accurate")), because the recorder will use wrong residue counter which indicates transferred from sound device, but in reality the data was not yet put to memory and recorder will record it. Signed-off-by: Hiroyuki Yokoyama [Kuninori: added detail information in log] Signed-off-by: Kuninori Morimoto Reviewed-by: Geert Uytterhoeven Reviewed-by: Laurent Pinchart Signed-off-by: Vinod Koul --- drivers/dma/sh/rcar-dmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 2b2c7db3e480..50c4950050be 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1310,7 +1310,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, } /* Add the residue for the current chunk. */ - residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift; + residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift; return residue; } -- cgit From e588710311ee5bece284871d613418831d56f2bd Mon Sep 17 00:00:00 2001 From: Alexander Kochetkov Date: Wed, 4 Oct 2017 14:37:23 +0300 Subject: dmaengine: pl330: fix descriptor allocation fail If two concurrent threads call pl330_get_desc() when DMAC descriptor pool is empty it is possible that allocation for one of threads will fail with message: kernel: dma-pl330 20078000.dma-controller: pl330_get_desc:2469 ALERT! Here how that can happen. Thread A calls pl330_get_desc() to get descriptor. If DMAC descriptor pool is empty pl330_get_desc() allocates new descriptor on shared pool using add_desc() and then get newly allocated descriptor using pluck_desc(). At the same time thread B calls pluck_desc() and take newly allocated descriptor. In that case descriptor allocation for thread A will fail. Using on-stack pool for new descriptor allow avoid the issue described. The patch modify pl330_get_desc() to use on-stack pool for allocation new descriptors. 
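A simplified sketch of the fix (the complete version is in the diff below): the newly allocated descriptor is first placed on a pool that lives on the caller's stack, so a concurrent thread plucking from the shared pool can never steal it between allocation and use.

    /* sketch: private on-stack pool, invisible to concurrent callers */
    DEFINE_SPINLOCK(lock);
    LIST_HEAD(pool);

    if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
            return NULL;

    desc = pluck_desc(&pool, &lock);
    WARN_ON(!desc || !list_empty(&pool));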
Signed-off-by: Alexander Kochetkov Tested-by: Marek Szyprowski Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f122c2a7b9f0..d7327fd5f445 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2390,7 +2390,8 @@ static inline void _init_desc(struct dma_pl330_desc *desc) } /* Returns the number of descriptors added to the DMAC pool */ -static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count) +static int add_desc(struct list_head *pool, spinlock_t *lock, + gfp_t flg, int count) { struct dma_pl330_desc *desc; unsigned long flags; @@ -2400,27 +2401,28 @@ static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count) if (!desc) return 0; - spin_lock_irqsave(&pl330->pool_lock, flags); + spin_lock_irqsave(lock, flags); for (i = 0; i < count; i++) { _init_desc(&desc[i]); - list_add_tail(&desc[i].node, &pl330->desc_pool); + list_add_tail(&desc[i].node, pool); } - spin_unlock_irqrestore(&pl330->pool_lock, flags); + spin_unlock_irqrestore(lock, flags); return count; } -static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330) +static struct dma_pl330_desc *pluck_desc(struct list_head *pool, + spinlock_t *lock) { struct dma_pl330_desc *desc = NULL; unsigned long flags; - spin_lock_irqsave(&pl330->pool_lock, flags); + spin_lock_irqsave(lock, flags); - if (!list_empty(&pl330->desc_pool)) { - desc = list_entry(pl330->desc_pool.next, + if (!list_empty(pool)) { + desc = list_entry(pool->next, struct dma_pl330_desc, node); list_del_init(&desc->node); @@ -2429,7 +2431,7 @@ static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330) desc->txd.callback = NULL; } - spin_unlock_irqrestore(&pl330->pool_lock, flags); + spin_unlock_irqrestore(lock, flags); return desc; } @@ -2441,20 +2443,18 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) struct dma_pl330_desc *desc; /* Pluck one desc from the pool of DMAC */ - desc = pluck_desc(pl330); + desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock); /* If the DMAC pool is empty, alloc new */ if (!desc) { - if (!add_desc(pl330, GFP_ATOMIC, 1)) - return NULL; + DEFINE_SPINLOCK(lock); + LIST_HEAD(pool); - /* Try again */ - desc = pluck_desc(pl330); - if (!desc) { - dev_err(pch->dmac->ddma.dev, - "%s:%d ALERT!\n", __func__, __LINE__); + if (!add_desc(&pool, &lock, GFP_ATOMIC, 1)) return NULL; - } + + desc = pluck_desc(&pool, &lock); + WARN_ON(!desc || !list_empty(&pool)); } /* Initialize the descriptor */ @@ -2868,7 +2868,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) spin_lock_init(&pl330->pool_lock); /* Create a descriptor pool of default size */ - if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC)) + if (!add_desc(&pl330->desc_pool, &pl330->pool_lock, + GFP_KERNEL, NR_DEFAULT_DESC)) dev_warn(&adev->dev, "unable to allocate desc\n"); INIT_LIST_HEAD(&pd->channels); -- cgit From 5d74aa7f641a8bf778b87941ae6a955121f64f7d Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Tue, 3 Oct 2017 10:52:57 +0530 Subject: dmaengine: bcm-sba-raid: serialize dma_cookie_complete() using reqs_lock As-per documentation in driver/dma/dmaengine.h, the dma_cookie_complete() API should be called with lock held. This patch ensures that Broadcom SBA RAID driver calls the dma_cookie_complete() API with reqs_lock held. 
Signed-off-by: Anup Patel Reviewed-by: Ray Jui Reviewed-by: Scott Branden Signed-off-by: Vinod Koul --- drivers/dma/bcm-sba-raid.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index 6c2c44724637..15c558508345 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c @@ -442,7 +442,9 @@ static void sba_process_received_request(struct sba_device *sba, WARN_ON(tx->cookie < 0); if (tx->cookie > 0) { + spin_lock_irqsave(&sba->reqs_lock, flags); dma_cookie_complete(tx); + spin_unlock_irqrestore(&sba->reqs_lock, flags); dmaengine_desc_get_callback_invoke(tx, NULL); dma_descriptor_unmap(tx); tx->callback = NULL; -- cgit From 4e9f8187aecb00d90ec385f5061c91549103a3cf Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Tue, 3 Oct 2017 10:52:58 +0530 Subject: dmaengine: bcm-sba-raid: Use only single mailbox channel Each mailbox channel used by Broadcom SBA RAID driver is a separate HW ring. Currently, Broadcom SBA RAID driver creates one DMA channel using one or more mailbox channels. When we are using more than one mailbox channels for a DMA channel, the sba_request are distributed evenly among multiple mailbox channels which results in sba_request being completed out-of-order. The above described out-of-order completion of sba_request breaks the dma_async_is_complete() API because it assumes DMA cookies are completed in orderly fashion. To ensure correct behaviour of dma_async_is_complete() API, this patch updates Broadcom SBA RAID driver to use only single mailbox channel. If additional mailbox channels are specified in DT then those will be ignored. Signed-off-by: Anup Patel Reviewed-by: Ray Jui Reviewed-by: Scott Branden Signed-off-by: Vinod Koul --- drivers/dma/bcm-sba-raid.c | 104 ++++++++++++--------------------------------- 1 file changed, 27 insertions(+), 77 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index 15c558508345..409da59d9315 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c @@ -25,11 +25,8 @@ * * The Broadcom SBA RAID driver does not require any register programming * except submitting request to SBA hardware device via mailbox channels. - * This driver implements a DMA device with one DMA channel using a set - * of mailbox channels provided by Broadcom SoC specific ring manager - * driver. To exploit parallelism (as described above), all DMA request - * coming to SBA RAID DMA channel are broken down to smaller requests - * and submitted to multiple mailbox channels in round-robin fashion. + * This driver implements a DMA device with one DMA channel using a single + * mailbox channel provided by Broadcom SoC specific ring manager driver. * For having more SBA DMA channels, we can create more SBA device nodes * in Broadcom SoC specific DTS based on number of hardware rings supported * by Broadcom SoC ring manager. 
@@ -85,6 +82,7 @@ #define SBA_CMD_GALOIS 0xe #define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192 +#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8 /* Driver helper macros */ #define to_sba_request(tx) \ @@ -142,9 +140,7 @@ struct sba_device { u32 max_cmds_pool_size; /* Maibox client and Mailbox channels */ struct mbox_client client; - int mchans_count; - atomic_t mchans_current; - struct mbox_chan **mchans; + struct mbox_chan *mchan; struct device *mbox_dev; /* DMA device and DMA channel */ struct dma_device dma_dev; @@ -200,14 +196,6 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0) /* ====== General helper routines ===== */ -static void sba_peek_mchans(struct sba_device *sba) -{ - int mchan_idx; - - for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++) - mbox_client_peek_data(sba->mchans[mchan_idx]); -} - static struct sba_request *sba_alloc_request(struct sba_device *sba) { bool found = false; @@ -231,7 +219,7 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba) * would have completed which will create more * room for new requests. */ - sba_peek_mchans(sba); + mbox_client_peek_data(sba->mchan); return NULL; } @@ -369,15 +357,11 @@ static void sba_cleanup_pending_requests(struct sba_device *sba) static int sba_send_mbox_request(struct sba_device *sba, struct sba_request *req) { - int mchans_idx, ret = 0; - - /* Select mailbox channel in round-robin fashion */ - mchans_idx = atomic_inc_return(&sba->mchans_current); - mchans_idx = mchans_idx % sba->mchans_count; + int ret = 0; /* Send message for the request */ req->msg.error = 0; - ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg); + ret = mbox_send_message(sba->mchan, &req->msg); if (ret < 0) { dev_err(sba->dev, "send message failed with error %d", ret); return ret; @@ -390,7 +374,7 @@ static int sba_send_mbox_request(struct sba_device *sba, } /* Signal txdone for mailbox channel */ - mbox_client_txdone(sba->mchans[mchans_idx], ret); + mbox_client_txdone(sba->mchan, ret); return ret; } @@ -402,13 +386,8 @@ static void _sba_process_pending_requests(struct sba_device *sba) u32 count; struct sba_request *req; - /* - * Process few pending requests - * - * For now, we process ( * 8) - * number of requests at a time. 
- */ - count = sba->mchans_count * 8; + /* Process few pending requests */ + count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL; while (!list_empty(&sba->reqs_pending_list) && count) { /* Get the first pending request */ req = list_first_entry(&sba->reqs_pending_list, @@ -572,7 +551,7 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan, if (ret == DMA_COMPLETE) return ret; - sba_peek_mchans(sba); + mbox_client_peek_data(sba->mchan); return dma_cookie_status(dchan, cookie, txstate); } @@ -1639,7 +1618,7 @@ static int sba_async_register(struct sba_device *sba) static int sba_probe(struct platform_device *pdev) { - int i, ret = 0, mchans_count; + int ret = 0; struct sba_device *sba; struct platform_device *mbox_pdev; struct of_phandle_args args; @@ -1652,12 +1631,11 @@ static int sba_probe(struct platform_device *pdev) sba->dev = &pdev->dev; platform_set_drvdata(pdev, sba); - /* Number of channels equals number of mailbox channels */ + /* Number of mailbox channels should be atleast 1 */ ret = of_count_phandle_with_args(pdev->dev.of_node, "mboxes", "#mbox-cells"); if (ret <= 0) return -ENODEV; - mchans_count = ret; /* Determine SBA version from DT compatible string */ if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) @@ -1690,7 +1668,7 @@ static int sba_probe(struct platform_device *pdev) default: return -EINVAL; } - sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count; + sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL; sba->max_cmd_per_req = sba->max_pq_srcs + 3; sba->max_xor_srcs = sba->max_cmd_per_req - 1; sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; @@ -1704,55 +1682,30 @@ static int sba_probe(struct platform_device *pdev) sba->client.knows_txdone = true; sba->client.tx_tout = 0; - /* Allocate mailbox channel array */ - sba->mchans = devm_kcalloc(&pdev->dev, mchans_count, - sizeof(*sba->mchans), GFP_KERNEL); - if (!sba->mchans) - return -ENOMEM; - - /* Request mailbox channels */ - sba->mchans_count = 0; - for (i = 0; i < mchans_count; i++) { - sba->mchans[i] = mbox_request_channel(&sba->client, i); - if (IS_ERR(sba->mchans[i])) { - ret = PTR_ERR(sba->mchans[i]); - goto fail_free_mchans; - } - sba->mchans_count++; + /* Request mailbox channel */ + sba->mchan = mbox_request_channel(&sba->client, 0); + if (IS_ERR(sba->mchan)) { + ret = PTR_ERR(sba->mchan); + goto fail_free_mchan; } - atomic_set(&sba->mchans_current, 0); /* Find-out underlying mailbox device */ ret = of_parse_phandle_with_args(pdev->dev.of_node, "mboxes", "#mbox-cells", 0, &args); if (ret) - goto fail_free_mchans; + goto fail_free_mchan; mbox_pdev = of_find_device_by_node(args.np); of_node_put(args.np); if (!mbox_pdev) { ret = -ENODEV; - goto fail_free_mchans; + goto fail_free_mchan; } sba->mbox_dev = &mbox_pdev->dev; - /* All mailbox channels should be of same ring manager device */ - for (i = 1; i < mchans_count; i++) { - ret = of_parse_phandle_with_args(pdev->dev.of_node, - "mboxes", "#mbox-cells", i, &args); - if (ret) - goto fail_free_mchans; - mbox_pdev = of_find_device_by_node(args.np); - of_node_put(args.np); - if (sba->mbox_dev != &mbox_pdev->dev) { - ret = -EINVAL; - goto fail_free_mchans; - } - } - /* Prealloc channel resource */ ret = sba_prealloc_channel_resources(sba); if (ret) - goto fail_free_mchans; + goto fail_free_mchan; /* Check availability of debugfs */ if (!debugfs_initialized()) @@ -1779,24 +1732,22 @@ skip_debugfs: goto fail_free_resources; /* Print device info */ - dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels", + dev_info(sba->dev, "%s using SBAv%d 
mailbox channel from %s", dma_chan_name(&sba->dma_chan), sba->ver+1, - sba->mchans_count); + dev_name(sba->mbox_dev)); return 0; fail_free_resources: debugfs_remove_recursive(sba->root); sba_freeup_channel_resources(sba); -fail_free_mchans: - for (i = 0; i < sba->mchans_count; i++) - mbox_free_channel(sba->mchans[i]); +fail_free_mchan: + mbox_free_channel(sba->mchan); return ret; } static int sba_remove(struct platform_device *pdev) { - int i; struct sba_device *sba = platform_get_drvdata(pdev); dma_async_device_unregister(&sba->dma_dev); @@ -1805,8 +1756,7 @@ static int sba_remove(struct platform_device *pdev) sba_freeup_channel_resources(sba); - for (i = 0; i < sba->mchans_count; i++) - mbox_free_channel(sba->mchans[i]); + mbox_free_channel(sba->mchan); return 0; } -- cgit From d5c334870eb18649476233f5a0ce4eb907c23265 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Tue, 3 Oct 2017 10:52:59 +0530 Subject: dmaengine: bcm-sba-raid: Use common GPL comment header This patch makes the comment header of Broadcom SBA RAID driver similar to the GPL comment header used across Broadcom driver sources. Signed-off-by: Anup Patel Signed-off-by: Vinod Koul --- drivers/dma/bcm-sba-raid.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index 409da59d9315..3956a018bf5a 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c @@ -1,9 +1,14 @@ /* * Copyright (C) 2017 Broadcom * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. */ /* -- cgit From 7076a1e4a4ea926ba9ae3b5f4a5eb6dced0a902d Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Tue, 3 Oct 2017 10:53:00 +0530 Subject: dmaengine: Build bcm-sba-raid driver as loadable module for iProc SoCs By default, we build Broadcom SBA RAID driver as loadable module for iProc SOCs so that kernel image is little smaller and we load SBA RAID driver only when required. Signed-off-by: Anup Patel Reviewed-by: Scott Branden Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index fadc4d8783bd..48cf8df7255f 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -115,7 +115,7 @@ config BCM_SBA_RAID select DMA_ENGINE_RAID select ASYNC_TX_DISABLE_XOR_VAL_DMA select ASYNC_TX_DISABLE_PQ_VAL_DMA - default ARCH_BCM_IPROC + default m if ARCH_BCM_IPROC help Enable support for Broadcom SBA RAID Engine. The SBA RAID engine is available on most of the Broadcom iProc SoCs. 
It -- cgit From 464aa6f54b093df1ccaff6558207f8bf68a69c37 Mon Sep 17 00:00:00 2001 From: Stefan Brüns Date: Tue, 17 Oct 2017 01:06:34 +0200 Subject: dmaengine: sun6i: Retrieve channel count/max request from devicetree MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To avoid introduction of a new compatible for each small SoC/DMA controller variation, move the definition of the channel count to the devicetree. The number of vchans is no longer explicit, but limited by the highest port/DMA request number. The result is a slight overallocation for SoCs with a sparse port mapping. Signed-off-by: Stefan Brüns Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 0cabb48b0fb5..0cd13f17fc11 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -42,6 +42,9 @@ #define DMA_STAT 0x30 +/* Offset between DMA_IRQ_EN and DMA_IRQ_STAT limits number of channels */ +#define DMA_MAX_CHANNELS (DMA_IRQ_CHAN_NR * 0x10 / 4) + /* * sun8i specific registers */ @@ -65,7 +68,8 @@ #define DMA_CHAN_LLI_ADDR 0x08 #define DMA_CHAN_CUR_CFG 0x0c -#define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & 0x1f) +#define DMA_CHAN_MAX_DRQ 0x1f +#define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & DMA_CHAN_MAX_DRQ) #define DMA_CHAN_CFG_SRC_IO_MODE BIT(5) #define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5) #define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7) @@ -1174,6 +1178,7 @@ MODULE_DEVICE_TABLE(of, sun6i_dma_match); static int sun6i_dma_probe(struct platform_device *pdev) { + struct device_node *np = pdev->dev.of_node; struct sun6i_dma_dev *sdc; struct resource *res; int ret, i; @@ -1248,6 +1253,26 @@ static int sun6i_dma_probe(struct platform_device *pdev) sdc->num_vchans = sdc->cfg->nr_max_vchans; sdc->max_request = sdc->cfg->nr_max_requests; + ret = of_property_read_u32(np, "dma-channels", &sdc->num_pchans); + if (ret && !sdc->num_pchans) { + dev_err(&pdev->dev, "Can't get dma-channels.\n"); + return ret; + } + + ret = of_property_read_u32(np, "dma-requests", &sdc->max_request); + if (ret && !sdc->max_request) { + dev_info(&pdev->dev, "Missing dma-requests, using %u.\n", + DMA_CHAN_MAX_DRQ); + sdc->max_request = DMA_CHAN_MAX_DRQ; + } + + /* + * If the number of vchans is not specified, derive it from the + * highest port number, at most one channel per port and direction. + */ + if (!sdc->num_vchans) + sdc->num_vchans = 2 * (sdc->max_request + 1); + sdc->pchans = devm_kcalloc(&pdev->dev, sdc->num_pchans, sizeof(struct sun6i_pchan), GFP_KERNEL); if (!sdc->pchans) -- cgit From 9b3b8171f7f4ecbbc28f3c1ae60462826a5d9072 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 24 Oct 2017 13:47:50 +0800 Subject: dmaengine: sprd: Add Spreadtrum DMA driver This patch adds the DMA controller driver for Spreadtrum SC9860 platform. Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 8 + drivers/dma/Makefile | 1 + drivers/dma/sprd-dma.c | 988 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 997 insertions(+) create mode 100644 drivers/dma/sprd-dma.c (limited to 'drivers/dma') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index fadc4d8783bd..a2aa7fe6b7a1 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -483,6 +483,14 @@ config STM32_DMA If you have a board based on such a MCU and wish to use DMA say Y here. 
+config SPRD_DMA + tristate "Spreadtrum DMA support" + depends on ARCH_SPRD || COMPILE_TEST + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Enable support for the on-chip DMA controller on Spreadtrum platform. + config S3C24XX_DMAC bool "Samsung S3C24XX DMA support" depends on ARCH_S3C24XX || COMPILE_TEST diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index f08f8de1b567..9e7ec340826a 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_RENESAS_DMA) += sh/ obj-$(CONFIG_SIRF_DMA) += sirf-dma.o obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o obj-$(CONFIG_STM32_DMA) += stm32-dma.o +obj-$(CONFIG_SPRD_DMA) += sprd-dma.o obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c new file mode 100644 index 000000000000..b652071a2096 --- /dev/null +++ b/drivers/dma/sprd-dma.c @@ -0,0 +1,988 @@ +/* + * Copyright (C) 2017 Spreadtrum Communications Inc. + * + * SPDX-License-Identifier: GPL-2.0 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "virt-dma.h" + +#define SPRD_DMA_CHN_REG_OFFSET 0x1000 +#define SPRD_DMA_CHN_REG_LENGTH 0x40 +#define SPRD_DMA_MEMCPY_MIN_SIZE 64 + +/* DMA global registers definition */ +#define SPRD_DMA_GLB_PAUSE 0x0 +#define SPRD_DMA_GLB_FRAG_WAIT 0x4 +#define SPRD_DMA_GLB_REQ_PEND0_EN 0x8 +#define SPRD_DMA_GLB_REQ_PEND1_EN 0xc +#define SPRD_DMA_GLB_INT_RAW_STS 0x10 +#define SPRD_DMA_GLB_INT_MSK_STS 0x14 +#define SPRD_DMA_GLB_REQ_STS 0x18 +#define SPRD_DMA_GLB_CHN_EN_STS 0x1c +#define SPRD_DMA_GLB_DEBUG_STS 0x20 +#define SPRD_DMA_GLB_ARB_SEL_STS 0x24 +#define SPRD_DMA_GLB_REQ_UID(uid) (0x4 * ((uid) - 1)) +#define SPRD_DMA_GLB_REQ_UID_OFFSET 0x2000 + +/* DMA channel registers definition */ +#define SPRD_DMA_CHN_PAUSE 0x0 +#define SPRD_DMA_CHN_REQ 0x4 +#define SPRD_DMA_CHN_CFG 0x8 +#define SPRD_DMA_CHN_INTC 0xc +#define SPRD_DMA_CHN_SRC_ADDR 0x10 +#define SPRD_DMA_CHN_DES_ADDR 0x14 +#define SPRD_DMA_CHN_FRG_LEN 0x18 +#define SPRD_DMA_CHN_BLK_LEN 0x1c +#define SPRD_DMA_CHN_TRSC_LEN 0x20 +#define SPRD_DMA_CHN_TRSF_STEP 0x24 +#define SPRD_DMA_CHN_WARP_PTR 0x28 +#define SPRD_DMA_CHN_WARP_TO 0x2c +#define SPRD_DMA_CHN_LLIST_PTR 0x30 +#define SPRD_DMA_CHN_FRAG_STEP 0x34 +#define SPRD_DMA_CHN_SRC_BLK_STEP 0x38 +#define SPRD_DMA_CHN_DES_BLK_STEP 0x3c + +/* SPRD_DMA_CHN_INTC register definition */ +#define SPRD_DMA_INT_MASK GENMASK(4, 0) +#define SPRD_DMA_INT_CLR_OFFSET 24 +#define SPRD_DMA_FRAG_INT_EN BIT(0) +#define SPRD_DMA_BLK_INT_EN BIT(1) +#define SPRD_DMA_TRANS_INT_EN BIT(2) +#define SPRD_DMA_LIST_INT_EN BIT(3) +#define SPRD_DMA_CFG_ERR_INT_EN BIT(4) + +/* SPRD_DMA_CHN_CFG register definition */ +#define SPRD_DMA_CHN_EN BIT(0) +#define SPRD_DMA_WAIT_BDONE_OFFSET 24 +#define SPRD_DMA_DONOT_WAIT_BDONE 1 + +/* SPRD_DMA_CHN_REQ register definition */ +#define SPRD_DMA_REQ_EN BIT(0) + +/* SPRD_DMA_CHN_PAUSE register definition */ +#define SPRD_DMA_PAUSE_EN BIT(0) +#define SPRD_DMA_PAUSE_STS BIT(2) +#define SPRD_DMA_PAUSE_CNT 0x2000 + +/* DMA_CHN_WARP_* register definition */ +#define SPRD_DMA_HIGH_ADDR_MASK GENMASK(31, 28) +#define SPRD_DMA_LOW_ADDR_MASK GENMASK(31, 0) +#define SPRD_DMA_HIGH_ADDR_OFFSET 4 + +/* SPRD_DMA_CHN_INTC register definition */ +#define SPRD_DMA_FRAG_INT_STS BIT(16) +#define SPRD_DMA_BLK_INT_STS BIT(17) +#define SPRD_DMA_TRSC_INT_STS BIT(18) +#define 
SPRD_DMA_LIST_INT_STS BIT(19) +#define SPRD_DMA_CFGERR_INT_STS BIT(20) +#define SPRD_DMA_CHN_INT_STS \ + (SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS | \ + SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS | \ + SPRD_DMA_CFGERR_INT_STS) + +/* SPRD_DMA_CHN_FRG_LEN register definition */ +#define SPRD_DMA_SRC_DATAWIDTH_OFFSET 30 +#define SPRD_DMA_DES_DATAWIDTH_OFFSET 28 +#define SPRD_DMA_SWT_MODE_OFFSET 26 +#define SPRD_DMA_REQ_MODE_OFFSET 24 +#define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0) +#define SPRD_DMA_FIX_SEL_OFFSET 21 +#define SPRD_DMA_FIX_EN_OFFSET 20 +#define SPRD_DMA_LLIST_END_OFFSET 19 +#define SPRD_DMA_FRG_LEN_MASK GENMASK(16, 0) + +/* SPRD_DMA_CHN_BLK_LEN register definition */ +#define SPRD_DMA_BLK_LEN_MASK GENMASK(16, 0) + +/* SPRD_DMA_CHN_TRSC_LEN register definition */ +#define SPRD_DMA_TRSC_LEN_MASK GENMASK(27, 0) + +/* SPRD_DMA_CHN_TRSF_STEP register definition */ +#define SPRD_DMA_DEST_TRSF_STEP_OFFSET 16 +#define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0 +#define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0) + +#define SPRD_DMA_SOFTWARE_UID 0 + +/* + * enum sprd_dma_req_mode: define the DMA request mode + * @SPRD_DMA_FRAG_REQ: fragment request mode + * @SPRD_DMA_BLK_REQ: block request mode + * @SPRD_DMA_TRANS_REQ: transaction request mode + * @SPRD_DMA_LIST_REQ: link-list request mode + * + * We have 4 types request mode: fragment mode, block mode, transaction mode + * and linklist mode. One transaction can contain several blocks, one block can + * contain several fragments. Link-list mode means we can save several DMA + * configuration into one reserved memory, then DMA can fetch each DMA + * configuration automatically to start transfer. + */ +enum sprd_dma_req_mode { + SPRD_DMA_FRAG_REQ, + SPRD_DMA_BLK_REQ, + SPRD_DMA_TRANS_REQ, + SPRD_DMA_LIST_REQ, +}; + +/* + * enum sprd_dma_int_type: define the DMA interrupt type + * @SPRD_DMA_NO_INT: do not need generate DMA interrupts. + * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request + * is done. + * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done. + * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment + * or one block request is done. + * @SPRD_DMA_TRANS_INT: tansaction done interrupt when one transaction + * request is done. + * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one + * transaction request or fragment request is done. + * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one + * transaction request or block request is done. + * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request + * is done. + * @SPRD_DMA_CFGERR_INT: configure error interrupt when configuration is + * incorrect. 
+ */ +enum sprd_dma_int_type { + SPRD_DMA_NO_INT, + SPRD_DMA_FRAG_INT, + SPRD_DMA_BLK_INT, + SPRD_DMA_BLK_FRAG_INT, + SPRD_DMA_TRANS_INT, + SPRD_DMA_TRANS_FRAG_INT, + SPRD_DMA_TRANS_BLK_INT, + SPRD_DMA_LIST_INT, + SPRD_DMA_CFGERR_INT, +}; + +/* dma channel hardware configuration */ +struct sprd_dma_chn_hw { + u32 pause; + u32 req; + u32 cfg; + u32 intc; + u32 src_addr; + u32 des_addr; + u32 frg_len; + u32 blk_len; + u32 trsc_len; + u32 trsf_step; + u32 wrap_ptr; + u32 wrap_to; + u32 llist_ptr; + u32 frg_step; + u32 src_blk_step; + u32 des_blk_step; +}; + +/* dma request description */ +struct sprd_dma_desc { + struct virt_dma_desc vd; + struct sprd_dma_chn_hw chn_hw; +}; + +/* dma channel description */ +struct sprd_dma_chn { + struct virt_dma_chan vc; + void __iomem *chn_base; + u32 chn_num; + u32 dev_id; + struct sprd_dma_desc *cur_desc; +}; + +/* SPRD dma device */ +struct sprd_dma_dev { + struct dma_device dma_dev; + void __iomem *glb_base; + struct clk *clk; + struct clk *ashb_clk; + int irq; + u32 total_chns; + struct sprd_dma_chn channels[0]; +}; + +static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param); +static struct of_dma_filter_info sprd_dma_info = { + .filter_fn = sprd_dma_filter_fn, +}; + +static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct sprd_dma_chn, vc.chan); +} + +static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c) +{ + struct sprd_dma_chn *schan = to_sprd_dma_chan(c); + + return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]); +} + +static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct sprd_dma_desc, vd); +} + +static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg, + u32 mask, u32 val) +{ + u32 orig = readl(schan->chn_base + reg); + u32 tmp; + + tmp = (orig & ~mask) | val; + writel(tmp, schan->chn_base + reg); +} + +static int sprd_dma_enable(struct sprd_dma_dev *sdev) +{ + int ret; + + ret = clk_prepare_enable(sdev->clk); + if (ret) + return ret; + + /* + * The ashb_clk is optional and only for AGCP DMA controller, so we + * need add one condition to check if the ashb_clk need enable. + */ + if (!IS_ERR(sdev->ashb_clk)) + ret = clk_prepare_enable(sdev->ashb_clk); + + return ret; +} + +static void sprd_dma_disable(struct sprd_dma_dev *sdev) +{ + clk_disable_unprepare(sdev->clk); + + /* + * Need to check if we need disable the optional ashb_clk for AGCP DMA. 
+ */ + if (!IS_ERR(sdev->ashb_clk)) + clk_disable_unprepare(sdev->ashb_clk); +} + +static void sprd_dma_set_uid(struct sprd_dma_chn *schan) +{ + struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); + u32 dev_id = schan->dev_id; + + if (dev_id != SPRD_DMA_SOFTWARE_UID) { + u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET + + SPRD_DMA_GLB_REQ_UID(dev_id); + + writel(schan->chn_num + 1, sdev->glb_base + uid_offset); + } +} + +static void sprd_dma_unset_uid(struct sprd_dma_chn *schan) +{ + struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); + u32 dev_id = schan->dev_id; + + if (dev_id != SPRD_DMA_SOFTWARE_UID) { + u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET + + SPRD_DMA_GLB_REQ_UID(dev_id); + + writel(0, sdev->glb_base + uid_offset); + } +} + +static void sprd_dma_clear_int(struct sprd_dma_chn *schan) +{ + sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC, + SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET, + SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET); +} + +static void sprd_dma_enable_chn(struct sprd_dma_chn *schan) +{ + sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, + SPRD_DMA_CHN_EN); +} + +static void sprd_dma_disable_chn(struct sprd_dma_chn *schan) +{ + sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0); +} + +static void sprd_dma_soft_request(struct sprd_dma_chn *schan) +{ + sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN, + SPRD_DMA_REQ_EN); +} + +static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable) +{ + struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); + u32 pause, timeout = SPRD_DMA_PAUSE_CNT; + + if (enable) { + sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE, + SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN); + + do { + pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE); + if (pause & SPRD_DMA_PAUSE_STS) + break; + + cpu_relax(); + } while (--timeout > 0); + + if (!timeout) + dev_warn(sdev->dma_dev.dev, + "pause dma controller timeout\n"); + } else { + sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE, + SPRD_DMA_PAUSE_EN, 0); + } +} + +static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan) +{ + u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG); + + if (!(cfg & SPRD_DMA_CHN_EN)) + return; + + sprd_dma_pause_resume(schan, true); + sprd_dma_disable_chn(schan); +} + +static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan) +{ + unsigned long addr, addr_high; + + addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR); + addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) & + SPRD_DMA_HIGH_ADDR_MASK; + + return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET); +} + +static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan) +{ + struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); + u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) & + SPRD_DMA_CHN_INT_STS; + + switch (intc_sts) { + case SPRD_DMA_CFGERR_INT_STS: + return SPRD_DMA_CFGERR_INT; + + case SPRD_DMA_LIST_INT_STS: + return SPRD_DMA_LIST_INT; + + case SPRD_DMA_TRSC_INT_STS: + return SPRD_DMA_TRANS_INT; + + case SPRD_DMA_BLK_INT_STS: + return SPRD_DMA_BLK_INT; + + case SPRD_DMA_FRAG_INT_STS: + return SPRD_DMA_FRAG_INT; + + default: + dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n"); + return SPRD_DMA_NO_INT; + } +} + +static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan) +{ + u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN); + + return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK; +} + +static void 
sprd_dma_set_chn_config(struct sprd_dma_chn *schan, + struct sprd_dma_desc *sdesc) +{ + struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw; + + writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE); + writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG); + writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC); + writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR); + writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR); + writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN); + writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN); + writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN); + writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP); + writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR); + writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO); + writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR); + writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP); + writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP); + writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP); + writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ); +} + +static void sprd_dma_start(struct sprd_dma_chn *schan) +{ + struct virt_dma_desc *vd = vchan_next_desc(&schan->vc); + + if (!vd) + return; + + list_del(&vd->node); + schan->cur_desc = to_sprd_dma_desc(vd); + + /* + * Copy the DMA configuration from DMA descriptor to this hardware + * channel. + */ + sprd_dma_set_chn_config(schan, schan->cur_desc); + sprd_dma_set_uid(schan); + sprd_dma_enable_chn(schan); + + if (schan->dev_id == SPRD_DMA_SOFTWARE_UID) + sprd_dma_soft_request(schan); +} + +static void sprd_dma_stop(struct sprd_dma_chn *schan) +{ + sprd_dma_stop_and_disable(schan); + sprd_dma_unset_uid(schan); + sprd_dma_clear_int(schan); +} + +static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc, + enum sprd_dma_int_type int_type, + enum sprd_dma_req_mode req_mode) +{ + if (int_type == SPRD_DMA_NO_INT) + return false; + + if (int_type >= req_mode + 1) + return true; + else + return false; +} + +static irqreturn_t dma_irq_handle(int irq, void *dev_id) +{ + struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id; + u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS); + struct sprd_dma_chn *schan; + struct sprd_dma_desc *sdesc; + enum sprd_dma_req_mode req_type; + enum sprd_dma_int_type int_type; + bool trans_done = false; + u32 i; + + while (irq_status) { + i = __ffs(irq_status); + irq_status &= (irq_status - 1); + schan = &sdev->channels[i]; + + spin_lock(&schan->vc.lock); + int_type = sprd_dma_get_int_type(schan); + req_type = sprd_dma_get_req_type(schan); + sprd_dma_clear_int(schan); + + sdesc = schan->cur_desc; + + /* Check if the dma request descriptor is done. 
*/ + trans_done = sprd_dma_check_trans_done(sdesc, int_type, + req_type); + if (trans_done == true) { + vchan_cookie_complete(&sdesc->vd); + schan->cur_desc = NULL; + sprd_dma_start(schan); + } + spin_unlock(&schan->vc.lock); + } + + return IRQ_HANDLED; +} + +static int sprd_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + int ret; + + ret = pm_runtime_get_sync(chan->device->dev); + if (ret < 0) + return ret; + + schan->dev_id = SPRD_DMA_SOFTWARE_UID; + return 0; +} + +static void sprd_dma_free_chan_resources(struct dma_chan *chan) +{ + struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&schan->vc.lock, flags); + sprd_dma_stop(schan); + spin_unlock_irqrestore(&schan->vc.lock, flags); + + vchan_free_chan_resources(&schan->vc); + pm_runtime_put(chan->device->dev); +} + +static enum dma_status sprd_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + struct virt_dma_desc *vd; + unsigned long flags; + enum dma_status ret; + u32 pos; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE || !txstate) + return ret; + + spin_lock_irqsave(&schan->vc.lock, flags); + vd = vchan_find_desc(&schan->vc, cookie); + if (vd) { + struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd); + struct sprd_dma_chn_hw *hw = &sdesc->chn_hw; + + if (hw->trsc_len > 0) + pos = hw->trsc_len; + else if (hw->blk_len > 0) + pos = hw->blk_len; + else if (hw->frg_len > 0) + pos = hw->frg_len; + else + pos = 0; + } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) { + pos = sprd_dma_get_dst_addr(schan); + } else { + pos = 0; + } + spin_unlock_irqrestore(&schan->vc.lock, flags); + + dma_set_residue(txstate, pos); + return ret; +} + +static void sprd_dma_issue_pending(struct dma_chan *chan) +{ + struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&schan->vc.lock, flags); + if (vchan_issue_pending(&schan->vc) && !schan->cur_desc) + sprd_dma_start(schan); + spin_unlock_irqrestore(&schan->vc.lock, flags); +} + +static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc, + dma_addr_t dest, dma_addr_t src, size_t len) +{ + struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan); + struct sprd_dma_chn_hw *hw = &sdesc->chn_hw; + u32 datawidth, src_step, des_step, fragment_len; + u32 block_len, req_mode, irq_mode, transcation_len; + u32 fix_mode = 0, fix_en = 0; + + if (IS_ALIGNED(len, 4)) { + datawidth = 2; + src_step = 4; + des_step = 4; + } else if (IS_ALIGNED(len, 2)) { + datawidth = 1; + src_step = 2; + des_step = 2; + } else { + datawidth = 0; + src_step = 1; + des_step = 1; + } + + fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE; + if (len <= SPRD_DMA_BLK_LEN_MASK) { + block_len = len; + transcation_len = 0; + req_mode = SPRD_DMA_BLK_REQ; + irq_mode = SPRD_DMA_BLK_INT; + } else { + block_len = SPRD_DMA_MEMCPY_MIN_SIZE; + transcation_len = len; + req_mode = SPRD_DMA_TRANS_REQ; + irq_mode = SPRD_DMA_TRANS_INT; + } + + hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET; + hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) & + SPRD_DMA_HIGH_ADDR_MASK); + hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) & + SPRD_DMA_HIGH_ADDR_MASK); + + hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK); + hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK); + + if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) { + 
fix_en = 0; + } else { + fix_en = 1; + if (src_step) + fix_mode = 1; + else + fix_mode = 0; + } + + hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET | + datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET | + req_mode << SPRD_DMA_REQ_MODE_OFFSET | + fix_mode << SPRD_DMA_FIX_SEL_OFFSET | + fix_en << SPRD_DMA_FIX_EN_OFFSET | + (fragment_len & SPRD_DMA_FRG_LEN_MASK); + hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK; + + hw->intc = SPRD_DMA_CFG_ERR_INT_EN; + + switch (irq_mode) { + case SPRD_DMA_NO_INT: + break; + + case SPRD_DMA_FRAG_INT: + hw->intc |= SPRD_DMA_FRAG_INT_EN; + break; + + case SPRD_DMA_BLK_INT: + hw->intc |= SPRD_DMA_BLK_INT_EN; + break; + + case SPRD_DMA_BLK_FRAG_INT: + hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN; + break; + + case SPRD_DMA_TRANS_INT: + hw->intc |= SPRD_DMA_TRANS_INT_EN; + break; + + case SPRD_DMA_TRANS_FRAG_INT: + hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN; + break; + + case SPRD_DMA_TRANS_BLK_INT: + hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN; + break; + + case SPRD_DMA_LIST_INT: + hw->intc |= SPRD_DMA_LIST_INT_EN; + break; + + case SPRD_DMA_CFGERR_INT: + hw->intc |= SPRD_DMA_CFG_ERR_INT_EN; + break; + + default: + dev_err(sdev->dma_dev.dev, "invalid irq mode\n"); + return -EINVAL; + } + + if (transcation_len == 0) + hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK; + else + hw->trsc_len = transcation_len & SPRD_DMA_TRSC_LEN_MASK; + + hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) << + SPRD_DMA_DEST_TRSF_STEP_OFFSET | + (src_step & SPRD_DMA_TRSF_STEP_MASK) << + SPRD_DMA_SRC_TRSF_STEP_OFFSET; + + hw->frg_step = 0; + hw->src_blk_step = 0; + hw->des_blk_step = 0; + hw->src_blk_step = 0; + return 0; +} + +struct dma_async_tx_descriptor * +sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + struct sprd_dma_desc *sdesc; + int ret; + + sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT); + if (!sdesc) + return NULL; + + ret = sprd_dma_config(chan, sdesc, dest, src, len); + if (ret) { + kfree(sdesc); + return NULL; + } + + return vchan_tx_prep(&schan->vc, &sdesc->vd, flags); +} + +static int sprd_dma_pause(struct dma_chan *chan) +{ + struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&schan->vc.lock, flags); + sprd_dma_pause_resume(schan, true); + spin_unlock_irqrestore(&schan->vc.lock, flags); + + return 0; +} + +static int sprd_dma_resume(struct dma_chan *chan) +{ + struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&schan->vc.lock, flags); + sprd_dma_pause_resume(schan, false); + spin_unlock_irqrestore(&schan->vc.lock, flags); + + return 0; +} + +static int sprd_dma_terminate_all(struct dma_chan *chan) +{ + struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&schan->vc.lock, flags); + sprd_dma_stop(schan); + + vchan_get_all_descriptors(&schan->vc, &head); + spin_unlock_irqrestore(&schan->vc.lock, flags); + + vchan_dma_desc_free_list(&schan->vc, &head); + return 0; +} + +static void sprd_dma_free_desc(struct virt_dma_desc *vd) +{ + struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd); + + kfree(sdesc); +} + +static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param) +{ + struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); + u32 req = *(u32 *)param; + + if (req < sdev->total_chns) + 
return req == schan->chn_num + 1; + else + return false; +} + +static int sprd_dma_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct sprd_dma_dev *sdev; + struct sprd_dma_chn *dma_chn; + struct resource *res; + u32 chn_count; + int ret, i; + + ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count); + if (ret) { + dev_err(&pdev->dev, "get dma channels count failed\n"); + return ret; + } + + sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev) + + sizeof(*dma_chn) * chn_count, + GFP_KERNEL); + if (!sdev) + return -ENOMEM; + + sdev->clk = devm_clk_get(&pdev->dev, "enable"); + if (IS_ERR(sdev->clk)) { + dev_err(&pdev->dev, "get enable clock failed\n"); + return PTR_ERR(sdev->clk); + } + + /* ashb clock is optional for AGCP DMA */ + sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb"); + if (IS_ERR(sdev->ashb_clk)) + dev_warn(&pdev->dev, "no optional ashb eb clock\n"); + + /* + * We have three DMA controllers: AP DMA, AON DMA and AGCP DMA. For AGCP + * DMA controller, it can or do not request the irq, which will save + * system power without resuming system by DMA interrupts if AGCP DMA + * does not request the irq. Thus the DMA interrupts property should + * be optional. + */ + sdev->irq = platform_get_irq(pdev, 0); + if (sdev->irq > 0) { + ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle, + 0, "sprd_dma", (void *)sdev); + if (ret < 0) { + dev_err(&pdev->dev, "request dma irq failed\n"); + return ret; + } + } else { + dev_warn(&pdev->dev, "no interrupts for the dma controller\n"); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sdev->glb_base = devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); + if (!sdev->glb_base) + return -ENOMEM; + + dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask); + sdev->total_chns = chn_count; + sdev->dma_dev.chancnt = chn_count; + INIT_LIST_HEAD(&sdev->dma_dev.channels); + INIT_LIST_HEAD(&sdev->dma_dev.global_node); + sdev->dma_dev.dev = &pdev->dev; + sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources; + sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources; + sdev->dma_dev.device_tx_status = sprd_dma_tx_status; + sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending; + sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy; + sdev->dma_dev.device_pause = sprd_dma_pause; + sdev->dma_dev.device_resume = sprd_dma_resume; + sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all; + + for (i = 0; i < chn_count; i++) { + dma_chn = &sdev->channels[i]; + dma_chn->chn_num = i; + dma_chn->cur_desc = NULL; + /* get each channel's registers base address. 
*/ + dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET + + SPRD_DMA_CHN_REG_LENGTH * i; + + dma_chn->vc.desc_free = sprd_dma_free_desc; + vchan_init(&dma_chn->vc, &sdev->dma_dev); + } + + platform_set_drvdata(pdev, sdev); + ret = sprd_dma_enable(sdev); + if (ret) + return ret; + + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) + goto err_rpm; + + ret = dma_async_device_register(&sdev->dma_dev); + if (ret < 0) { + dev_err(&pdev->dev, "register dma device failed:%d\n", ret); + goto err_register; + } + + sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask; + ret = of_dma_controller_register(np, of_dma_simple_xlate, + &sprd_dma_info); + if (ret) + goto err_of_register; + + pm_runtime_put(&pdev->dev); + return 0; + +err_of_register: + dma_async_device_unregister(&sdev->dma_dev); +err_register: + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); +err_rpm: + sprd_dma_disable(sdev); + return ret; +} + +static int sprd_dma_remove(struct platform_device *pdev) +{ + struct sprd_dma_dev *sdev = platform_get_drvdata(pdev); + struct sprd_dma_chn *c, *cn; + int ret; + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) + return ret; + + /* explicitly free the irq */ + if (sdev->irq > 0) + devm_free_irq(&pdev->dev, sdev->irq, sdev); + + list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels, + vc.chan.device_node) { + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); + } + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&sdev->dma_dev); + sprd_dma_disable(sdev); + + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return 0; +} + +static const struct of_device_id sprd_dma_match[] = { + { .compatible = "sprd,sc9860-dma", }, + {}, +}; + +static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev) +{ + struct sprd_dma_dev *sdev = dev_get_drvdata(dev); + + sprd_dma_disable(sdev); + return 0; +} + +static int __maybe_unused sprd_dma_runtime_resume(struct device *dev) +{ + struct sprd_dma_dev *sdev = dev_get_drvdata(dev); + int ret; + + ret = sprd_dma_enable(sdev); + if (ret) + dev_err(sdev->dma_dev.dev, "enable dma failed\n"); + + return ret; +} + +static const struct dev_pm_ops sprd_dma_pm_ops = { + SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend, + sprd_dma_runtime_resume, + NULL) +}; + +static struct platform_driver sprd_dma_driver = { + .probe = sprd_dma_probe, + .remove = sprd_dma_remove, + .driver = { + .name = "sprd-dma", + .of_match_table = sprd_dma_match, + .pm = &sprd_dma_pm_ops, + }, +}; +module_platform_driver(sprd_dma_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DMA driver for Spreadtrum"); +MODULE_AUTHOR("Baolin Wang "); +MODULE_ALIAS("platform:sprd-dma"); -- cgit From bcdc4bd356c76a5bab2f480a73f089dc8e0e4e89 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 24 Oct 2017 03:02:23 -0700 Subject: dmaengine: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. 
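The conversion pattern, sketched for a hypothetical driver (the real instances are in the diff below):

    struct foo_chan {
            struct timer_list watchdog;
            /* ... */
    };

    /* new-style callback: receives the timer and recovers its container */
    static void foo_watchdog(struct timer_list *t)
    {
            struct foo_chan *chan = from_timer(chan, t, watchdog);
            /* handle the timeout */
    }

    /* was: setup_timer(&chan->watchdog, foo_watchdog, (unsigned long)chan); */
    timer_setup(&chan->watchdog, foo_watchdog, 0);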
Signed-off-by: Kees Cook Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 8 +++----- drivers/dma/ioat/dma.c | 6 +++--- drivers/dma/ioat/dma.h | 3 +-- drivers/dma/ioat/init.c | 2 +- 4 files changed, 8 insertions(+), 11 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index f681df8f0ed3..331f863c605e 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -364,9 +364,9 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac) local_irq_restore(flags); } -static void imxdma_watchdog(unsigned long data) +static void imxdma_watchdog(struct timer_list *t) { - struct imxdma_channel *imxdmac = (struct imxdma_channel *)data; + struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog); struct imxdma_engine *imxdma = imxdmac->imxdma; int channel = imxdmac->channel; @@ -1153,9 +1153,7 @@ static int __init imxdma_probe(struct platform_device *pdev) } imxdmac->irq = irq + i; - init_timer(&imxdmac->watchdog); - imxdmac->watchdog.function = &imxdma_watchdog; - imxdmac->watchdog.data = (unsigned long)imxdmac; + timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0); } imxdmac->imxdma = imxdma; diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index f70cc74032ea..58d4ccd33672 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -474,7 +474,7 @@ int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs) if (time_is_before_jiffies(ioat_chan->timer.expires) && timer_pending(&ioat_chan->timer)) { mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); - ioat_timer_event((unsigned long)ioat_chan); + ioat_timer_event(&ioat_chan->timer); } return -ENOMEM; @@ -862,9 +862,9 @@ static void check_active(struct ioatdma_chan *ioat_chan) mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); } -void ioat_timer_event(unsigned long data) +void ioat_timer_event(struct timer_list *t) { - struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); + struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer); dma_addr_t phys_complete; u64 status; diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 56200eefcf5e..1ab42ec2b7ff 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -406,10 +406,9 @@ enum dma_status ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *txstate); void ioat_cleanup_event(unsigned long data); -void ioat_timer_event(unsigned long data); +void ioat_timer_event(struct timer_list *t); int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs); void ioat_issue_pending(struct dma_chan *chan); -void ioat_timer_event(unsigned long data); /* IOAT Init functions */ bool is_bwd_ioat(struct pci_dev *pdev); diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 93e006c3441d..2f31d3d0caa6 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -760,7 +760,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma, dma_cookie_init(&ioat_chan->dma_chan); list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels); ioat_dma->idx[idx] = ioat_chan; - setup_timer(&ioat_chan->timer, ioat_timer_event, data); + timer_setup(&ioat_chan->timer, ioat_timer_event, 0); tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data); } -- cgit From 10c191a11c79bb576817f280e93bdfe4bb8b1cd0 Mon Sep 17 00:00:00 2001 From: Romain Perier Date: Mon, 23 Oct 2017 19:59:55 +0200 Subject: dmaengine: pch_dma: Replace PCI pool old API The PCI pool API is deprecated. 
This commit replaces the PCI pool old API by the appropriate function with the DMA pool API. Signed-off-by: Romain Perier Acked-by: Peter Senna Tschudin Tested-by: Peter Senna Tschudin Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index f9028e9d0dfc..afd8f27bda96 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -123,7 +123,7 @@ struct pch_dma_chan { struct pch_dma { struct dma_device dma; void __iomem *membase; - struct pci_pool *pool; + struct dma_pool *pool; struct pch_dma_regs regs; struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; struct pch_dma_chan channels[MAX_CHAN_NR]; @@ -437,7 +437,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags) struct pch_dma *pd = to_pd(chan->device); dma_addr_t addr; - desc = pci_pool_zalloc(pd->pool, flags, &addr); + desc = dma_pool_zalloc(pd->pool, flags, &addr); if (desc) { INIT_LIST_HEAD(&desc->tx_list); dma_async_tx_descriptor_init(&desc->txd, chan); @@ -549,7 +549,7 @@ static void pd_free_chan_resources(struct dma_chan *chan) spin_unlock_irq(&pd_chan->lock); list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) - pci_pool_free(pd->pool, desc, desc->txd.phys); + dma_pool_free(pd->pool, desc, desc->txd.phys); pdc_enable_irq(chan, 0); } @@ -880,7 +880,7 @@ static int pch_dma_probe(struct pci_dev *pdev, goto err_iounmap; } - pd->pool = pci_pool_create("pch_dma_desc_pool", pdev, + pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev, sizeof(struct pch_dma_desc), 4, 0); if (!pd->pool) { dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n"); @@ -931,7 +931,7 @@ static int pch_dma_probe(struct pci_dev *pdev, return 0; err_free_pool: - pci_pool_destroy(pd->pool); + dma_pool_destroy(pd->pool); err_free_irq: free_irq(pdev->irq, pd); err_iounmap: @@ -963,7 +963,7 @@ static void pch_dma_remove(struct pci_dev *pdev) tasklet_kill(&pd_chan->tasklet); } - pci_pool_destroy(pd->pool); + dma_pool_destroy(pd->pool); pci_iounmap(pdev, pd->membase); pci_release_regions(pdev); pci_disable_device(pdev); -- cgit From 6d82e05b3c4e2cdda861d43daf9df7b23afc3f1a Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 3 Nov 2017 10:33:11 +0530 Subject: dmaengine: coh901318: Remove unnecessary 0x prefixes before %pad Since commit 3cab1e711297 ("lib/vsprintf: refactor duplicate code to special_hex_number()") %pad doesn't need 0x prefix so drop that. 
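
As a short aside on the specifier itself: %pad takes a pointer to the dma_addr_t and, since the vsprintf refactor named above, already emits its own 0x prefix, so a literal "0x" in the format string ends up printed twice. A minimal illustration (device and variable names are made up):

#include <linux/device.h>
#include <linux/types.h>

static void foo_dump_xfer(struct device *dev, dma_addr_t src, dma_addr_t dst,
			  size_t len)
{
	/* %pad dereferences the pointer and prints the value as "0x..." */
	dev_dbg(dev, "xfer src %pad dst %pad len %zu\n", &src, &dst, len);
}
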
Acked-by: Ludovic Desroches Signed-off-by: Vinod Koul --- drivers/dma/coh901318.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 74794c9859f6..da74fd74636b 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1319,8 +1319,8 @@ static void coh901318_list_print(struct coh901318_chan *cohc, int i = 0; while (l) { - dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%pad" - ", dst 0x%pad, link 0x%pad virt_link_addr 0x%p\n", + dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src %pad" + ", dst %pad, link %pad virt_link_addr 0x%p\n", i, l, l->control, &l->src_addr, &l->dst_addr, &l->link_addr, l->virt_link_addr); i++; @@ -2231,7 +2231,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, spin_lock_irqsave(&cohc->lock, flg); dev_vdbg(COHC_2_DEV(cohc), - "[%s] channel %d src 0x%pad dest 0x%pad size %zu\n", + "[%s] channel %d src %pad dest %pad size %zu\n", __func__, cohc->id, &src, &dest, size); if (flags & DMA_PREP_INTERRUPT) -- cgit From 77ea824c6d5430b887b00cda923397f43274d6e7 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 3 Nov 2017 10:33:11 +0530 Subject: dmaengine: at_hdmac: Remove unnecessary 0x prefixes before %pad Since commit 3cab1e711297 ("lib/vsprintf: refactor duplicate code to special_hex_number()") %pad doesn't need 0x prefix so drop that. Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac_regs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 7f58f06157f6..ef3f227ce3e6 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -385,7 +385,7 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {} static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) { dev_crit(chan2dev(&atchan->chan_common), - " desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n", + "desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n", &lli->saddr, &lli->daddr, lli->ctrla, lli->ctrlb, &lli->dscr); } -- cgit From d83f4131c2e06ef93cd3d6d44bb3475728790ab6 Mon Sep 17 00:00:00 2001 From: Pierre-Yves MORDRET Date: Tue, 17 Oct 2017 15:43:47 +0200 Subject: dmaengine: stm32_mdma: activate pack/unpack feature If source and destination bus width differs pack/unpack MDMA feature has to be activated for alignment. This pack/unpack feature implies to have both source/destination address and buffer length aligned on bus width. 
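
The alignment rule above is what the reworked stm32_mdma_get_max_width() in the diff below implements: walk down from the widest bus width and keep the first one on which both the buffer address and the buffer length are aligned, provided the transfer length can still hold one beat of that width. A stripped-down sketch of that selection logic, with the widths written as plain byte counts rather than the driver's enum values:

#include <linux/types.h>

/* Widest of 8/4/2/1 bytes on which both addr and buf_len are aligned. */
static unsigned int pick_bus_width(dma_addr_t addr, u32 buf_len, u32 tlen)
{
	unsigned int width;

	for (width = 8; width > 1; width >>= 1) {
		if (((buf_len | addr) & (width - 1)) == 0 && tlen >= width)
			break;
	}

	return width;	/* falls back to a single byte otherwise */
}

In the patch itself the pack/unpack enable bit (STM32_MDMA_CTCR_PKE) is then set only when the resulting source and destination bus widths differ.
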
Fixes: a4ffb13c8946 ("dmaengine: Add STM32 MDMA driver") Signed-off-by: Pierre-Yves MORDRET Signed-off-by: Vinod Koul --- drivers/dma/stm32-mdma.c | 84 ++++++++++++++++++++++++++++-------------------- 1 file changed, 50 insertions(+), 34 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index d3be6bffdf12..daa1602eb9f5 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -387,14 +387,20 @@ static int stm32_mdma_get_width(struct stm32_mdma_chan *chan, } } -static enum dma_slave_buswidth stm32_mdma_get_max_width(u32 buf_len, u32 tlen) +static enum dma_slave_buswidth stm32_mdma_get_max_width(dma_addr_t addr, + u32 buf_len, u32 tlen) { enum dma_slave_buswidth max_width = DMA_SLAVE_BUSWIDTH_8_BYTES; for (max_width = DMA_SLAVE_BUSWIDTH_8_BYTES; max_width > DMA_SLAVE_BUSWIDTH_1_BYTE; max_width >>= 1) { - if (((buf_len % max_width) == 0) && (tlen >= max_width)) + /* + * Address and buffer length both have to be aligned on + * bus width + */ + if ((((buf_len | addr) & (max_width - 1)) == 0) && + tlen >= max_width) break; } @@ -486,7 +492,8 @@ static void stm32_mdma_set_bus(struct stm32_mdma_device *dmadev, u32 *ctbr, static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan, enum dma_transfer_direction direction, u32 *mdma_ccr, u32 *mdma_ctcr, - u32 *mdma_ctbr, u32 buf_len) + u32 *mdma_ctbr, dma_addr_t addr, + u32 buf_len) { struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); struct stm32_mdma_chan_config *chan_config = &chan->chan_config; @@ -520,6 +527,9 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan, ctcr &= ~STM32_MDMA_CTCR_LEN2_MSK; ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1)); + /* Disable Pack Enable */ + ctcr &= ~STM32_MDMA_CTCR_PKE; + /* Check burst size constraints */ if (src_maxburst * src_addr_width > STM32_MDMA_MAX_BURST || dst_maxburst * dst_addr_width > STM32_MDMA_MAX_BURST) { @@ -551,6 +561,8 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan, switch (direction) { case DMA_MEM_TO_DEV: + dst_addr = chan->dma_config.dst_addr; + /* Set device data size */ dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width); if (dst_bus_width < 0) @@ -567,7 +579,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan, ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst))); /* Set memory data size */ - src_addr_width = stm32_mdma_get_max_width(buf_len, tlen); + src_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen); chan->mem_width = src_addr_width; src_bus_width = stm32_mdma_get_width(chan, src_addr_width); if (src_bus_width < 0) @@ -587,15 +599,19 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan, ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst))); /* Select bus */ - dst_addr = chan->dma_config.dst_addr; stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, dst_addr); + if (dst_bus_width != src_bus_width) + ctcr |= STM32_MDMA_CTCR_PKE; + /* Set destination address */ stm32_mdma_write(dmadev, STM32_MDMA_CDAR(chan->id), dst_addr); break; case DMA_DEV_TO_MEM: + src_addr = chan->dma_config.src_addr; + /* Set device data size */ src_bus_width = stm32_mdma_get_width(chan, src_addr_width); if (src_bus_width < 0) @@ -611,7 +627,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan, ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst))); /* Set memory data size */ - dst_addr_width = stm32_mdma_get_max_width(buf_len, tlen); + dst_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen); chan->mem_width = 
dst_addr_width; dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width); if (dst_bus_width < 0) @@ -630,10 +646,12 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan, ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst))); /* Select bus */ - src_addr = chan->dma_config.src_addr; stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, src_addr); + if (dst_bus_width != src_bus_width) + ctcr |= STM32_MDMA_CTCR_PKE; + /* Set source address */ stm32_mdma_write(dmadev, STM32_MDMA_CSAR(chan->id), src_addr); break; @@ -719,23 +737,27 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan, return -EINVAL; } - ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr, - &ctbr, sg_dma_len(sg)); - if (ret < 0) - return ret; - if (direction == DMA_MEM_TO_DEV) { src_addr = sg_dma_address(sg); dst_addr = dma_config->dst_addr; + ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, + &ctcr, &ctbr, src_addr, + sg_dma_len(sg)); stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, src_addr); } else { src_addr = dma_config->src_addr; dst_addr = sg_dma_address(sg); + ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, + &ctcr, &ctbr, dst_addr, + sg_dma_len(sg)); stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, dst_addr); } + if (ret < 0) + return ret; + stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr, dst_addr, sg_dma_len(sg), ctcr, ctbr, i == sg_len - 1, i == 0, false); @@ -830,27 +852,29 @@ stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr, if (!desc) return NULL; - ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr, &ctbr, - period_len); - if (ret < 0) - goto xfer_setup_err; - - /* Enable interrupts */ - ccr &= ~STM32_MDMA_CCR_IRQ_MASK; - ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE | STM32_MDMA_CCR_BTIE; - desc->ccr = ccr; - /* Select bus */ if (direction == DMA_MEM_TO_DEV) { src_addr = buf_addr; + ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr, + &ctbr, src_addr, period_len); stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, src_addr); } else { dst_addr = buf_addr; + ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr, + &ctbr, dst_addr, period_len); stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, dst_addr); } + if (ret < 0) + goto xfer_setup_err; + + /* Enable interrupts */ + ccr &= ~STM32_MDMA_CCR_IRQ_MASK; + ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE | STM32_MDMA_CCR_BTIE; + desc->ccr = ccr; + /* Configure hwdesc list */ for (i = 0; i < count; i++) { if (direction == DMA_MEM_TO_DEV) { @@ -956,9 +980,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src, ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1)); /* Set source best burst size */ - max_width = stm32_mdma_get_max_width(len, tlen); - if (src % max_width) - max_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + max_width = stm32_mdma_get_max_width(src, len, tlen); src_bus_width = stm32_mdma_get_width(chan, max_width); max_burst = tlen / max_width; @@ -971,9 +993,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src, STM32_MDMA_CTCR_SINCOS(src_bus_width); /* Set destination best burst size */ - max_width = stm32_mdma_get_max_width(len, tlen); - if (dest % max_width) - max_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + max_width = stm32_mdma_get_max_width(dest, len, tlen); dst_bus_width = stm32_mdma_get_width(chan, max_width); max_burst = tlen / max_width; @@ -1014,9 +1034,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src, STM32_MDMA_MAX_BLOCK_LEN); /* Set 
source best burst size */ - max_width = stm32_mdma_get_max_width(len, tlen); - if (src % max_width) - max_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + max_width = stm32_mdma_get_max_width(src, len, tlen); src_bus_width = stm32_mdma_get_width(chan, max_width); max_burst = tlen / max_width; @@ -1030,9 +1048,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src, STM32_MDMA_CTCR_SINCOS(src_bus_width); /* Set destination best burst size */ - max_width = stm32_mdma_get_max_width(len, tlen); - if (dest % max_width) - max_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + max_width = stm32_mdma_get_max_width(dest, len, tlen); dst_bus_width = stm32_mdma_get_width(chan, max_width); max_burst = tlen / max_width; -- cgit From 087ffdd2880b5dd7724ac6d0ca663da398fe1ccf Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 8 Nov 2017 11:01:45 +0530 Subject: dmaengine: Revert "rcar-dmac: use TCRB instead of TCR for residue" This reverts commit 847449f23dcb: ("dmaengine: rcar-dmac: use TCRB instead of TCR for residue") as it breaks small serial console. Reported-by: Geert Uytterhoeven Signed-off-by: Vinod Koul --- drivers/dma/sh/rcar-dmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 50c4950050be..2b2c7db3e480 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1310,7 +1310,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, } /* Add the residue for the current chunk. */ - residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift; + residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift; return residue; } -- cgit From a9df21e34b422f79d9a9fa5c3eff8c2a53491be6 Mon Sep 17 00:00:00 2001 From: Adam Wallis Date: Thu, 2 Nov 2017 08:53:30 -0400 Subject: dmaengine: dmatest: warn user when dma test times out Commit adfa543e7314 ("dmatest: don't use set_freezable_with_signal()") introduced a bug (that is in fact documented by the patch commit text) that leaves behind a dangling pointer. Since the done_wait structure is allocated on the stack, future invocations to the DMATEST can produce undesirable results (e.g., corrupted spinlocks). Ideally, this would be cleaned up in the thread handler, but at the very least, the kernel is left in a very precarious scenario that can lead to some long debug sessions when the crash comes later. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=197605 Signed-off-by: Adam Wallis Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/dma') diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 34ff53290b03..47edc7fbf91f 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -702,6 +702,7 @@ static int dmatest_func(void *data) * free it this time?" dancing. For now, just * leave it dangling. */ + WARN(1, "dmatest: Kernel stack may be corrupted!!\n"); dmaengine_unmap_put(um); result("test timed out", total_tests, src_off, dst_off, len, 0); -- cgit From 288e7560e4d3e259aa28f8f58a8dfe63627a1bf6 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 8 Nov 2017 12:02:25 +0200 Subject: dmaengine: ti-dma-crossbar: Correct am335x/am43xx mux value type The used 0x1f mask is only valid for am335x family of SoC, different family using this type of crossbar might have different number of electable events. In case of am43xx family 0x3f mask should have been used for example. 
Instead of trying to handle each family's mask, just use u8 type to store the mux value since the event offsets are aligned to byte offset. Fixes: 42dbdcc6bf965 ("dmaengine: ti-dma-crossbar: Add support for crossbar on AM33xx/AM43xx") Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/ti-dma-crossbar.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 2f65a8fde21d..10ef9d5d5a66 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -49,12 +49,12 @@ struct ti_am335x_xbar_data { struct ti_am335x_xbar_map { u16 dma_line; - u16 mux_val; + u8 mux_val; }; -static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val) +static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val) { - writeb_relaxed(val & 0x1f, iomem + event); + writeb_relaxed(val, iomem + event); } static void ti_am335x_xbar_free(struct device *dev, void *route_data) @@ -105,7 +105,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec, } map->dma_line = (u16)dma_spec->args[0]; - map->mux_val = (u16)dma_spec->args[2]; + map->mux_val = (u8)dma_spec->args[2]; dma_spec->args[2] = 0; dma_spec->args_count = 2; -- cgit
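
To make the overflow concrete: the old helper masked every mux value with 0x1f before the byte-wide write, so on am43xx a crossbar event above 31, say 0x25, would silently be programmed as 0x05. Storing the value as u8 and writing the full byte sidesteps per-SoC masks entirely, since the mux register itself is only one byte wide. A before/after sketch (function names are illustrative, the register access mirrors the patch):

#include <linux/io.h>
#include <linux/types.h>

/* Old behaviour: the 0x1f mask only covers am335x's 5-bit mux values */
static void xbar_write_masked(void __iomem *iomem, int event, u16 val)
{
	writeb_relaxed(val & 0x1f, iomem + event);	/* 0x25 turns into 0x05 */
}

/* New behaviour: a u8 cannot exceed the byte-wide mux register anyway */
static void xbar_write(void __iomem *iomem, int event, u8 val)
{
	writeb_relaxed(val, iomem + event);
}
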