From 616f0f81d857e248a72b5af45ab185196556ae2e Mon Sep 17 00:00:00 2001
From: Andrea Merello
Date: Tue, 20 Nov 2018 16:31:45 +0100
Subject: dmaengine: xilinx_dma: commonize DMA copy size calculation

This patch removes a bit of duplicated code by introducing a new
function that implements the DMA copy size calculation, and prepares
for changes to that calculation in the following patches.

Suggested-by: Vinod Koul
Signed-off-by: Andrea Merello
Reviewed-by: Radhey Shyam Pandey
Signed-off-by: Vinod Koul
---
 drivers/dma/xilinx/xilinx_dma.c | 39 +++++++++++++++++++++++++++++++--------
 1 file changed, 31 insertions(+), 8 deletions(-)
(limited to 'drivers/dma')

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 02880963092f..fd9f37bafab0 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -425,6 +425,7 @@ struct xilinx_dma_config {
  * @rxs_clk: DMA s2mm stream clock
  * @nr_channels: Number of channels DMA device supports
  * @chan_id: DMA channel identifier
+ * @max_buffer_len: Max buffer length
  */
 struct xilinx_dma_device {
 	void __iomem *regs;
@@ -444,6 +445,7 @@ struct xilinx_dma_device {
 	struct clk *rxs_clk;
 	u32 nr_channels;
 	u32 chan_id;
+	u32 max_buffer_len;
 };
 
 /* Macros */
@@ -959,6 +961,25 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 	return 0;
 }
 
+/**
+ * xilinx_dma_calc_copysize - Calculate the amount of data to copy
+ * @chan: Driver specific DMA channel
+ * @size: Total data that needs to be copied
+ * @done: Amount of data that has been already copied
+ *
+ * Return: Amount of data that has to be copied
+ */
+static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
+				    int size, int done)
+{
+	size_t copy;
+
+	copy = min_t(size_t, size - done,
+		     chan->xdev->max_buffer_len);
+
+	return copy;
+}
+
 /**
  * xilinx_dma_tx_status - Get DMA transaction status
  * @dchan: DMA channel
@@ -992,7 +1013,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 		list_for_each_entry(segment, &desc->segments, node) {
 			hw = &segment->hw;
 			residue += (hw->control - hw->status) &
-				   XILINX_DMA_MAX_TRANS_LEN;
+				   chan->xdev->max_buffer_len;
 		}
 	}
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -1254,7 +1275,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-				hw->control & XILINX_DMA_MAX_TRANS_LEN);
+				hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1357,7 +1378,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
+			       hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1718,7 +1739,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 	struct xilinx_cdma_tx_segment *segment;
 	struct xilinx_cdma_desc_hw *hw;
 
-	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+	if (!len || len > chan->xdev->max_buffer_len)
 		return NULL;
 
 	desc = xilinx_dma_alloc_tx_descriptor(chan);
@@ -1808,8 +1829,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 			 * Calculate the maximum number of bytes to transfer,
 			 * making sure it is less than the hw limit
 			 */
-			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
-				     XILINX_DMA_MAX_TRANS_LEN);
+			copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
+							sg_used);
 			hw = &segment->hw;
 
 			/* Fill in the descriptor */
@@ -1913,8 +1934,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
 			 * Calculate the maximum number of bytes to transfer,
 			 * making sure it is less than the hw limit
 			 */
-			copy = min_t(size_t, period_len - sg_used,
-				     XILINX_DMA_MAX_TRANS_LEN);
+			copy = xilinx_dma_calc_copysize(chan, period_len,
+							sg_used);
 			hw = &segment->hw;
 			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
 					  period_len * i);
@@ -2628,6 +2649,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	/* Retrieve the DMA engine properties from the device tree */
 	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
 
+	xdev->max_buffer_len = XILINX_DMA_MAX_TRANS_LEN;
+
 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
 		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
 
--
cgit

From 5c094d4cac5ba78139f4d7169145b57af7f07981 Mon Sep 17 00:00:00 2001
From: Andrea Merello
Date: Tue, 20 Nov 2018 16:31:46 +0100
Subject: dmaengine: xilinx_dma: in axidma slave_sg and dma_cyclic mode align split descriptors

Whenever a single or cyclic transaction is prepared, the driver may
split it over several SG descriptors in order to deal with the HW
maximum transfer length. This can result in DMA operations starting
from a misaligned address, which seems fatal for the HW if DRE (Data
Realignment Engine) is not enabled.

This patch adjusts the transfer size, where necessary, in order to
make sure all operations start from an aligned address.

Cc: Radhey Shyam Pandey
Signed-off-by: Andrea Merello
Reviewed-by: Radhey Shyam Pandey
Signed-off-by: Vinod Koul
---
 drivers/dma/xilinx/xilinx_dma.c | 9 +++++++++
 1 file changed, 9 insertions(+)
(limited to 'drivers/dma')

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index fd9f37bafab0..93435f7002ab 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -977,6 +977,15 @@ static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
 	copy = min_t(size_t, size - done,
 		     chan->xdev->max_buffer_len);
 
+	if ((copy + done < size) &&
+	    chan->xdev->common.copy_align) {
+		/*
+		 * If this is not the last descriptor, make sure
+		 * the next one will be properly aligned
+		 */
+		copy = rounddown(copy,
+				 (1 << chan->xdev->common.copy_align));
+	}
 	return copy;
 }
--
cgit
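A quick way to see what the two patches above build up: xilinx_dma_calc_copysize()
first clamps each chunk to the length-register limit, then (when DRE is absent)
rounds a non-final chunk down so the following chunk starts on an aligned address.
The stand-alone user-space sketch below mimics that logic; it is not driver code,
and the 23-bit limit and 64-byte alignment are illustrative assumptions rather
than values read from any particular IP configuration.

/*
 * User-space sketch of the copy-size split logic (not driver code).
 * Assumed constants: 23-bit length register, 64-byte alignment.
 */
#include <stdio.h>
#include <stddef.h>

#define MAX_BUFFER_LEN	((1U << 23) - 1)	/* GENMASK(22, 0) */
#define COPY_ALIGN	6			/* 2^6 = 64-byte alignment */

static size_t calc_copysize(size_t size, size_t done)
{
	size_t copy = size - done;

	/* Clamp to what the hardware length register can hold */
	if (copy > MAX_BUFFER_LEN)
		copy = MAX_BUFFER_LEN;

	/*
	 * If this is not the last chunk, round down so the next chunk
	 * starts on an aligned address (needed when DRE is absent).
	 */
	if (done + copy < size)
		copy -= copy % (1U << COPY_ALIGN);

	return copy;
}

int main(void)
{
	size_t size = 20u << 20;	/* one 20 MiB transaction */
	size_t done = 0;

	while (done < size) {
		size_t copy = calc_copysize(size, done);

		printf("segment at offset %zu: %zu bytes\n", done, copy);
		done += copy;
	}
	return 0;
}

Running it shows every segment except the last being trimmed to a 64-byte
multiple, which is exactly how the rounddown() added by the second patch keeps
follow-on descriptors aligned.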
From ae809690b46a71dc56cda5b3b8884c8c41a0df15 Mon Sep 17 00:00:00 2001
From: Radhey Shyam Pandey
Date: Tue, 20 Nov 2018 16:31:48 +0100
Subject: dmaengine: xilinx_dma: program hardware supported buffer length

The AXI DMA IP supports a configurable buffer length register width
(c_sg_length_width), hence read the buffer length width from the
"xlnx,sg-length-width" DT property and ensure that the driver doesn't
program a buffer length exceeding the supported limit. For VDMA and
CDMA there is no change.

Cc: Rob Herring
Cc: Mark Rutland
Cc: devicetree@vger.kernel.org
Signed-off-by: Radhey Shyam Pandey
Signed-off-by: Michal Simek
Signed-off-by: Andrea Merello [rebase, reword]
Signed-off-by: Vinod Koul
---
 drivers/dma/xilinx/xilinx_dma.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)
(limited to 'drivers/dma')

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 93435f7002ab..d59af2b33a99 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -161,7 +161,9 @@
 #define XILINX_DMA_REG_BTT		0x28
 
 /* AXI DMA Specific Masks/Bit fields */
-#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
+#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
+#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
+#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
 #define XILINX_DMA_CR_COALESCE_SHIFT	16
@@ -2626,7 +2628,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	struct xilinx_dma_device *xdev;
 	struct device_node *child, *np = pdev->dev.of_node;
 	struct resource *io;
-	u32 num_frames, addr_width;
+	u32 num_frames, addr_width, len_width;
 	int i, err;
 
 	/* Allocate and initialize the DMA engine structure */
@@ -2658,10 +2660,24 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	/* Retrieve the DMA engine properties from the device tree */
 	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
-	xdev->max_buffer_len = XILINX_DMA_MAX_TRANS_LEN;
+	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
 
-	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+		if (!of_property_read_u32(node, "xlnx,sg-length-width",
+					  &len_width)) {
+			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
+			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
+				dev_warn(xdev->dev,
+					 "invalid xlnx,sg-length-width property value. Using default width\n");
+			} else {
+				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
+					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
+				xdev->max_buffer_len =
+					GENMASK(len_width - 1, 0);
+			}
+		}
+	}
 
 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 		err = of_property_read_u32(node, "xlnx,num-fstores",
--
cgit
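For reference, the width-to-mask mapping programmed above
(max_buffer_len = GENMASK(len_width - 1, 0)) can be tried out in the
user-space sketch below. genmask() is a stand-in for the kernel's GENMASK()
macro and the bounds mirror the XILINX_DMA_*_TRANS_LEN_* constants; this is
an illustrative sketch, not driver code.

/*
 * User-space sketch of the xlnx,sg-length-width validation (not driver
 * code): widths outside [8, 26] fall back to the 23-bit default, and
 * the programmed limit is GENMASK(width - 1, 0).
 */
#include <stdio.h>
#include <stdint.h>

#define LEN_MIN		8
#define LEN_MAX		23
#define V2_LEN_MAX	26

/* Stand-in for the kernel's GENMASK(h, l) on 32-bit values */
static uint32_t genmask(unsigned int h, unsigned int l)
{
	return (~0U >> (31 - h)) & (~0U << l);
}

static uint32_t max_buffer_len(unsigned int len_width)
{
	if (len_width < LEN_MIN || len_width > V2_LEN_MAX)
		return genmask(LEN_MAX - 1, 0);	/* default: 23 bits */
	return genmask(len_width - 1, 0);
}

int main(void)
{
	unsigned int widths[] = { 7, 8, 23, 26, 27 };
	unsigned int i;

	for (i = 0; i < 5; i++)
		printf("width %2u -> max buffer len 0x%08x\n",
		       widths[i], max_buffer_len(widths[i]));
	return 0;
}

A width of 23 yields 0x007fffff (the old XILINX_DMA_MAX_TRANS_LEN), while
out-of-range widths such as 7 or 27 fall back to that same default, matching
the dev_warn() path in the patch.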
From 05f7ea7f6ef611a077fcbb983fb8d6a99e91e1f6 Mon Sep 17 00:00:00 2001
From: Andrea Merello
Date: Tue, 20 Nov 2018 16:31:49 +0100
Subject: dmaengine: xilinx_dma: autodetect whether the HW supports scatter-gather

The AXIDMA and CDMA HW can be built in either direct-access or
scatter-gather mode; the two are SW incompatible. The driver can handle
both versions: so far, a DT property was used to tell the driver whether
to assume the HW is in scatter-gather mode.

This patch makes the driver autodetect this information; the DT property
is no longer required. No changes for VDMA.

Cc: Rob Herring
Cc: Mark Rutland
Cc: devicetree@vger.kernel.org
Cc: Radhey Shyam Pandey
Signed-off-by: Andrea Merello
Reviewed-by: Radhey Shyam Pandey
Signed-off-by: Vinod Koul
---
 drivers/dma/xilinx/xilinx_dma.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
(limited to 'drivers/dma')

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index d59af2b33a99..b559efe06adb 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -86,6 +86,7 @@
 #define XILINX_DMA_DMASR_DMA_DEC_ERR	BIT(6)
 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR	BIT(5)
 #define XILINX_DMA_DMASR_DMA_INT_ERR	BIT(4)
+#define XILINX_DMA_DMASR_SG_MASK	BIT(3)
 #define XILINX_DMA_DMASR_IDLE		BIT(1)
 #define XILINX_DMA_DMASR_HALTED		BIT(0)
 #define XILINX_DMA_DMASR_DELAY_MASK	GENMASK(31, 24)
@@ -414,7 +415,6 @@ struct xilinx_dma_config {
  * @dev: Device Structure
  * @common: DMA device structure
  * @chan: Driver specific DMA channel
- * @has_sg: Specifies whether Scatter-Gather is present or not
  * @mcdma: Specifies whether Multi-Channel is present or not
  * @flush_on_fsync: Flush on frame sync
  * @ext_addr: Indicates 64 bit addressing is supported by dma device
@@ -434,7 +434,6 @@ struct xilinx_dma_device {
 	struct device *dev;
 	struct dma_device common;
 	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
-	bool has_sg;
 	bool mcdma;
 	u32 flush_on_fsync;
 	bool ext_addr;
@@ -2421,7 +2420,6 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	chan->dev = xdev->dev;
 	chan->xdev = xdev;
-	chan->has_sg = xdev->has_sg;
 	chan->desc_pendingcount = 0x0;
 	chan->ext_addr = xdev->ext_addr;
 	/* This variable ensures that descriptors are not
@@ -2521,6 +2519,15 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->stop_transfer = xilinx_dma_stop_transfer;
 	}
 
+	/* check if SG is enabled (only for AXIDMA and CDMA) */
+	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
+		if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+		    XILINX_DMA_DMASR_SG_MASK)
+			chan->has_sg = true;
+		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
+			chan->has_sg ? "enabled" : "disabled");
+	}
+
 	/* Initialize the tasklet */
 	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
 		     (unsigned long)chan);
@@ -2659,7 +2666,6 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 		return PTR_ERR(xdev->regs);
 
 	/* Retrieve the DMA engine properties from the device tree */
-	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
 	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
 
 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
--
cgit
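The detection itself is a single register read: DMASR bit 3 reports whether
the IP was built with SG support. The user-space sketch below mimics that
per-channel decision; fake_dmasr_read() is a hypothetical stand-in for the
driver's dma_ctrl_read() of XILINX_DMA_REG_DMASR on real hardware, and the
enum is only a simplified model of the driver's dmatype field.

/*
 * User-space sketch of the SG autodetection (not driver code).
 * VDMA never uses SG; AXIDMA/CDMA report it in DMASR bit 3.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XILINX_DMA_DMASR_SG_MASK	(1U << 3)	/* BIT(3) */

enum xdma_type { XDMA_TYPE_AXIDMA, XDMA_TYPE_CDMA, XDMA_TYPE_VDMA };

/* Hypothetical stand-in for dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) */
static uint32_t fake_dmasr_read(bool hw_built_with_sg)
{
	return hw_built_with_sg ? XILINX_DMA_DMASR_SG_MASK : 0;
}

static bool detect_sg(enum xdma_type type, bool hw_built_with_sg)
{
	if (type == XDMA_TYPE_VDMA)
		return false;
	return fake_dmasr_read(hw_built_with_sg) & XILINX_DMA_DMASR_SG_MASK;
}

int main(void)
{
	printf("AXIDMA built with SG: %s\n",
	       detect_sg(XDMA_TYPE_AXIDMA, true) ? "enabled" : "disabled");
	printf("CDMA direct-access:   %s\n",
	       detect_sg(XDMA_TYPE_CDMA, false) ? "enabled" : "disabled");
	return 0;
}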
From b8349172b4001ea4e8b38a243275aecd90aa7573 Mon Sep 17 00:00:00 2001
From: Andrea Merello
Date: Tue, 20 Nov 2018 16:31:51 +0100
Subject: dmaengine: xilinx_dma: Drop SG support for VDMA IP

xilinx_vdma_start_transfer() is used only for the VDMA IP, yet it
contains code conditional on the has_sg variable. has_sg is set only
when the HW supports SG mode, which is never true for the VDMA IP.

This patch drops the never-taken branches.

Signed-off-by: Andrea Merello
Reviewed-by: Radhey Shyam Pandey
Signed-off-by: Vinod Koul
---
 drivers/dma/xilinx/xilinx_dma.c | 84 ++++++++++++++++------------------------
 1 file changed, 32 insertions(+), 52 deletions(-)
(limited to 'drivers/dma')

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index b559efe06adb..d9431afa031b 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1102,6 +1102,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
 	u32 reg, j;
 	struct xilinx_vdma_tx_segment *tail_segment;
+	struct xilinx_vdma_tx_segment *segment, *last = NULL;
+	int i = 0;
 
 	/* This function was invoked with lock held */
 	if (chan->err)
@@ -1121,14 +1123,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_segment = list_last_entry(&tail_desc->segments,
 				       struct xilinx_vdma_tx_segment, node);
 
-	/*
-	 * If hardware is idle, then all descriptors on the running lists are
-	 * done, start new transfers
-	 */
-	if (chan->has_sg)
-		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-				desc->async_tx.phys);
-
 	/* Configure the hardware using info in the config structure */
 	if (chan->has_vflip) {
 		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
@@ -1145,15 +1139,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	else
 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
 
-	/*
-	 * With SG, start with circular mode, so that BDs can be fetched.
-	 * In direct register mode, if not parking, enable circular mode
-	 */
-	if (chan->has_sg || !config->park)
-		reg |= XILINX_DMA_DMACR_CIRC_EN;
-
+	/* If not parking, enable circular mode */
 	if (config->park)
 		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+	else
+		reg |= XILINX_DMA_DMACR_CIRC_EN;
 
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
@@ -1175,48 +1165,38 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		return;
 
 	/* Start the transfer */
-	if (chan->has_sg) {
-		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-				tail_segment->phys);
-		list_splice_tail_init(&chan->pending_list, &chan->active_list);
-		chan->desc_pendingcount = 0;
-	} else {
-		struct xilinx_vdma_tx_segment *segment, *last = NULL;
-		int i = 0;
-
-		if (chan->desc_submitcount < chan->num_frms)
-			i = chan->desc_submitcount;
-
-		list_for_each_entry(segment, &desc->segments, node) {
-			if (chan->ext_addr)
-				vdma_desc_write_64(chan,
-					XILINX_VDMA_REG_START_ADDRESS_64(i++),
-					segment->hw.buf_addr,
-					segment->hw.buf_addr_msb);
-			else
-				vdma_desc_write(chan,
+	if (chan->desc_submitcount < chan->num_frms)
+		i = chan->desc_submitcount;
+
+	list_for_each_entry(segment, &desc->segments, node) {
+		if (chan->ext_addr)
+			vdma_desc_write_64(chan,
+				XILINX_VDMA_REG_START_ADDRESS_64(i++),
+				segment->hw.buf_addr,
+				segment->hw.buf_addr_msb);
+		else
+			vdma_desc_write(chan,
 				XILINX_VDMA_REG_START_ADDRESS(i++),
 				segment->hw.buf_addr);
-			last = segment;
-		}
-
-		if (!last)
-			return;
+		last = segment;
+	}
 
-		/* HW expects these parameters to be same for one transaction */
-		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
-		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
-				last->hw.stride);
-		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+	if (!last)
+		return;
 
-		chan->desc_submitcount++;
-		chan->desc_pendingcount--;
-		list_del(&desc->node);
-		list_add_tail(&desc->node, &chan->active_list);
-		if (chan->desc_submitcount == chan->num_frms)
-			chan->desc_submitcount = 0;
-	}
+	/* HW expects these parameters to be same for one transaction */
+	vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+	vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
+			last->hw.stride);
+	vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+
+	chan->desc_submitcount++;
+	chan->desc_pendingcount--;
+	list_del(&desc->node);
+	list_add_tail(&desc->node, &chan->active_list);
+	if (chan->desc_submitcount == chan->num_frms)
+		chan->desc_submitcount = 0;
 
 	chan->idle = false;
 }
--
cgit

From c2be36ac2141b97f9a35ab560381317566bee357 Mon Sep 17 00:00:00 2001
From: YueHaibing
Date: Wed, 9 Jan 2019 12:10:37 +0000
Subject: dmaengine: xilinx_dma: remove set but not used variable 'tail_segment'

Fixes gcc '-Wunused-but-set-variable' warning:

drivers/dma/xilinx/xilinx_dma.c: In function 'xilinx_vdma_start_transfer':
drivers/dma/xilinx/xilinx_dma.c:1104:33: warning: variable 'tail_segment' set but not used [-Wunused-but-set-variable]

It has been unused since commit b8349172b400 ("dmaengine: xilinx_dma:
Drop SG support for VDMA IP").

Signed-off-by: YueHaibing
Reviewed-by: Radhey Shyam Pandey
Signed-off-by: Vinod Koul
---
 drivers/dma/xilinx/xilinx_dma.c | 4 ----
 1 file changed, 4 deletions(-)
(limited to 'drivers/dma')

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index d9431afa031b..c8acd34f1f70 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1101,7 +1101,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	struct xilinx_vdma_config *config = &chan->config;
 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
 	u32 reg, j;
-	struct xilinx_vdma_tx_segment *tail_segment;
 	struct xilinx_vdma_tx_segment *segment, *last = NULL;
 	int i = 0;
 
@@ -1120,9 +1119,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_desc = list_last_entry(&chan->pending_list,
 				    struct xilinx_dma_tx_descriptor, node);
 
-	tail_segment = list_last_entry(&tail_desc->segments,
-				       struct xilinx_vdma_tx_segment, node);
-
 	/* Configure the hardware using info in the config structure */
 	if (chan->has_vflip) {
 		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
--
cgit