Diffstat (limited to 'drivers/dma/xilinx/xilinx_dma.c')
-rw-r--r--	drivers/dma/xilinx/xilinx_dma.c	215
1 file changed, 185 insertions, 30 deletions
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 8cd4e69dc7b4..fabff602065f 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -41,11 +41,12 @@
 #include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
 #include <linux/of_dma.h>
-#include <linux/of_platform.h>
 #include <linux/of_irq.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/string_choices.h>
 #include <linux/clk.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
@@ -112,7 +113,9 @@
 /* Register Direct Mode Registers */
 #define XILINX_DMA_REG_VSIZE			0x0000
+#define XILINX_DMA_VSIZE_MASK			GENMASK(12, 0)
 #define XILINX_DMA_REG_HSIZE			0x0004
+#define XILINX_DMA_HSIZE_MASK			GENMASK(15, 0)
 #define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
@@ -173,12 +176,15 @@
 #define XILINX_DMA_MAX_TRANS_LEN_MAX	23
 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
+#define XILINX_DMA_CR_DELAY_MAX		GENMASK(31, 24)
 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
 #define XILINX_DMA_CR_COALESCE_SHIFT	16
+#define XILINX_DMA_CR_DELAY_SHIFT	24
 #define XILINX_DMA_BD_SOP		BIT(27)
 #define XILINX_DMA_BD_EOP		BIT(26)
+#define XILINX_DMA_BD_COMP_MASK		BIT(31)
 #define XILINX_DMA_COALESCE_MAX		255
-#define XILINX_DMA_NUM_DESCS		255
+#define XILINX_DMA_NUM_DESCS		512
 #define XILINX_DMA_NUM_APP_WORDS	5
@@ -410,6 +416,7 @@ struct xilinx_dma_tx_descriptor {
  * @stop_transfer: Differentiate b/w DMA IP's quiesce
  * @tdest: TDEST value for mcdma
  * @has_vflip: S2MM vertical flip
+ * @irq_delay: Interrupt delay timeout
  */
 struct xilinx_dma_chan {
 	struct xilinx_dma_device *xdev;
@@ -448,6 +455,7 @@ struct xilinx_dma_chan {
 	int (*stop_transfer)(struct xilinx_dma_chan *chan);
 	u16 tdest;
 	bool has_vflip;
+	u8 irq_delay;
 };
 
 /**
@@ -493,6 +501,7 @@ struct xilinx_dma_config {
  * @s2mm_chan_id: DMA s2mm channel identifier
  * @mm2s_chan_id: DMA mm2s channel identifier
  * @max_buffer_len: Max buffer length
+ * @has_axistream_connected: AXI DMA connected to AXI Stream IP
  */
 struct xilinx_dma_device {
 	void __iomem *regs;
@@ -511,6 +520,7 @@ struct xilinx_dma_device {
 	u32 s2mm_chan_id;
 	u32 mm2s_chan_id;
 	u32 max_buffer_len;
+	bool has_axistream_connected;
 };
 
 /* Macros */
@@ -623,6 +633,29 @@ static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
 	}
 }
 
+/**
+ * xilinx_dma_get_metadata_ptr- Populate metadata pointer and payload length
+ * @tx: async transaction descriptor
+ * @payload_len: metadata payload length
+ * @max_len: metadata max length
+ * Return: The app field pointer.
+ */
+static void *xilinx_dma_get_metadata_ptr(struct dma_async_tx_descriptor *tx,
+					 size_t *payload_len, size_t *max_len)
+{
+	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
+	struct xilinx_axidma_tx_segment *seg;
+
+	*max_len = *payload_len = sizeof(u32) * XILINX_DMA_NUM_APP_WORDS;
+	seg = list_first_entry(&desc->segments,
+			       struct xilinx_axidma_tx_segment, node);
+	return seg->hw.app;
+}
+
+static struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = {
+	.get_ptr = xilinx_dma_get_metadata_ptr,
+};
+
 /* -----------------------------------------------------------------------------
  * Descriptors and segments alloc and free
  */
@@ -1372,16 +1405,18 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
-	j = chan->desc_submitcount;
-	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
-	if (chan->direction == DMA_MEM_TO_DEV) {
-		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
-		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
-	} else {
-		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
-		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
+	if (config->park) {
+		j = chan->desc_submitcount;
+		reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+		if (chan->direction == DMA_MEM_TO_DEV) {
+			reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+			reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+		} else {
+			reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+			reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
+		}
+		dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
 	}
-	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
 
 	/* Start the hardware */
 	xilinx_dma_start(chan);
@@ -1535,6 +1570,9 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 	if (chan->has_sg)
 		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
 			     head_desc->async_tx.phys);
+	reg &= ~XILINX_DMA_CR_DELAY_MAX;
+	reg |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
+	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
 	xilinx_dma_start(chan);
 
@@ -1659,6 +1697,8 @@ static void xilinx_dma_issue_pending(struct dma_chan *dchan)
  * xilinx_dma_device_config - Configure the DMA channel
  * @dchan: DMA channel
  * @config: channel configuration
+ *
+ * Return: 0 always.
  */
 static int xilinx_dma_device_config(struct dma_chan *dchan,
 				    struct dma_slave_config *config)
@@ -1681,6 +1721,14 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
 		return;
 
 	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+		if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+			struct xilinx_axidma_tx_segment *seg;
+
+			seg = list_last_entry(&desc->segments,
+					      struct xilinx_axidma_tx_segment, node);
+			if (!(seg->hw.status & XILINX_DMA_BD_COMP_MASK) && chan->has_sg)
+				break;
+		}
 		if (chan->has_sg && chan->xdev->dma_config->dmatype !=
 		    XDMA_TYPE_VDMA)
 			desc->residue = xilinx_dma_get_residue(chan, desc);
@@ -1814,7 +1862,7 @@ static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
 		spin_unlock(&chan->lock);
 	}
 
-	tasklet_schedule(&chan->tasklet);
+	tasklet_hi_schedule(&chan->tasklet);
 	return IRQ_HANDLED;
 }
 
@@ -1862,15 +1910,8 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
 		}
 	}
 
-	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
-		/*
-		 * Device takes too long to do the transfer when user requires
-		 * responsiveness.
-		 */
-		dev_dbg(chan->dev, "Inter-packet latency too long\n");
-	}
-
-	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
+	if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
+		      XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
 		spin_lock(&chan->lock);
 		xilinx_dma_complete_descriptor(chan);
 		chan->idle = true;
@@ -2014,6 +2055,10 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	if (!xt->numf || !xt->sgl[0].size)
 		return NULL;
 
+	if (xt->numf & ~XILINX_DMA_VSIZE_MASK ||
+	    xt->sgl[0].size & ~XILINX_DMA_HSIZE_MASK)
+		return NULL;
+
 	if (xt->frame_size != 1)
 		return NULL;
 
@@ -2128,6 +2173,99 @@ error:
 }
 
 /**
+ * xilinx_dma_prep_peripheral_dma_vec - prepare descriptors for a DMA_SLAVE
+ *	transaction from DMA vectors
+ * @dchan: DMA channel
+ * @vecs: Array of DMA vectors that should be transferred
+ * @nb: number of entries in @vecs
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_peripheral_dma_vec(
+	struct dma_chan *dchan, const struct dma_vec *vecs, size_t nb,
+	enum dma_transfer_direction direction, unsigned long flags)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_tx_descriptor *desc;
+	struct xilinx_axidma_tx_segment *segment, *head, *prev = NULL;
+	size_t copy;
+	size_t sg_used;
+	unsigned int i;
+
+	if (!is_slave_direction(direction) || direction != chan->direction)
+		return NULL;
+
+	desc = xilinx_dma_alloc_tx_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+	/* Build transactions using information from DMA vectors */
+	for (i = 0; i < nb; i++) {
+		sg_used = 0;
+
+		/* Loop until the entire dma_vec entry is used */
+		while (sg_used < vecs[i].len) {
+			struct xilinx_axidma_desc_hw *hw;
+
+			/* Get a free segment */
+			segment = xilinx_axidma_alloc_tx_segment(chan);
+			if (!segment)
+				goto error;
+
+			/*
+			 * Calculate the maximum number of bytes to transfer,
+			 * making sure it is less than the hw limit
+			 */
+			copy = xilinx_dma_calc_copysize(chan, vecs[i].len,
+							sg_used);
+			hw = &segment->hw;
+
+			/* Fill in the descriptor */
+			xilinx_axidma_buf(chan, hw, vecs[i].addr, sg_used, 0);
+			hw->control = copy;
+
+			if (prev)
+				prev->hw.next_desc = segment->phys;
+
+			prev = segment;
+			sg_used += copy;
+
+			/*
+			 * Insert the segment into the descriptor segments
+			 * list.
+			 */
+			list_add_tail(&segment->node, &desc->segments);
+		}
+	}
+
+	head = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node);
+	desc->async_tx.phys = head->phys;
+
+	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
+	if (chan->direction == DMA_MEM_TO_DEV) {
+		segment->hw.control |= XILINX_DMA_BD_SOP;
+		segment = list_last_entry(&desc->segments,
+					  struct xilinx_axidma_tx_segment,
+					  node);
+		segment->hw.control |= XILINX_DMA_BD_EOP;
+	}
+
+	if (chan->xdev->has_axistream_connected)
+		desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
+
+	return &desc->async_tx;
+
+error:
+	xilinx_dma_free_tx_descriptor(chan, desc);
+	return NULL;
+}
+
+/**
  * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
  * @dchan: DMA channel
  * @sgl: scatterlist to transfer to/from
@@ -2219,6 +2357,9 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 		segment->hw.control |= XILINX_DMA_BD_EOP;
 	}
 
+	if (chan->xdev->has_axistream_connected)
+		desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
+
 	return &desc->async_tx;
 
 error:
@@ -2794,6 +2935,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	/* Retrieve the channel properties from the device tree */
 	has_dre = of_property_read_bool(node, "xlnx,include-dre");
 
+	of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);
+
 	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
 
 	err = of_property_read_u32(node, "xlnx,datawidth", &value);
@@ -2859,6 +3002,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		return -EINVAL;
 	}
 
+	xdev->common.directions |= chan->direction;
+
 	/* Request the interrupt */
 	chan->irq = of_irq_get(node, chan->tdest);
 	if (chan->irq < 0)
@@ -2891,7 +3036,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		    XILINX_DMA_DMASR_SG_MASK)
 			chan->has_sg = true;
 		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
-			chan->has_sg ? "enabled" : "disabled");
+			str_enabled_disabled(chan->has_sg));
 	}
 
 	/* Initialize the tasklet */
@@ -2924,7 +3069,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
  * @xdev: Driver specific device structure
  * @node: Device node
  *
- * Return: 0 always.
+ * Return: '0' on success and failure value on error.
  */
 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
 				  struct device_node *node)
@@ -3065,6 +3210,13 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 		}
 	}
 
+	dma_set_max_seg_size(xdev->dev, xdev->max_buffer_len);
+
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		xdev->has_axistream_connected =
+			of_property_read_bool(node, "xlnx,axistream-connected");
+	}
+
 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 		err = of_property_read_u32(node, "xlnx,num-fstores",
 					   &num_frames);
@@ -3090,6 +3242,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	else
 		xdev->ext_addr = false;
 
+	/* Set metadata mode */
+	if (xdev->has_axistream_connected)
+		xdev->common.desc_metadata_modes = DESC_METADATA_ENGINE;
+
 	/* Set the dma mask bits */
 	err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
 	if (err < 0) {
@@ -3117,6 +3273,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	xdev->common.device_config = xilinx_dma_device_config;
 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
+		xdev->common.device_prep_peripheral_dma_vec = xilinx_dma_prep_peripheral_dma_vec;
 		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
 		xdev->common.device_prep_dma_cyclic =
 					xilinx_dma_prep_dma_cyclic;
@@ -3141,8 +3298,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	/* Initialize the channels */
 	for_each_child_of_node(node, child) {
 		err = xilinx_dma_child_probe(xdev, child);
-		if (err < 0)
+		if (err < 0) {
+			of_node_put(child);
 			goto error;
+		}
 	}
 
 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -3190,10 +3349,8 @@ disable_clks:
 /**
  * xilinx_dma_remove - Driver remove function
  * @pdev: Pointer to the platform_device structure
- *
- * Return: Always '0'
  */
-static int xilinx_dma_remove(struct platform_device *pdev)
+static void xilinx_dma_remove(struct platform_device *pdev)
 {
 	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
 	int i;
@@ -3207,8 +3364,6 @@
 		xilinx_dma_chan_remove(xdev->chan[i]);
 
 	xdma_disable_allclks(xdev);
-
-	return 0;
 }
 
 static struct platform_driver xilinx_vdma_driver = {
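Client usage note: the two client-visible additions in this diff are the device_prep_peripheral_dma_vec() callback and the DESC_METADATA_ENGINE metadata ops wired up when the "xlnx,axistream-connected" property is present. The sketch below shows how a dmaengine consumer might exercise both; it is a minimal, hypothetical example and not part of the driver: example_complete() and example_submit_vec() are invented names, and the two buffers are assumed to be already DMA-mapped by the caller on a channel of an AXI DMA instance.

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/kernel.h>

/* Hypothetical completion callback: with DESC_METADATA_ENGINE, a DEV_TO_MEM
 * client reads the metadata after completion. For this driver that is the
 * five per-BD APP words exposed by xilinx_dma_get_metadata_ptr(), so
 * payload_len comes back as 5 * sizeof(u32).
 */
static void example_complete(void *param)
{
	struct dma_async_tx_descriptor *desc = param;
	size_t payload_len, max_len;
	u32 *app;

	app = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
	if (!IS_ERR(app))
		pr_info("APP0: 0x%08x (payload %zu bytes)\n",
			app[0], payload_len);
}

/* Hypothetical submit path: hand two pre-mapped buffers to the engine as a
 * single DEV_TO_MEM transaction through the new
 * device_prep_peripheral_dma_vec() callback.
 */
static int example_submit_vec(struct dma_chan *chan,
			      dma_addr_t buf0, size_t len0,
			      dma_addr_t buf1, size_t len1)
{
	struct dma_vec vecs[] = {
		{ .addr = buf0, .len = len0 },
		{ .addr = buf1, .len = len1 },
	};
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = example_complete;
	desc->callback_param = desc;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Kick the engine; example_complete() runs when the transfer ends */
	dma_async_issue_pending(chan);
	return 0;
}

On the MM2S side the same get_ptr hook is used in the opposite direction: the client fills the APP words between preparation and dmaengine_submit(). The general metadata contract is described in Documentation/driver-api/dmaengine/client.rst.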
