Diffstat (limited to 'drivers/dma/ti/edma.c')
-rw-r--r--  drivers/dma/ti/edma.c | 111
1 file changed, 33 insertions(+), 78 deletions(-)
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 08e47f44d325..552be71db6c4 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI EDMA DMA engine driver
*
* Copyright 2012 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/dmaengine.h>
@@ -24,11 +16,11 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/edma.h>
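
The new <linux/string_choices.h> include pulls in small helpers such as str_yes_no(), used further down to replace the open-coded ternary ecc->chmap_exist ? "yes" : "no". For reference, the helper boils down to roughly the following (simplified sketch of the header, not part of this patch):

    static inline const char *str_yes_no(bool v)
    {
            return v ? "yes" : "no";
    }

Sibling helpers such as str_on_off() and str_enable_disable() follow the same shape, which keeps these strings consistent across kernel log output.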
@@ -118,10 +110,10 @@
/*
* Max of 20 segments per channel to conserve PaRAM slots
- * Also note that MAX_NR_SG should be atleast the no.of periods
+ * Also note that MAX_NR_SG should be at least the no.of periods
* that are required for ASoC, otherwise DMA prep calls will
* fail. Today davinci-pcm is the only user of this driver and
- * requires atleast 17 slots, so we setup the default to 20.
+ * requires at least 17 slots, so we setup the default to 20.
*/
#define MAX_NR_SG 20
#define EDMA_MAX_SLOTS MAX_NR_SG
@@ -211,13 +203,12 @@ struct edma_desc {
u32 residue;
u32 residue_stat;
- struct edma_pset pset[];
+ struct edma_pset pset[] __counted_by(pset_nr);
};
struct edma_cc;
struct edma_tc {
- struct device_node *node;
u16 id;
};
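
The __counted_by(pset_nr) annotation ties the pset[] flexible array to the pset_nr counter in struct edma_desc, so fortified bounds checking (CONFIG_FORTIFY_SOURCE, CONFIG_UBSAN_BOUNDS) can catch out-of-range accesses at run time. A minimal sketch of the pattern, using a hypothetical struct foo rather than the driver's own types (not part of this patch):

    struct foo {
            int nr_items;                        /* must be set before items[] is used */
            u32 items[] __counted_by(nr_items);  /* bounds-checked against nr_items */
    };

    static struct foo *foo_alloc(int nr, gfp_t gfp)
    {
            struct foo *f = kzalloc(struct_size(f, items, nr), gfp);

            if (!f)
                    return NULL;
            f->nr_items = nr;   /* assign the counter first, then fill the array */
            return f;
    }

The removal of the node pointer from struct edma_tc pairs with the of_node_put() change in edma_probe() further down: the driver no longer keeps a long-lived reference to the TPTC device node.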
@@ -326,14 +317,6 @@ static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
edma_write(ecc, offset, val);
}
-static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
-{
- unsigned val = edma_read(ecc, offset);
-
- val &= and;
- edma_write(ecc, offset, val);
-}
-
static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
unsigned val = edma_read(ecc, offset);
@@ -360,12 +343,6 @@ static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
edma_modify(ecc, offset + (i << 2), and, or);
}
-static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
- unsigned or)
-{
- edma_or(ecc, offset + (i << 2), or);
-}
-
static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
unsigned or)
{
@@ -378,11 +355,6 @@ static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}
-static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
-{
- return edma_read(ecc, EDMA_SHADOW0 + offset);
-}
-
static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
int offset, int i)
{
@@ -401,36 +373,12 @@ static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}
-static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
- int param_no)
-{
- return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
-}
-
-static inline void edma_param_write(struct edma_cc *ecc, int offset,
- int param_no, unsigned val)
-{
- edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
-}
-
static inline void edma_param_modify(struct edma_cc *ecc, int offset,
int param_no, unsigned and, unsigned or)
{
edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}
-static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
- unsigned and)
-{
- edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
-}
-
-static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
- unsigned or)
-{
- edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
-}
-
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
int priority)
{
@@ -751,11 +699,6 @@ static void edma_free_channel(struct edma_chan *echan)
edma_setup_interrupt(echan, false);
}
-static inline struct edma_cc *to_edma_cc(struct dma_device *d)
-{
- return container_of(d, struct edma_cc, dma_slave);
-}
-
static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
return container_of(c, struct edma_chan, vchan.chan);
@@ -976,7 +919,7 @@ static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
* and quotient respectively of the division of:
* (dma_length / acnt) by (SZ_64K -1). This is so
* that in case bcnt over flows, we have ccnt to use.
- * Note: In A-sync tranfer only, bcntrld is used, but it
+ * Note: In A-sync transfer only, bcntrld is used, but it
* only applies for sg_dma_len(sg) >= SZ_64K.
* In this case, the best way adopted is- bccnt for the
* first frame will be the remainder below. Then for
@@ -1203,7 +1146,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
* slot2: the remaining amount of data after slot1.
* ACNT = full_length - length1, length2 = ACNT
*
- * When the full_length is multibple of 32767 one slot can be
+ * When the full_length is a multiple of 32767 one slot can be
* used to complete the transfer.
*/
width = array_size;
@@ -1814,7 +1757,7 @@ static void edma_issue_pending(struct dma_chan *chan)
* This limit exists to avoid a possible infinite loop when waiting for proof
* that a particular transfer is completed. This limit can be hit if there
* are large bursts to/from slow devices or the CPU is never able to catch
- * the DMA hardware idle. On an AM335x transfering 48 bytes from the UART
+ * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
* RX-FIFO, as many as 55 loops have been seen.
*/
#define EDMA_MAX_TR_WAIT_LOOPS 1000
@@ -2105,7 +2048,7 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
- dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
+ dev_dbg(dev, "chmap_exist: %s\n", str_yes_no(ecc->chmap_exist));
/* Nothing need to be done if queue priority is provided */
if (pdata->queue_priority_mapping)
@@ -2121,8 +2064,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
* priority. So Q0 is the highest priority queue and the last queue has
* the lowest priority.
*/
- queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
- GFP_KERNEL);
+ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
+ sizeof(*queue_priority_map), GFP_KERNEL);
if (!queue_priority_map)
return -ENOMEM;
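
Passing sizeof(*queue_priority_map) instead of a hard-coded sizeof(s8) is the usual kernel allocation idiom: the element size is derived from the pointer being assigned, so it stays correct if the array's type ever changes. The general shape of the pattern (sketch only; nr_tc is an illustrative name):

    s8 *map = devm_kcalloc(dev, nr_tc + 1, sizeof(*map), GFP_KERNEL);

    if (!map)
            return -ENOMEM;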
@@ -2316,8 +2259,12 @@ static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
return NULL;
out:
- /* The channel is going to be used as HW synchronized */
- echan->hw_triggered = true;
+ /*
+ * The channel is going to be HW synchronized, unless it was
+ * reserved as a memcpy channel
+ */
+ echan->hw_triggered =
+ !edma_is_memcpy_channel(i, ecc->info->memcpy_channels);
return dma_get_slave_channel(chan);
}
#else
@@ -2458,9 +2405,14 @@ static int edma_probe(struct platform_device *pdev)
if (irq < 0 && node)
irq = irq_of_parse_and_map(node, 0);
- if (irq >= 0) {
+ if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
dev_name(dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
+
ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
ecc);
if (ret) {
@@ -2474,9 +2426,14 @@ static int edma_probe(struct platform_device *pdev)
if (irq < 0 && node)
irq = irq_of_parse_and_map(node, 2);
- if (irq >= 0) {
+ if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
dev_name(dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
+
ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
ecc);
if (ret) {
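
Two things change in the interrupt setup. First, the checks move from irq >= 0 to irq > 0: IRQ number 0 is not a usable interrupt, and irq_of_parse_and_map() returns 0 on failure, so the old test could attempt to request a bogus IRQ. Second, devm_kasprintf() returns NULL on allocation failure, and that NULL used to be passed straight on to devm_request_irq() as the name; the added checks now bail out with -ENOMEM. The overall shape of the pattern (sketch only; handler and priv are placeholders, and the driver itself jumps to its PM error path rather than returning directly):

    if (irq > 0) {
            char *name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
                                        dev_name(dev));

            if (!name)
                    return -ENOMEM;

            ret = devm_request_irq(dev, irq, handler, 0, name, priv);
            if (ret)
                    return ret;
    }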
@@ -2507,19 +2464,19 @@ static int edma_probe(struct platform_device *pdev)
goto err_reg1;
}
- for (i = 0;; i++) {
+ for (i = 0; i < ecc->num_tc; i++) {
ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
1, i, &tc_args);
- if (ret || i == ecc->num_tc)
+ if (ret)
break;
- ecc->tc_list[i].node = tc_args.np;
ecc->tc_list[i].id = i;
queue_priority_mapping[i][1] = tc_args.args[0];
if (queue_priority_mapping[i][1] > lowest_priority) {
lowest_priority = queue_priority_mapping[i][1];
info->default_queue = i;
}
+ of_node_put(tc_args.np);
}
/* See if we have optional dma-channel-mask array */
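
of_parse_phandle_with_fixed_args() takes a reference on the node it returns in tc_args.np. Since struct edma_tc no longer stores that node, the reference is dropped right away with of_node_put(), and the loop bound moves into the for statement instead of an i == ecc->num_tc check inside the body. The reference-counting pattern in isolation (sketch; nr_tc is illustrative):

    struct of_phandle_args args;
    int i, err;

    for (i = 0; i < nr_tc; i++) {
            err = of_parse_phandle_with_fixed_args(node, "ti,tptcs", 1, i, &args);
            if (err)
                    break;
            /* consume args.args[0] (here, the TC's queue priority from DT) */
            of_node_put(args.np);   /* balance the reference taken by the parser */
    }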
@@ -2607,7 +2564,7 @@ static void edma_cleanupp_vchan(struct dma_device *dmadev)
}
}
-static int edma_remove(struct platform_device *pdev)
+static void edma_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct edma_cc *ecc = dev_get_drvdata(dev);
@@ -2625,8 +2582,6 @@ static int edma_remove(struct platform_device *pdev)
edma_free_slot(ecc, ecc->dummy_slot);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
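
Finally, edma_remove() now returns void. On recent kernels the platform core ignores any status from the remove callback (platform_driver's remove takes no return value), so drivers are expected to complete their teardown unconditionally. A minimal sketch of the resulting driver shape, with hypothetical names rather than the edma driver's own:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int my_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static void my_remove(struct platform_device *pdev)
    {
            /* tear down; errors here can no longer be reported */
    }

    static struct platform_driver my_driver = {
            .driver = { .name = "my-driver" },
            .probe  = my_probe,
            .remove = my_remove,   /* void-returning callback */
    };
    module_platform_driver(my_driver);
    MODULE_LICENSE("GPL");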