Diffstat (limited to 'drivers/dma/ti')
-rw-r--r--  drivers/dma/ti/cppi41.c       |   3
-rw-r--r--  drivers/dma/ti/edma.c         |  20
-rw-r--r--  drivers/dma/ti/k3-psil.c      |   1
-rw-r--r--  drivers/dma/ti/k3-udma-glue.c |  24
-rw-r--r--  drivers/dma/ti/k3-udma.c      | 133
-rw-r--r--  drivers/dma/ti/k3-udma.h      |   1
-rw-r--r--  drivers/dma/ti/omap-dma.c     |   9
7 files changed, 150 insertions, 41 deletions
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index 7e0b06b5dff0..8d8c3d6038fc 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -1243,7 +1243,7 @@ static const struct dev_pm_ops cppi41_pm_ops = {
static struct platform_driver cpp41_dma_driver = {
.probe = cppi41_dma_probe,
- .remove_new = cppi41_dma_remove,
+ .remove = cppi41_dma_remove,
.driver = {
.name = "cppi41-dma-engine",
.pm = &cppi41_pm_ops,
@@ -1252,5 +1252,6 @@ static struct platform_driver cpp41_dma_driver = {
};
module_platform_driver(cpp41_dma_driver);
+MODULE_DESCRIPTION("Texas Instruments CPPI 4.1 DMA support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
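
The .remove_new -> .remove switch above (and the matching ones in edma.c and omap-dma.c below) follows the platform core conversion that made the remove callback return void; .remove_new was only the transitional name used while int-returning .remove implementations were phased out. A minimal sketch of the resulting driver shape, assuming the current void-returning prototype; the names are illustrative, not taken from this diff:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* acquire resources; a negative errno here still fails the bind */
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	/* release resources; no error code can be returned any more */
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,	/* was .remove_new during the transition */
	.driver	= {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);

MODULE_DESCRIPTION("Illustrative platform driver skeleton");
MODULE_LICENSE("GPL");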
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 5f8d2e93ff3f..3ed406f08c44 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
@@ -208,7 +209,6 @@ struct edma_desc {
struct edma_cc;
struct edma_tc {
- struct device_node *node;
u16 id;
};
@@ -2048,7 +2048,7 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
- dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
+ dev_dbg(dev, "chmap_exist: %s\n", str_yes_no(ecc->chmap_exist));
/* Nothing need to be done if queue priority is provided */
if (pdata->queue_priority_mapping)
@@ -2259,8 +2259,12 @@ static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
return NULL;
out:
- /* The channel is going to be used as HW synchronized */
- echan->hw_triggered = true;
+ /*
+ * The channel is going to be HW synchronized, unless it was
+ * reserved as a memcpy channel
+ */
+ echan->hw_triggered =
+ !edma_is_memcpy_channel(i, ecc->info->memcpy_channels);
return dma_get_slave_channel(chan);
}
#else
@@ -2460,19 +2464,19 @@ static int edma_probe(struct platform_device *pdev)
goto err_reg1;
}
- for (i = 0;; i++) {
+ for (i = 0; i < ecc->num_tc; i++) {
ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
1, i, &tc_args);
- if (ret || i == ecc->num_tc)
+ if (ret)
break;
- ecc->tc_list[i].node = tc_args.np;
ecc->tc_list[i].id = i;
queue_priority_mapping[i][1] = tc_args.args[0];
if (queue_priority_mapping[i][1] > lowest_priority) {
lowest_priority = queue_priority_mapping[i][1];
info->default_queue = i;
}
+ of_node_put(tc_args.np);
}
/* See if we have optional dma-channel-mask array */
@@ -2636,7 +2640,7 @@ static const struct dev_pm_ops edma_pm_ops = {
static struct platform_driver edma_driver = {
.probe = edma_probe,
- .remove_new = edma_remove,
+ .remove = edma_remove,
.driver = {
.name = "edma",
.pm = &edma_pm_ops,
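
str_yes_no() from the newly included <linux/string_choices.h> maps a bool onto the same strings the open-coded ternary produced, keeping the literal strings out of the driver; the header carries similar helpers such as str_on_off() and str_enabled_disabled(). A rough equivalent for reference (the real definition lives in the header):

#include <linux/device.h>
#include <linux/types.h>

/* rough equivalent of the helper; see include/linux/string_choices.h */
static inline const char *my_str_yes_no(bool v)
{
	return v ? "yes" : "no";
}

static void report_chmap(struct device *dev, bool chmap_exist)
{
	/* mirrors the dev_dbg() call in edma_setup_from_hw() */
	dev_dbg(dev, "chmap_exist: %s\n", my_str_yes_no(chmap_exist));
}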
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index 25148d952472..c4b6f0df4686 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -106,4 +106,5 @@ int psil_set_new_ep_config(struct device *dev, const char *name,
return 0;
}
EXPORT_SYMBOL_GPL(psil_set_new_ep_config);
+MODULE_DESCRIPTION("K3 PSI-L endpoint configuration");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index c9b93055dc9d..f87d244cc2d6 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -84,6 +84,7 @@ struct k3_udma_glue_rx_channel {
struct k3_udma_glue_rx_flow *flows;
u32 flow_num;
u32 flows_ready;
+ bool single_fdq; /* one FDQ for all flows */
};
static void k3_udma_chan_dev_release(struct device *dev)
@@ -200,12 +201,9 @@ of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glu
ret = of_k3_udma_glue_parse(udmax_np, common);
if (ret)
- goto out_put_spec;
+ return ret;
ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
-
-out_put_spec:
- of_node_put(udmax_np);
return ret;
}
@@ -973,10 +971,13 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
ep_cfg = rx_chn->common.ep_config;
- if (xudma_is_pktdma(rx_chn->common.udmax))
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
- else
+ rx_chn->single_fdq = false;
+ } else {
rx_chn->udma_rchan_id = -1;
+ rx_chn->single_fdq = true;
+ }
/* request and cfg UDMAP RX channel */
rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
@@ -1106,6 +1107,9 @@ k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn
rx_chn->common.chan_dev.dma_coherent = true;
dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
DMA_BIT_MASK(48));
+ rx_chn->single_fdq = false;
+ } else {
+ rx_chn->single_fdq = true;
}
ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
@@ -1456,7 +1460,7 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, void *data,
- void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
+ void (*cleanup)(void *data, dma_addr_t desc_dma))
{
struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
struct device *dev = rx_chn->common.dev;
@@ -1468,7 +1472,7 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
/* Skip RX FDQ in case one FDQ is used for the set of flows */
- if (skip_fdq)
+ if (rx_chn->single_fdq && flow_num)
goto do_reset;
/*
@@ -1531,6 +1535,9 @@ int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
}
+ if (!flow->virq)
+ return -ENXIO;
+
return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
@@ -1574,4 +1581,5 @@ static int __init k3_udma_glue_class_init(void)
}
module_init(k3_udma_glue_class_init);
+MODULE_DESCRIPTION("TI K3 NAVSS DMA glue interface");
MODULE_LICENSE("GPL v2");
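
With the skip_fdq parameter gone, callers of k3_udma_glue_reset_rx_chn() no longer need to know whether the channel shares one free-descriptor queue across its flows; the glue layer records that in rx_chn->single_fdq and skips the FDQ for every flow other than 0 by itself. A hedged sketch of a caller's teardown loop under the new signature; my_rx_cleanup() and my_teardown() are illustrative helpers, not part of this diff:

#include <linux/types.h>
#include <linux/dma/k3-udma-glue.h>

static void my_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	/* unmap and free the descriptor that was still sitting on the ring */
}

static void my_teardown(struct k3_udma_glue_rx_channel *rx_chn,
			u32 nflows, void *priv)
{
	u32 flow;

	/* one call per flow; no skip_fdq bookkeeping on the caller side */
	for (flow = 0; flow < nflows; flow++)
		k3_udma_glue_reset_rx_chn(rx_chn, flow, priv, my_rx_cleanup);
}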
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 6400d06588a2..aa2dc762140f 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1091,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work)
u32 residue_diff;
ktime_t time_diff;
unsigned long delay;
+ unsigned long flags;
while (1) {
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
if (uc->desc) {
/* Get previous residue and time stamp */
residue_diff = uc->tx_drain.residue;
@@ -1127,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
usleep_range(ktime_to_us(delay),
ktime_to_us(delay) + 10);
continue;
@@ -1143,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
}
static irqreturn_t udma_ring_irq_handler(int irq, void *data)
@@ -3185,27 +3192,40 @@ static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
d->static_tr.elcnt = elcnt;
- /*
- * PDMA must to close the packet when the channel is in packet mode.
- * For TR mode when the channel is not cyclic we also need PDMA to close
- * the packet otherwise the transfer will stall because PDMA holds on
- * the data it has received from the peripheral.
- */
if (uc->config.pkt_mode || !uc->cyclic) {
+ /*
+ * PDMA must close the packet when the channel is in packet mode.
+ * For TR mode when the channel is not cyclic we also need PDMA
+ * to close the packet otherwise the transfer will stall because
+ * PDMA holds on the data it has received from the peripheral.
+ */
unsigned int div = dev_width * elcnt;
if (uc->cyclic)
d->static_tr.bstcnt = d->residue / d->sglen / div;
else
d->static_tr.bstcnt = d->residue / div;
+ } else if (uc->ud->match_data->type == DMA_TYPE_BCDMA &&
+ uc->config.dir == DMA_DEV_TO_MEM &&
+ uc->cyclic) {
+ /*
+ * For cyclic mode with BCDMA we have to set EOP in each TR to
+ * prevent short packet errors seen on channel teardown. So the
+ * PDMA must close the packet after every TR transfer by setting
+ * burst count equal to the number of bytes transferred.
+ */
+ struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base;
- if (uc->config.dir == DMA_DEV_TO_MEM &&
- d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
- return -EINVAL;
+ d->static_tr.bstcnt =
+ (tr_req->icnt0 * tr_req->icnt1) / dev_width;
} else {
d->static_tr.bstcnt = 0;
}
+ if (uc->config.dir == DMA_DEV_TO_MEM &&
+ d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+ return -EINVAL;
+
return 0;
}
@@ -3450,8 +3470,9 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@@ -3476,6 +3497,7 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
unsigned int i;
int num_tr;
+ u32 period_csf = 0;
num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
&tr0_cnt1, &tr1_cnt0);
@@ -3498,6 +3520,20 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
period_addr = buf_addr |
((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
+ /*
+ * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the
+ * last TR of a descriptor, to mark the packet as complete.
+ * This is required for getting the teardown completion message in case
+ * of TX, and to avoid short-packet error in case of RX.
+ *
+ * As we are in cyclic mode, we do not know which period might be the
+ * last one, so set the flag for each period.
+ */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
+ uc->ud->match_data->type == DMA_TYPE_BCDMA) {
+ period_csf = CPPI5_TR_CSF_EOP;
+ }
+
for (i = 0; i < periods; i++) {
int tr_idx = i * num_tr;
@@ -3525,8 +3561,10 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
}
if (!(flags & DMA_PREP_INTERRUPT))
- cppi5_tr_csf_set(&tr_req[tr_idx].flags,
- CPPI5_TR_CSF_SUPR_EVT);
+ period_csf |= CPPI5_TR_CSF_SUPR_EVT;
+
+ if (period_csf)
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, period_csf);
period_addr += period_len;
}
@@ -3655,8 +3693,9 @@ udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@@ -4214,7 +4253,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct udma_dev *ud = ofdma->of_dma_data;
- dma_cap_mask_t mask = ud->ddev.cap_mask;
struct udma_filter_param filter_param;
struct dma_chan *chan;
@@ -4246,7 +4284,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
}
}
- chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
+ chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param,
ofdma->of_node);
if (!chan) {
dev_err(ud->dev, "get channel fail in %s.\n", __func__);
@@ -4372,6 +4410,18 @@ static struct udma_match_data j721s2_bcdma_csi_data = {
.soc_data = &j721s2_bcdma_csi_soc_data,
};
+static struct udma_match_data j722s_bcdma_csi_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x3100,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &j721s2_bcdma_csi_soc_data,
+};
+
static const struct of_device_id udma_of_match[] = {
{
.compatible = "ti,am654-navss-main-udmap",
@@ -4403,8 +4453,13 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,j721s2-dmss-bcdma-csi",
.data = &j721s2_bcdma_csi_data,
},
+ {
+ .compatible = "ti,j722s-dmss-bcdma-csi",
+ .data = &j722s_bcdma_csi_data,
+ },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, udma_of_match);
static struct udma_soc_data am654_soc_data = {
.oes = {
@@ -4472,7 +4527,9 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
break;
case DMA_TYPE_BCDMA:
- ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
+ ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2) +
+ BCDMA_CAP3_HBCHAN_CNT(cap3) +
+ BCDMA_CAP3_UBCHAN_CNT(cap3);
ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
ud->rflow_cnt = ud->rchan_cnt;
@@ -4835,6 +4892,12 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[i].start +
oes->bcdma_bchan_ring;
irq_res.desc[i].num = rm_res->desc[i].num;
+
+ if (rm_res->desc[i].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec +
+ oes->bcdma_bchan_ring;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ }
}
}
} else {
@@ -4858,6 +4921,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_tchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_tchan_data;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
}
@@ -4878,6 +4950,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_rchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_rchan_data;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
}
@@ -5012,6 +5093,12 @@ static int pktdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[i].start +
oes->pktdma_tchan_flow;
irq_res.desc[i].num = rm_res->desc[i].num;
+
+ if (rm_res->desc[i].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec +
+ oes->pktdma_tchan_flow;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ }
}
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
@@ -5023,6 +5110,12 @@ static int pktdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[j].start +
oes->pktdma_rchan_flow;
irq_res.desc[i].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->pktdma_rchan_flow;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
@@ -5531,7 +5624,8 @@ static int udma_probe(struct platform_device *pdev)
uc->config.dir = DMA_MEM_TO_MEM;
uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
dev_name(dev), i);
-
+ if (!uc->name)
+ return -ENOMEM;
vchan_init(&uc->vc, &ud->ddev);
/* Use custom vchan completion handling */
tasklet_setup(&uc->vc.task, udma_vchan_complete);
@@ -5621,6 +5715,7 @@ static struct platform_driver udma_driver = {
};
module_platform_driver(udma_driver);
+MODULE_DESCRIPTION("Texas Instruments UDMA support");
MODULE_LICENSE("GPL v2");
/* Private interfaces to UDMA */
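
The spin_lock_irqsave() additions to udma_check_tx_completion() make the worker inspect uc->desc and the drain bookkeeping only while holding the virt-dma channel lock, and release it again before sleeping. The general shape of that pattern, as a self-contained sketch with placeholder names rather than the driver's own types:

#include <linux/spinlock.h>
#include <linux/delay.h>

struct my_chan {
	spinlock_t lock;	/* stands in for uc->vc.lock */
	bool drained;		/* stands in for the residue bookkeeping */
};

static void poll_until_drained(struct my_chan *c)
{
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&c->lock, flags);
		if (c->drained) {
			/* completion work is done under the lock ... */
			spin_unlock_irqrestore(&c->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&c->lock, flags);

		/* ... but the lock is always dropped before sleeping */
		usleep_range(100, 200);
	}
}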
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
index d349c6d482ae..9062a237cd16 100644
--- a/drivers/dma/ti/k3-udma.h
+++ b/drivers/dma/ti/k3-udma.h
@@ -131,7 +131,6 @@ int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
struct device *xudma_get_device(struct udma_dev *ud);
struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud);
-void xudma_dev_put(struct udma_dev *ud);
u32 xudma_dev_get_psil_base(struct udma_dev *ud);
struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index b9e0e22383b7..8c023c6e623a 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1186,10 +1186,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
d->dev_addr = dev_addr;
d->fi = burst;
d->es = es;
+ d->sglen = 1;
d->sg[0].addr = buf_addr;
d->sg[0].en = period_len / es_bytes[es];
d->sg[0].fn = buf_len / period_len;
- d->sglen = 1;
d->ccr = c->ccr;
if (dir == DMA_DEV_TO_MEM)
@@ -1258,10 +1258,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
d->dev_addr = src;
d->fi = 0;
d->es = data_type;
+ d->sglen = 1;
d->sg[0].en = len / BIT(data_type);
d->sg[0].fn = 1;
d->sg[0].addr = dest;
- d->sglen = 1;
d->ccr = c->ccr;
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
@@ -1309,6 +1309,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
if (data_type > CSDP_DATA_TYPE_32)
data_type = CSDP_DATA_TYPE_32;
+ d->sglen = 1;
sg = &d->sg[0];
d->dir = DMA_MEM_TO_MEM;
d->dev_addr = xt->src_start;
@@ -1316,7 +1317,6 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
sg->en = xt->sgl[0].size / BIT(data_type);
sg->fn = xt->numf;
sg->addr = xt->dst_start;
- d->sglen = 1;
d->ccr = c->ccr;
src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
@@ -1915,7 +1915,7 @@ MODULE_DEVICE_TABLE(of, omap_dma_match);
static struct platform_driver omap_dma_driver = {
.probe = omap_dma_probe,
- .remove_new = omap_dma_remove,
+ .remove = omap_dma_remove,
.driver = {
.name = "omap-dma-engine",
.of_match_table = omap_dma_match,
@@ -1950,4 +1950,5 @@ static void __exit omap_dma_exit(void)
module_exit(omap_dma_exit);
MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("Texas Instruments sDMA DMAengine support");
MODULE_LICENSE("GPL");
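
The omap-dma hunks only move the d->sglen assignment ahead of the d->sg[0] writes. The likely reason (an assumption, not stated in the diff) is a __counted_by(sglen) annotation on the descriptor's flexible sg[] array: with runtime bounds checking enabled, indexing the array before its counter is written is flagged as out of bounds, so the counter has to be set first. Illustrative struct below, not the driver's exact layout:

#include <linux/types.h>

struct my_desc {
	unsigned int sglen;
	struct my_sg {
		dma_addr_t addr;
		u32 en, fn;
	} sg[] __counted_by(sglen);	/* assumed annotation */
};

static void fill_first_sg(struct my_desc *d, dma_addr_t addr,
			  u32 en, u32 fn)
{
	d->sglen = 1;		/* write the counter first ... */
	d->sg[0].addr = addr;	/* ... then the element it now covers */
	d->sg[0].en = en;
	d->sg[0].fn = fn;
}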