Diffstat (limited to 'drivers/dma/ti/k3-udma.c')
 -rw-r--r--  drivers/dma/ti/k3-udma.c | 288
 1 file changed, 251 insertions(+), 37 deletions(-)
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 4c62274e0b33..aa2dc762140f 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -20,7 +20,6 @@
#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
@@ -135,6 +134,7 @@ struct udma_match_data {
u32 flags;
u32 statictr_z_mask;
u8 burst_size[3];
+ struct udma_soc_data *soc_data;
};
struct udma_soc_data {
@@ -304,6 +304,8 @@ struct udma_chan {
/* Channel configuration parameters */
struct udma_chan_config config;
+ /* Channel configuration parameters (backup) */
+ struct udma_chan_config backup_config;
/* dmapool for packet mode descriptors */
bool use_dma_pool;
@@ -1089,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work)
u32 residue_diff;
ktime_t time_diff;
unsigned long delay;
+ unsigned long flags;
while (1) {
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
if (uc->desc) {
/* Get previous residue and time stamp */
residue_diff = uc->tx_drain.residue;
@@ -1125,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
usleep_range(ktime_to_us(delay),
ktime_to_us(delay) + 10);
continue;
@@ -1141,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
}
static irqreturn_t udma_ring_irq_handler(int irq, void *data)
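A hedged reading of the hunk above, not stated in the patch itself: the tx-drain worker now samples uc->desc and the tx_drain bookkeeping under the channel's vc.lock, presumably to serialize against the IRQ/completion paths that update the same fields, and it must drop the lock before usleep_range() because sleeping while holding a spinlock (with IRQs disabled) is not allowed. A minimal sketch of that poll-under-lock pattern, with check_state() as a hypothetical stand-in:

	/* Illustrative only: sample shared state with the lock held,
	 * release it before sleeping, then re-take it on the next pass. */
	while (1) {
		spin_lock_irqsave(&uc->vc.lock, flags);
		done = check_state(uc);	/* hypothetical helper */
		spin_unlock_irqrestore(&uc->vc.lock, flags);
		if (done)
			break;
		usleep_range(delay_us, delay_us + 10);
	}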
@@ -2963,6 +2972,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
struct scatterlist *sgent;
struct cppi5_tr_type15_t *tr_req = NULL;
enum dma_slave_buswidth dev_width;
+ u32 csf = CPPI5_TR_CSF_SUPR_EVT;
u16 tr_cnt0, tr_cnt1;
dma_addr_t dev_addr;
struct udma_desc *d;
@@ -3033,6 +3043,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
asel = 0;
+ csf |= CPPI5_TR_CSF_EOL_ICNT0;
} else {
asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
dev_addr |= asel;
@@ -3056,7 +3067,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
- cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
uc->config.tr_trigger_type,
CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
@@ -3102,8 +3113,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
false, true,
CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
- cppi5_tr_csf_set(&tr_req[tr_idx].flags,
- CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
uc->config.tr_trigger_type,
CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
@@ -3147,8 +3157,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
d->residue += sg_len;
}
- cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
- CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
+ cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, csf | CPPI5_TR_CSF_EOP);
return d;
}
@@ -3183,27 +3192,40 @@ static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
d->static_tr.elcnt = elcnt;
- /*
- * PDMA must to close the packet when the channel is in packet mode.
- * For TR mode when the channel is not cyclic we also need PDMA to close
- * the packet otherwise the transfer will stall because PDMA holds on
- * the data it has received from the peripheral.
- */
if (uc->config.pkt_mode || !uc->cyclic) {
+ /*
+ * PDMA must close the packet when the channel is in packet mode.
+ * For TR mode when the channel is not cyclic we also need PDMA
+ * to close the packet otherwise the transfer will stall because
+ * PDMA holds on the data it has received from the peripheral.
+ */
unsigned int div = dev_width * elcnt;
if (uc->cyclic)
d->static_tr.bstcnt = d->residue / d->sglen / div;
else
d->static_tr.bstcnt = d->residue / div;
+ } else if (uc->ud->match_data->type == DMA_TYPE_BCDMA &&
+ uc->config.dir == DMA_DEV_TO_MEM &&
+ uc->cyclic) {
+ /*
+ * For cyclic mode with BCDMA we have to set EOP in each TR to
+ * prevent short packet errors seen on channel teardown. So the
+ * PDMA must close the packet after every TR transfer by setting
+ * burst count equal to the number of bytes transferred.
+ */
+ struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base;
- if (uc->config.dir == DMA_DEV_TO_MEM &&
- d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
- return -EINVAL;
+ d->static_tr.bstcnt =
+ (tr_req->icnt0 * tr_req->icnt1) / dev_width;
} else {
d->static_tr.bstcnt = 0;
}
+ if (uc->config.dir == DMA_DEV_TO_MEM &&
+ d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+ return -EINVAL;
+
return 0;
}
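The burst-count math above may be easier to follow with a worked example (numbers are illustrative, not from the source):

	/*
	 * Non-cyclic TR mode, dev_width = 2 bytes, elcnt = 4,
	 * d->residue = 1024 bytes:
	 *
	 *	div    = dev_width * elcnt = 2 * 4   = 8
	 *	bstcnt = d->residue / div  = 1024 / 8 = 128
	 *
	 * 128 fits within a statictr_z_mask of 4095, so the DEV_TO_MEM
	 * range check, which now runs after all three branches, passes.
	 */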
@@ -3448,8 +3470,9 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@@ -3474,6 +3497,7 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
unsigned int i;
int num_tr;
+ u32 period_csf = 0;
num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
&tr0_cnt1, &tr1_cnt0);
@@ -3496,6 +3520,20 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
period_addr = buf_addr |
((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
+ /*
+ * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the
+ * last TR of a descriptor, to mark the packet as complete.
+ * This is required for getting the teardown completion message in case
+ * of TX, and to avoid short-packet error in case of RX.
+ *
+ * As we are in cyclic mode, we do not know which period might be the
+ * last one, so set the flag for each period.
+ */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
+ uc->ud->match_data->type == DMA_TYPE_BCDMA) {
+ period_csf = CPPI5_TR_CSF_EOP;
+ }
+
for (i = 0; i < periods; i++) {
int tr_idx = i * num_tr;
@@ -3523,8 +3561,10 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
}
if (!(flags & DMA_PREP_INTERRUPT))
- cppi5_tr_csf_set(&tr_req[tr_idx].flags,
- CPPI5_TR_CSF_SUPR_EVT);
+ period_csf |= CPPI5_TR_CSF_SUPR_EVT;
+
+ if (period_csf)
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, period_csf);
period_addr += period_len;
}
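For reference, the per-period CSF flag combinations the loop above can produce (an editorial summary derived from the diff):

	/*
	 * BCDMA <-> PDMA, DMA_PREP_INTERRUPT set:	EOP
	 * BCDMA <-> PDMA, no DMA_PREP_INTERRUPT:	EOP | SUPR_EVT
	 * other setups,   no DMA_PREP_INTERRUPT:	SUPR_EVT
	 * other setups,   DMA_PREP_INTERRUPT set:	none (csf_set skipped)
	 */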
@@ -3653,8 +3693,9 @@ udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@@ -3677,6 +3718,7 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
int num_tr;
size_t tr_size = sizeof(struct cppi5_tr_type15_t);
u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
+ u32 csf = CPPI5_TR_CSF_SUPR_EVT;
if (uc->config.dir != DMA_MEM_TO_MEM) {
dev_err(chan->device->dev,
@@ -3707,13 +3749,15 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
+ } else {
+ csf |= CPPI5_TR_CSF_EOL_ICNT0;
}
tr_req = d->hwdesc[0].tr_req_base;
cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
- cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_csf_set(&tr_req[0].flags, csf);
tr_req[0].addr = src;
tr_req[0].icnt0 = tr0_cnt0;
@@ -3732,7 +3776,7 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (num_tr == 2) {
cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
- cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_csf_set(&tr_req[1].flags, csf);
tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
tr_req[1].icnt0 = tr1_cnt0;
@@ -3747,8 +3791,7 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
tr_req[1].dicnt3 = 1;
}
- cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
- CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
+ cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, csf | CPPI5_TR_CSF_EOP);
if (uc->config.metadata_size)
d->vd.tx.metadata_ops = &metadata_ops;
@@ -3964,6 +4007,7 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
{
struct udma_chan *uc = to_udma_chan(&vc->chan);
struct udma_desc *d;
+ u8 status;
if (!vd)
return;
@@ -3973,12 +4017,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
if (d->metadata_size)
udma_fetch_epib(uc, d);
- /* Provide residue information for the client */
if (result) {
void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
if (cppi5_desc_get_type(desc_vaddr) ==
CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
+ /* Provide residue information for the client */
result->residue = d->residue -
cppi5_hdesc_get_pktlen(desc_vaddr);
if (result->residue)
@@ -3987,7 +4031,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
result->result = DMA_TRANS_NOERROR;
} else {
result->residue = 0;
- result->result = DMA_TRANS_NOERROR;
+ /* Propagate TR Response errors to the client */
+ status = d->hwdesc[0].tr_resp_base->status;
+ if (status)
+ result->result = DMA_TRANS_ABORTED;
+ else
+ result->result = DMA_TRANS_NOERROR;
}
}
}
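With TR response status now propagated, a dmaengine client can distinguish aborted transfers in its completion handler. A hedged sketch of how that might look on the client side (client_done() and ctx are hypothetical; callback_result is the standard dmaengine result-reporting hook):

	#include <linux/dmaengine.h>

	/* Hypothetical completion handler observing the DMA_TRANS_ABORTED
	 * result that the driver now derives from the TR response status. */
	static void client_done(void *param, const struct dmaengine_result *res)
	{
		if (res->result == DMA_TRANS_ABORTED)
			pr_warn("DMA transfer aborted, residue %u\n", res->residue);
	}

	/* At submit time, on the prepared descriptor: */
	desc->callback_result = client_done;
	desc->callback_param = ctx;	/* hypothetical client context */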
@@ -4204,7 +4253,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct udma_dev *ud = ofdma->of_dma_data;
- dma_cap_mask_t mask = ud->ddev.cap_mask;
struct udma_filter_param filter_param;
struct dma_chan *chan;
@@ -4236,7 +4284,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
}
}
- chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
+ chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param,
ofdma->of_node);
if (!chan) {
dev_err(ud->dev, "get channel fail in %s.\n", __func__);
@@ -4296,6 +4344,34 @@ static struct udma_match_data j721e_mcu_data = {
},
};
+static struct udma_soc_data am62a_dmss_csi_soc_data = {
+ .oes = {
+ .bcdma_rchan_data = 0xe00,
+ .bcdma_rchan_ring = 0x1000,
+ },
+};
+
+static struct udma_soc_data j721s2_bcdma_csi_soc_data = {
+ .oes = {
+ .bcdma_tchan_data = 0x800,
+ .bcdma_tchan_ring = 0xa00,
+ .bcdma_rchan_data = 0xe00,
+ .bcdma_rchan_ring = 0x1000,
+ },
+};
+
+static struct udma_match_data am62a_bcdma_csirx_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x3100,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &am62a_dmss_csi_soc_data,
+};
+
static struct udma_match_data am64_bcdma_data = {
.type = DMA_TYPE_BCDMA,
.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
@@ -4322,6 +4398,30 @@ static struct udma_match_data am64_pktdma_data = {
},
};
+static struct udma_match_data j721s2_bcdma_csi_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x2000,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &j721s2_bcdma_csi_soc_data,
+};
+
+static struct udma_match_data j722s_bcdma_csi_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x3100,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &j721s2_bcdma_csi_soc_data,
+};
+
static const struct of_device_id udma_of_match[] = {
{
.compatible = "ti,am654-navss-main-udmap",
@@ -4345,8 +4445,21 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,am64-dmss-pktdma",
.data = &am64_pktdma_data,
},
+ {
+ .compatible = "ti,am62a-dmss-bcdma-csirx",
+ .data = &am62a_bcdma_csirx_data,
+ },
+ {
+ .compatible = "ti,j721s2-dmss-bcdma-csi",
+ .data = &j721s2_bcdma_csi_data,
+ },
+ {
+ .compatible = "ti,j722s-dmss-bcdma-csi",
+ .data = &j722s_bcdma_csi_data,
+ },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, udma_of_match);
static struct udma_soc_data am654_soc_data = {
.oes = {
@@ -4387,6 +4500,10 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM64X", .data = &am64_soc_data },
{ .family = "J721S2", .data = &j721e_soc_data},
{ .family = "AM62X", .data = &am64_soc_data },
+ { .family = "AM62AX", .data = &am64_soc_data },
+ { .family = "J784S4", .data = &j721e_soc_data },
+ { .family = "AM62PX", .data = &am64_soc_data },
+ { .family = "J722S", .data = &am64_soc_data },
{ /* sentinel */ }
};
@@ -4410,7 +4527,9 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
break;
case DMA_TYPE_BCDMA:
- ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
+ ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2) +
+ BCDMA_CAP3_HBCHAN_CNT(cap3) +
+ BCDMA_CAP3_UBCHAN_CNT(cap3);
ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
ud->rflow_cnt = ud->rchan_cnt;
@@ -4773,9 +4892,18 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[i].start +
oes->bcdma_bchan_ring;
irq_res.desc[i].num = rm_res->desc[i].num;
+
+ if (rm_res->desc[i].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec +
+ oes->bcdma_bchan_ring;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ }
}
}
+ } else {
+ i = 0;
}
+
if (ud->tchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
@@ -4793,6 +4921,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_tchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_tchan_data;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
}
@@ -4813,6 +4950,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_rchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_rchan_data;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
}
@@ -4947,6 +5093,12 @@ static int pktdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[i].start +
oes->pktdma_tchan_flow;
irq_res.desc[i].num = rm_res->desc[i].num;
+
+ if (rm_res->desc[i].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec +
+ oes->pktdma_tchan_flow;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ }
}
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
@@ -4958,6 +5110,12 @@ static int pktdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[j].start +
oes->pktdma_rchan_flow;
irq_res.desc[i].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->pktdma_rchan_flow;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
@@ -5271,12 +5429,15 @@ static int udma_probe(struct platform_device *pdev)
}
ud->match_data = match->data;
- soc = soc_device_match(k3_soc_devices);
- if (!soc) {
- dev_err(dev, "No compatible SoC found\n");
- return -ENODEV;
+ ud->soc_data = ud->match_data->soc_data;
+ if (!ud->soc_data) {
+ soc = soc_device_match(k3_soc_devices);
+ if (!soc) {
+ dev_err(dev, "No compatible SoC found\n");
+ return -ENODEV;
+ }
+ ud->soc_data = soc->data;
}
- ud->soc_data = soc->data;
ret = udma_get_mmrs(pdev, ud);
if (ret)
@@ -5345,7 +5506,6 @@ static int udma_probe(struct platform_device *pdev)
dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_TI_SCI_INTA_MSI);
if (!dev->msi.domain) {
- dev_err(dev, "Failed to get MSI domain\n");
return -EPROBE_DEFER;
}
@@ -5464,7 +5624,8 @@ static int udma_probe(struct platform_device *pdev)
uc->config.dir = DMA_MEM_TO_MEM;
uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
dev_name(dev), i);
-
+ if (!uc->name)
+ return -ENOMEM;
vchan_init(&uc->vc, &ud->ddev);
/* Use custom vchan completion handling */
tasklet_setup(&uc->vc.task, udma_vchan_complete);
@@ -5492,16 +5653,69 @@ static int udma_probe(struct platform_device *pdev)
return ret;
}
+static int __maybe_unused udma_pm_suspend(struct device *dev)
+{
+ struct udma_dev *ud = dev_get_drvdata(dev);
+ struct dma_device *dma_dev = &ud->ddev;
+ struct dma_chan *chan;
+ struct udma_chan *uc;
+
+ list_for_each_entry(chan, &dma_dev->channels, device_node) {
+ if (chan->client_count) {
+ uc = to_udma_chan(chan);
+ /* backup the channel configuration */
+ memcpy(&uc->backup_config, &uc->config,
+ sizeof(struct udma_chan_config));
+ dev_dbg(dev, "Suspending channel %s\n",
+ dma_chan_name(chan));
+ ud->ddev.device_free_chan_resources(chan);
+ }
+ }
+
+ return 0;
+}
+
+static int __maybe_unused udma_pm_resume(struct device *dev)
+{
+ struct udma_dev *ud = dev_get_drvdata(dev);
+ struct dma_device *dma_dev = &ud->ddev;
+ struct dma_chan *chan;
+ struct udma_chan *uc;
+ int ret;
+
+ list_for_each_entry(chan, &dma_dev->channels, device_node) {
+ if (chan->client_count) {
+ uc = to_udma_chan(chan);
+ /* restore the channel configuration */
+ memcpy(&uc->config, &uc->backup_config,
+ sizeof(struct udma_chan_config));
+ dev_dbg(dev, "Resuming channel %s\n",
+ dma_chan_name(chan));
+ ret = ud->ddev.device_alloc_chan_resources(chan);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops udma_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(udma_pm_suspend, udma_pm_resume)
+};
+
static struct platform_driver udma_driver = {
.driver = {
.name = "ti-udma",
.of_match_table = udma_of_match,
.suppress_bind_attrs = true,
+ .pm = &udma_pm_ops,
},
.probe = udma_probe,
};
module_platform_driver(udma_driver);
+MODULE_DESCRIPTION("Texas Instruments UDMA support");
MODULE_LICENSE("GPL v2");
/* Private interfaces to UDMA */
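One client-visible consequence of the late-sleep PM hooks added above is worth spelling out (a hedged reading, not stated in the patch): udma_pm_suspend() frees channel resources but snapshots uc->config into backup_config, and udma_pm_resume() restores that config before re-allocating, so a client that configured a channel before suspend can keep using it afterwards without re-issuing dmaengine_slave_config(). A minimal sketch under that assumption ("rx" and client_setup() are illustrative):

	#include <linux/dmaengine.h>

	static int client_setup(struct device *dev, struct dma_chan **out)
	{
		struct dma_slave_config cfg = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		struct dma_chan *ch = dma_request_chan(dev, "rx");

		if (IS_ERR(ch))
			return PTR_ERR(ch);

		*out = ch;
		/* This configuration survives a system suspend/resume
		 * cycle: the driver backs it up and restores it in its
		 * PM hooks before re-allocating channel resources. */
		return dmaengine_slave_config(ch, &cfg);
	}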