Diffstat (limited to 'drivers/dma/ti/k3-udma.c')
-rw-r--r-- drivers/dma/ti/k3-udma.c | 133
1 file changed, 114 insertions(+), 19 deletions(-)
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 6400d06588a2..aa2dc762140f 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1091,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work)
u32 residue_diff;
ktime_t time_diff;
unsigned long delay;
+ unsigned long flags;
while (1) {
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
if (uc->desc) {
/* Get previous residue and time stamp */
residue_diff = uc->tx_drain.residue;
@@ -1127,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
usleep_range(ktime_to_us(delay),
ktime_to_us(delay) + 10);
continue;
@@ -1143,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
}
static irqreturn_t udma_ring_irq_handler(int irq, void *data)
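Note on the hunk above: the drain-monitor worker now takes the channel's virt-dma lock before inspecting uc->desc, and must drop it again before usleep_range(), since sleeping while holding a spinlock is forbidden. A minimal sketch of that unlock-before-sleep shape (the lock and flag names here are illustrative, not the driver's):

#include <linux/spinlock.h>
#include <linux/delay.h>

/* Poll shared state under a spinlock, but never sleep while holding it. */
static void poll_until_drained(spinlock_t *lock, bool *done,
			       unsigned long delay_us)
{
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);		/* protect shared state */
		if (*done) {
			spin_unlock_irqrestore(lock, flags);
			break;
		}
		spin_unlock_irqrestore(lock, flags);	/* drop before sleeping */
		usleep_range(delay_us, delay_us + 10);	/* sleeping is legal here */
	}
}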
@@ -3185,27 +3192,40 @@ static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
d->static_tr.elcnt = elcnt;
- /*
- * PDMA must to close the packet when the channel is in packet mode.
- * For TR mode when the channel is not cyclic we also need PDMA to close
- * the packet otherwise the transfer will stall because PDMA holds on
- * the data it has received from the peripheral.
- */
if (uc->config.pkt_mode || !uc->cyclic) {
+ /*
+ * PDMA must close the packet when the channel is in packet mode.
+ * For TR mode when the channel is not cyclic we also need PDMA
+ * to close the packet otherwise the transfer will stall because
+ * PDMA holds on the data it has received from the peripheral.
+ */
unsigned int div = dev_width * elcnt;
if (uc->cyclic)
d->static_tr.bstcnt = d->residue / d->sglen / div;
else
d->static_tr.bstcnt = d->residue / div;
+ } else if (uc->ud->match_data->type == DMA_TYPE_BCDMA &&
+ uc->config.dir == DMA_DEV_TO_MEM &&
+ uc->cyclic) {
+ /*
+ * For cyclic mode with BCDMA we have to set EOP in each TR to
+ * prevent short packet errors seen on channel teardown. So the
+ * PDMA must close the packet after every TR transfer by setting
+ * burst count equal to the number of bytes transferred.
+ */
+ struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base;
- if (uc->config.dir == DMA_DEV_TO_MEM &&
- d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
- return -EINVAL;
+ d->static_tr.bstcnt =
+ (tr_req->icnt0 * tr_req->icnt1) / dev_width;
} else {
d->static_tr.bstcnt = 0;
}
+ if (uc->config.dir == DMA_DEV_TO_MEM &&
+ d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+ return -EINVAL;
+
return 0;
}
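Worked example for the arithmetic above: with dev_width = 2 bytes and elcnt = 4 elements, one burst carries div = 8 bytes, so a residue of 4096 bytes gives bstcnt = 512; in the new BCDMA cyclic branch the per-TR byte count icnt0 * icnt1 is divided by dev_width instead. The range check now runs after every branch, which a hedged sketch makes explicit (statictr_z_mask is per-SoC, e.g. 4095 where Z is a 12-bit field):

/* Sketch: validate a computed burst count against the SoC's Z-field mask. */
static int check_static_tr_z(u32 bstcnt, u32 statictr_z_mask, bool dev_to_mem)
{
	if (dev_to_mem && bstcnt > statictr_z_mask)
		return -EINVAL;	/* would overflow the StaticTR Z field */
	return 0;
}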
@@ -3450,8 +3470,9 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@@ -3476,6 +3497,7 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
unsigned int i;
int num_tr;
+ u32 period_csf = 0;
num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
&tr0_cnt1, &tr1_cnt0);
@@ -3498,6 +3520,20 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
period_addr = buf_addr |
((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
+ /*
+ * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the
+ * last TR of a descriptor, to mark the packet as complete.
+ * This is required for getting the teardown completion message in case
+ * of TX, and to avoid short-packet error in case of RX.
+ *
+ * As we are in cyclic mode, we do not know which period might be the
+ * last one, so set the flag for each period.
+ */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
+ uc->ud->match_data->type == DMA_TYPE_BCDMA) {
+ period_csf = CPPI5_TR_CSF_EOP;
+ }
+
for (i = 0; i < periods; i++) {
int tr_idx = i * num_tr;
@@ -3525,8 +3561,10 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
}
if (!(flags & DMA_PREP_INTERRUPT))
- cppi5_tr_csf_set(&tr_req[tr_idx].flags,
- CPPI5_TR_CSF_SUPR_EVT);
+ period_csf |= CPPI5_TR_CSF_SUPR_EVT;
+
+ if (period_csf)
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, period_csf);
period_addr += period_len;
}
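The loop body above now accumulates all completion-status flags for a period into period_csf and commits them with a single cppi5_tr_csf_set() call, rather than writing CPPI5_TR_CSF_SUPR_EVT directly. The accumulate-then-commit pattern, restated per period (needs_eop stands in for the BCDMA/PDMA condition checked before the loop):

u32 csf = 0;

if (needs_eop)				/* BCDMA <-> PDMA cyclic: close every period */
	csf |= CPPI5_TR_CSF_EOP;
if (!(flags & DMA_PREP_INTERRUPT))	/* caller suppressed per-TR events */
	csf |= CPPI5_TR_CSF_SUPR_EVT;
if (csf)
	cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);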
@@ -3655,8 +3693,9 @@ udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@@ -4214,7 +4253,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct udma_dev *ud = ofdma->of_dma_data;
- dma_cap_mask_t mask = ud->ddev.cap_mask;
struct udma_filter_param filter_param;
struct dma_chan *chan;
@@ -4246,7 +4284,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
}
}
- chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
+ chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param,
ofdma->of_node);
if (!chan) {
dev_err(ud->dev, "get channel fail in %s.\n", __func__);
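The xlate change drops an on-stack copy of dma_cap_mask_t, which is a full bitmap rather than a scalar; __dma_request_channel() only reads the mask, so passing &ud->ddev.cap_mask directly is safe. For reference, the call's declaration (as in include/linux/dmaengine.h, paraphrased here):

struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np);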
@@ -4372,6 +4410,18 @@ static struct udma_match_data j721s2_bcdma_csi_data = {
.soc_data = &j721s2_bcdma_csi_soc_data,
};
+static struct udma_match_data j722s_bcdma_csi_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x3100,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &j721s2_bcdma_csi_soc_data,
+};
+
static const struct of_device_id udma_of_match[] = {
{
.compatible = "ti,am654-navss-main-udmap",
@@ -4403,8 +4453,13 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,j721s2-dmss-bcdma-csi",
.data = &j721s2_bcdma_csi_data,
},
+ {
+ .compatible = "ti,j722s-dmss-bcdma-csi",
+ .data = &j722s_bcdma_csi_data,
+ },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, udma_of_match);
static struct udma_soc_data am654_soc_data = {
.oes = {
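The added MODULE_DEVICE_TABLE(of, udma_of_match) exports the compatible strings above as module aliases, so userspace can autoload the driver when it is built as a module; without it, a modular k3-udma would only bind if loaded by hand.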
@@ -4472,7 +4527,9 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
break;
case DMA_TYPE_BCDMA:
- ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
+ ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2) +
+ BCDMA_CAP3_HBCHAN_CNT(cap3) +
+ BCDMA_CAP3_UBCHAN_CNT(cap3);
ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
ud->rflow_cnt = ud->rchan_cnt;
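CAP3 also advertises high-throughput and ultra-high-throughput block-copy channels, so the total bchan count is now the sum of all three fields. A hedged sketch of the extraction, assuming GENMASK-style layouts (the real masks live in k3-udma.h; the bit positions below are illustrative only):

#include <linux/bitfield.h>

#define CAP2_BCHAN_CNT	GENMASK(8, 0)	/* illustrative layout */
#define CAP3_HBCHAN_CNT	GENMASK(17, 9)	/* illustrative layout */
#define CAP3_UBCHAN_CNT	GENMASK(26, 18)	/* illustrative layout */

static u32 total_bchan(u32 cap2, u32 cap3)
{
	return FIELD_GET(CAP2_BCHAN_CNT, cap2) +
	       FIELD_GET(CAP3_HBCHAN_CNT, cap3) +
	       FIELD_GET(CAP3_UBCHAN_CNT, cap3);
}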
@@ -4835,6 +4892,12 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[i].start +
oes->bcdma_bchan_ring;
irq_res.desc[i].num = rm_res->desc[i].num;
+
+ if (rm_res->desc[i].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec +
+ oes->bcdma_bchan_ring;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ }
}
}
} else {
@@ -4858,6 +4921,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_tchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_tchan_data;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
}
@@ -4878,6 +4950,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_rchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_rchan_data;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
}
@@ -5012,6 +5093,12 @@ static int pktdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[i].start +
oes->pktdma_tchan_flow;
irq_res.desc[i].num = rm_res->desc[i].num;
+
+ if (rm_res->desc[i].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec +
+ oes->pktdma_tchan_flow;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ }
}
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
@@ -5023,6 +5110,12 @@ static int pktdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[j].start +
oes->pktdma_rchan_flow;
irq_res.desc[i].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->pktdma_rchan_flow;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
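Every hunk in this pair of setup functions repeats one pattern: a TI-SCI resource range may carry a secondary span (start_sec/num_sec) next to the primary one, and the interrupt resource request has to mirror both, offset by the same event base. A condensed sketch, using the struct ti_sci_resource_desc fields seen in the hunks:

/* Mirror a primary TI-SCI range, plus its optional secondary range,
 * into the interrupt resource request. */
static void mirror_range(struct ti_sci_resource_desc *dst,
			 const struct ti_sci_resource_desc *src,
			 u32 event_offset)
{
	dst->start = src->start + event_offset;
	dst->num = src->num;

	if (src->num_sec) {	/* secondary range present */
		dst->start_sec = src->start_sec + event_offset;
		dst->num_sec = src->num_sec;
	}
}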
@@ -5531,7 +5624,8 @@ static int udma_probe(struct platform_device *pdev)
uc->config.dir = DMA_MEM_TO_MEM;
uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
dev_name(dev), i);
-
+ if (!uc->name)
+ return -ENOMEM;
vchan_init(&uc->vc, &ud->ddev);
/* Use custom vchan completion handling */
tasklet_setup(&uc->vc.task, udma_vchan_complete);
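devm_kasprintf() returns NULL when allocation fails, and the channel name was previously used without a check; probe now bails out with -ENOMEM. Because the string is device-managed, the early return leaks nothing: everything already allocated through devm is released automatically when probe fails.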
@@ -5621,6 +5715,7 @@ static struct platform_driver udma_driver = {
};
module_platform_driver(udma_driver);
+MODULE_DESCRIPTION("Texas Instruments UDMA support");
MODULE_LICENSE("GPL v2");
/* Private interfaces to UDMA */