Diffstat (limited to 'drivers/dma/ti')
-rw-r--r--  drivers/dma/ti/cppi41.c          |   3
-rw-r--r--  drivers/dma/ti/edma.c            |   9
-rw-r--r--  drivers/dma/ti/k3-psil-j721s2.c  |  73
-rw-r--r--  drivers/dma/ti/k3-psil.c         |   1
-rw-r--r--  drivers/dma/ti/k3-udma-glue.c    | 301
-rw-r--r--  drivers/dma/ti/k3-udma.c         |  84
-rw-r--r--  drivers/dma/ti/k3-udma.h         |   1
-rw-r--r--  drivers/dma/ti/omap-dma.c        |   9
8 files changed, 370 insertions(+), 111 deletions(-)
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index 7e0b06b5dff0..8d8c3d6038fc 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -1243,7 +1243,7 @@ static const struct dev_pm_ops cppi41_pm_ops = {
static struct platform_driver cpp41_dma_driver = {
.probe = cppi41_dma_probe,
- .remove_new = cppi41_dma_remove,
+ .remove = cppi41_dma_remove,
.driver = {
.name = "cppi41-dma-engine",
.pm = &cppi41_pm_ops,
@@ -1252,5 +1252,6 @@ static struct platform_driver cpp41_dma_driver = {
};
module_platform_driver(cpp41_dma_driver);
+MODULE_DESCRIPTION("Texas Instruments CPPI 4.1 DMA support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 5f8d2e93ff3f..4ece125b2ae7 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -208,7 +208,6 @@ struct edma_desc {
struct edma_cc;
struct edma_tc {
- struct device_node *node;
u16 id;
};
@@ -2460,19 +2459,19 @@ static int edma_probe(struct platform_device *pdev)
goto err_reg1;
}
- for (i = 0;; i++) {
+ for (i = 0; i < ecc->num_tc; i++) {
ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
1, i, &tc_args);
- if (ret || i == ecc->num_tc)
+ if (ret)
break;
- ecc->tc_list[i].node = tc_args.np;
ecc->tc_list[i].id = i;
queue_priority_mapping[i][1] = tc_args.args[0];
if (queue_priority_mapping[i][1] > lowest_priority) {
lowest_priority = queue_priority_mapping[i][1];
info->default_queue = i;
}
+ of_node_put(tc_args.np);
}
/* See if we have optional dma-channel-mask array */
@@ -2636,7 +2635,7 @@ static const struct dev_pm_ops edma_pm_ops = {
static struct platform_driver edma_driver = {
.probe = edma_probe,
- .remove_new = edma_remove,
+ .remove = edma_remove,
.driver = {
.name = "edma",
.pm = &edma_pm_ops,
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
index 1d5430fc5724..ba08bdcdcd2b 100644
--- a/drivers/dma/ti/k3-psil-j721s2.c
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -57,6 +57,14 @@
}, \
}
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep j721s2_src_ep_map[] = {
/* PDMA_MCASP - McASP0-4 */
@@ -114,6 +122,71 @@ static struct psil_ep j721s2_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x4940),
+ PSIL_CSI2RX(0x4941),
+ PSIL_CSI2RX(0x4942),
+ PSIL_CSI2RX(0x4943),
+ PSIL_CSI2RX(0x4944),
+ PSIL_CSI2RX(0x4945),
+ PSIL_CSI2RX(0x4946),
+ PSIL_CSI2RX(0x4947),
+ PSIL_CSI2RX(0x4948),
+ PSIL_CSI2RX(0x4949),
+ PSIL_CSI2RX(0x494a),
+ PSIL_CSI2RX(0x494b),
+ PSIL_CSI2RX(0x494c),
+ PSIL_CSI2RX(0x494d),
+ PSIL_CSI2RX(0x494e),
+ PSIL_CSI2RX(0x494f),
+ PSIL_CSI2RX(0x4950),
+ PSIL_CSI2RX(0x4951),
+ PSIL_CSI2RX(0x4952),
+ PSIL_CSI2RX(0x4953),
+ PSIL_CSI2RX(0x4954),
+ PSIL_CSI2RX(0x4955),
+ PSIL_CSI2RX(0x4956),
+ PSIL_CSI2RX(0x4957),
+ PSIL_CSI2RX(0x4958),
+ PSIL_CSI2RX(0x4959),
+ PSIL_CSI2RX(0x495a),
+ PSIL_CSI2RX(0x495b),
+ PSIL_CSI2RX(0x495c),
+ PSIL_CSI2RX(0x495d),
+ PSIL_CSI2RX(0x495e),
+ PSIL_CSI2RX(0x495f),
+ PSIL_CSI2RX(0x4960),
+ PSIL_CSI2RX(0x4961),
+ PSIL_CSI2RX(0x4962),
+ PSIL_CSI2RX(0x4963),
+ PSIL_CSI2RX(0x4964),
+ PSIL_CSI2RX(0x4965),
+ PSIL_CSI2RX(0x4966),
+ PSIL_CSI2RX(0x4967),
+ PSIL_CSI2RX(0x4968),
+ PSIL_CSI2RX(0x4969),
+ PSIL_CSI2RX(0x496a),
+ PSIL_CSI2RX(0x496b),
+ PSIL_CSI2RX(0x496c),
+ PSIL_CSI2RX(0x496d),
+ PSIL_CSI2RX(0x496e),
+ PSIL_CSI2RX(0x496f),
+ PSIL_CSI2RX(0x4970),
+ PSIL_CSI2RX(0x4971),
+ PSIL_CSI2RX(0x4972),
+ PSIL_CSI2RX(0x4973),
+ PSIL_CSI2RX(0x4974),
+ PSIL_CSI2RX(0x4975),
+ PSIL_CSI2RX(0x4976),
+ PSIL_CSI2RX(0x4977),
+ PSIL_CSI2RX(0x4978),
+ PSIL_CSI2RX(0x4979),
+ PSIL_CSI2RX(0x497a),
+ PSIL_CSI2RX(0x497b),
+ PSIL_CSI2RX(0x497c),
+ PSIL_CSI2RX(0x497d),
+ PSIL_CSI2RX(0x497e),
+ PSIL_CSI2RX(0x497f),
/* MAIN SA2UL */
PSIL_SA2UL(0x4a40, 0),
PSIL_SA2UL(0x4a41, 0),
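
Each PSIL_CSI2RX() entry above describes a plain native PSI-L endpoint, covering the 64 CSI2RX source threads 0x4940-0x497f. A consumer resolves such a thread through the PSI-L table; a hedged sketch of that lookup, which the k3-udma-glue layer performs internally (psil_get_ep_config() is declared in the driver-internal k3-psil-priv.h):

#include "k3-psil-priv.h"       /* driver-internal declaration */

static int example_lookup_csi2rx_ep(void)
{
        struct psil_ep_config *ep;

        ep = psil_get_ep_config(0x4940);        /* first CSI2RX source thread */
        if (IS_ERR(ep))
                return PTR_ERR(ep);
        /* for the entries added above: ep->ep_type == PSIL_EP_NATIVE */
        return 0;
}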
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index 25148d952472..c4b6f0df4686 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -106,4 +106,5 @@ int psil_set_new_ep_config(struct device *dev, const char *name,
return 0;
}
EXPORT_SYMBOL_GPL(psil_set_new_ep_config);
+MODULE_DESCRIPTION("K3 PSI-L endpoint configuration");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index c278d5facf7d..7c224c3ab7a0 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -111,6 +111,35 @@ static int of_k3_udma_glue_parse(struct device_node *udmax_np,
return 0;
}
+static int of_k3_udma_glue_parse_chn_common(struct k3_udma_glue_common *common, u32 thread_id,
+ bool tx_chn)
+{
+ if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
+ return -EINVAL;
+
+ if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
+ return -EINVAL;
+
+ /* get psil endpoint config */
+ common->ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(common->ep_config)) {
+ dev_err(common->dev,
+ "No configuration for psi-l thread 0x%04x\n",
+ thread_id);
+ return PTR_ERR(common->ep_config);
+ }
+
+ common->epib = common->ep_config->needs_epib;
+ common->psdata_size = common->ep_config->psd_size;
+
+ if (tx_chn)
+ common->dst_thread = thread_id;
+ else
+ common->src_thread = thread_id;
+
+ return 0;
+}
+
static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
const char *name, struct k3_udma_glue_common *common,
bool tx_chn)
@@ -153,38 +182,29 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
common->atype_asel = dma_spec.args[1];
}
- if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
- ret = -EINVAL;
- goto out_put_spec;
- }
+ ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
- if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
- ret = -EINVAL;
- goto out_put_spec;
- }
+out_put_spec:
+ of_node_put(dma_spec.np);
+ return ret;
+}
- /* get psil endpoint config */
- common->ep_config = psil_get_ep_config(thread_id);
- if (IS_ERR(common->ep_config)) {
- dev_err(common->dev,
- "No configuration for psi-l thread 0x%04x\n",
- thread_id);
- ret = PTR_ERR(common->ep_config);
- goto out_put_spec;
- }
+static int
+of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glue_common *common,
+ bool tx_chn, u32 thread_id)
+{
+ int ret = 0;
- common->epib = common->ep_config->needs_epib;
- common->psdata_size = common->ep_config->psd_size;
+ if (unlikely(!udmax_np))
+ return -EINVAL;
- if (tx_chn)
- common->dst_thread = thread_id;
- else
- common->src_thread = thread_id;
+ ret = of_k3_udma_glue_parse(udmax_np, common);
+ if (ret)
+ return ret;
-out_put_spec:
- of_node_put(dma_spec.np);
+ ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
return ret;
-};
+}
static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
@@ -251,29 +271,13 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}
-struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
- const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
+static int
+k3_udma_glue_request_tx_chn_common(struct device *dev,
+ struct k3_udma_glue_tx_channel *tx_chn,
+ struct k3_udma_glue_tx_channel_cfg *cfg)
{
- struct k3_udma_glue_tx_channel *tx_chn;
int ret;
- tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
- if (!tx_chn)
- return ERR_PTR(-ENOMEM);
-
- tx_chn->common.dev = dev;
- tx_chn->common.swdata_size = cfg->swdata_size;
- tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
- tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
- tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
- tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
-
- /* parse of udmap channel */
- ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
- &tx_chn->common, true);
- if (ret)
- goto err;
-
tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
tx_chn->common.psdata_size,
tx_chn->common.swdata_size);
@@ -289,7 +293,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
if (IS_ERR(tx_chn->udma_tchanx)) {
ret = PTR_ERR(tx_chn->udma_tchanx);
dev_err(dev, "UDMAX tchanx get err %d\n", ret);
- goto err;
+ return ret;
}
tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
@@ -302,7 +306,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
dev_err(dev, "Channel Device registration failed %d\n", ret);
put_device(&tx_chn->common.chan_dev);
tx_chn->common.chan_dev.parent = NULL;
- goto err;
+ return ret;
}
if (xudma_is_pktdma(tx_chn->common.udmax)) {
@@ -326,7 +330,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
&tx_chn->ringtxcq);
if (ret) {
dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
- goto err;
+ return ret;
}
/* Set the dma_dev for the rings to be configured */
@@ -342,13 +346,13 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringtx %d\n", ret);
- goto err;
+ return ret;
}
ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringtx %d\n", ret);
- goto err;
+ return ret;
}
/* request and cfg psi-l */
@@ -359,11 +363,42 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
ret = k3_udma_glue_cfg_tx_chn(tx_chn);
if (ret) {
dev_err(dev, "Failed to cfg tchan %d\n", ret);
- goto err;
+ return ret;
}
k3_udma_glue_dump_tx_chn(tx_chn);
+ return 0;
+}
+
+struct k3_udma_glue_tx_channel *
+k3_udma_glue_request_tx_chn(struct device *dev, const char *name,
+ struct k3_udma_glue_tx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int ret;
+
+ tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+ if (!tx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ tx_chn->common.dev = dev;
+ tx_chn->common.swdata_size = cfg->swdata_size;
+ tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+ tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+ tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+ tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+ /* parse of udmap channel */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &tx_chn->common, true);
+ if (ret)
+ goto err;
+
+ ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
+ if (ret)
+ goto err;
+
return tx_chn;
err:
@@ -372,6 +407,41 @@ err:
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
+struct k3_udma_glue_tx_channel *
+k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_tx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id)
+{
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int ret;
+
+ tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+ if (!tx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ tx_chn->common.dev = dev;
+ tx_chn->common.swdata_size = cfg->swdata_size;
+ tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+ tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+ tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+ tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+ ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
+ if (ret)
+ goto err;
+
+ ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
+ if (ret)
+ goto err;
+
+ return tx_chn;
+
+err:
+ k3_udma_glue_release_tx_chn(tx_chn);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_for_thread_id);
+
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
if (tx_chn->psil_paired) {
@@ -1000,12 +1070,59 @@ err:
return ERR_PTR(ret);
}
+static int
+k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn,
+ struct k3_udma_glue_rx_channel_cfg *cfg,
+ struct device *dev)
+{
+ int ret, i;
+
+ rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+ rx_chn->common.psdata_size,
+ rx_chn->common.swdata_size);
+
+ rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+ sizeof(*rx_chn->flows), GFP_KERNEL);
+ if (!rx_chn->flows)
+ return -ENOMEM;
+
+ rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+ rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+ dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
+ rx_chn->common.src_thread, rx_chn->flow_id_base);
+ ret = device_register(&rx_chn->common.chan_dev);
+ if (ret) {
+ dev_err(dev, "Channel Device registration failed %d\n", ret);
+ put_device(&rx_chn->common.chan_dev);
+ rx_chn->common.chan_dev.parent = NULL;
+ return ret;
+ }
+
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ /* prepare the channel device as coherent */
+ rx_chn->common.chan_dev.dma_coherent = true;
+ dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+ DMA_BIT_MASK(48));
+ }
+
+ ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+ k3_udma_glue_dump_rx_chn(rx_chn);
+
+ return 0;
+}
+
static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
struct k3_udma_glue_rx_channel_cfg *cfg)
{
struct k3_udma_glue_rx_channel *rx_chn;
- int ret, i;
+ int ret;
if (cfg->flow_id_num <= 0 ||
cfg->flow_id_use_rxchan_id ||
@@ -1036,44 +1153,55 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
if (ret)
goto err;
- rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
- rx_chn->common.psdata_size,
- rx_chn->common.swdata_size);
-
- rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
- sizeof(*rx_chn->flows), GFP_KERNEL);
- if (!rx_chn->flows) {
- ret = -ENOMEM;
+ ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
+ if (ret)
goto err;
- }
- rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
- rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
- dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
- rx_chn->common.src_thread);
- ret = device_register(&rx_chn->common.chan_dev);
- if (ret) {
- dev_err(dev, "Channel Device registration failed %d\n", ret);
- put_device(&rx_chn->common.chan_dev);
- rx_chn->common.chan_dev.parent = NULL;
- goto err;
- }
+ return rx_chn;
- if (xudma_is_pktdma(rx_chn->common.udmax)) {
- /* prepare the channel device as coherent */
- rx_chn->common.chan_dev.dma_coherent = true;
- dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
- DMA_BIT_MASK(48));
- }
+err:
+ k3_udma_glue_release_rx_chn(rx_chn);
+ return ERR_PTR(ret);
+}
- ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_rx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id)
+{
+ struct k3_udma_glue_rx_channel *rx_chn;
+ int ret;
+
+ if (cfg->flow_id_num <= 0 ||
+ cfg->flow_id_use_rxchan_id ||
+ cfg->def_flow_cfg ||
+ cfg->flow_id_base < 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Remote RX channel is under control of Remote CPU core, so
+ * Linux can only request and manipulate by dedicated RX flows
+ */
+
+ rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+ if (!rx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ rx_chn->common.dev = dev;
+ rx_chn->common.swdata_size = cfg->swdata_size;
+ rx_chn->remote = true;
+ rx_chn->udma_rchan_id = -1;
+ rx_chn->flow_num = cfg->flow_id_num;
+ rx_chn->flow_id_base = cfg->flow_id_base;
+ rx_chn->psil_paired = false;
+
+ ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
if (ret)
goto err;
- for (i = 0; i < rx_chn->flow_num; i++)
- rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
-
- k3_udma_glue_dump_rx_chn(rx_chn);
+ ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
+ if (ret)
+ goto err;
return rx_chn;
@@ -1081,6 +1209,7 @@ err:
k3_udma_glue_release_rx_chn(rx_chn);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_remote_rx_chn_for_thread_id);
struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
@@ -1399,6 +1528,9 @@ int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
}
+ if (!flow->virq)
+ return -ENXIO;
+
return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
@@ -1442,4 +1574,5 @@ static int __init k3_udma_glue_class_init(void)
}
module_init(k3_udma_glue_class_init);
+MODULE_DESCRIPTION("TI K3 NAVSS DMA glue interface");
MODULE_LICENSE("GPL v2");
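
The two *_for_thread_id() entry points added above let a client that already knows its PSI-L thread numbers (such as the CSI2RX driver this series enables) request channels directly against a DMA controller node, with no "dmas" property naming the channel. A hedged usage sketch, assuming dma_node and flow_base are obtained elsewhere and abbreviating the flow configuration:

#include <linux/dma/k3-udma-glue.h>

static int example_setup_csi_rx(struct device *dev,
                                struct device_node *dma_node, int flow_base)
{
        struct k3_udma_glue_rx_channel_cfg rx_cfg = {
                .flow_id_base = flow_base,
                .flow_id_num  = 1,
                /* .def_flow_cfg must stay NULL on the remote-RX path */
        };
        struct k3_udma_glue_rx_channel *rx_chn;
        int irq;

        rx_chn = k3_udma_glue_request_remote_rx_chn_for_thread_id(dev, &rx_cfg,
                                                                  dma_node,
                                                                  0x4940);
        if (IS_ERR(rx_chn))
                return PTR_ERR(rx_chn);

        irq = k3_udma_glue_rx_get_irq(rx_chn, 0);
        if (irq < 0)    /* a missing IRQ is now -ENXIO rather than 0 */
                return irq;

        return 0;
}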
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 6400d06588a2..7ed1956b4642 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -3185,27 +3185,40 @@ static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
d->static_tr.elcnt = elcnt;
- /*
- * PDMA must to close the packet when the channel is in packet mode.
- * For TR mode when the channel is not cyclic we also need PDMA to close
- * the packet otherwise the transfer will stall because PDMA holds on
- * the data it has received from the peripheral.
- */
if (uc->config.pkt_mode || !uc->cyclic) {
+ /*
+ * PDMA must close the packet when the channel is in packet mode.
+ * For TR mode when the channel is not cyclic we also need PDMA
+ * to close the packet otherwise the transfer will stall because
+ * PDMA holds on the data it has received from the peripheral.
+ */
unsigned int div = dev_width * elcnt;
if (uc->cyclic)
d->static_tr.bstcnt = d->residue / d->sglen / div;
else
d->static_tr.bstcnt = d->residue / div;
+ } else if (uc->ud->match_data->type == DMA_TYPE_BCDMA &&
+ uc->config.dir == DMA_DEV_TO_MEM &&
+ uc->cyclic) {
+ /*
+ * For cyclic mode with BCDMA we have to set EOP in each TR to
+ * prevent short packet errors seen on channel teardown. So the
+ * PDMA must close the packet after every TR transfer by setting
+ * burst count equal to the number of bytes transferred.
+ */
+ struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base;
- if (uc->config.dir == DMA_DEV_TO_MEM &&
- d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
- return -EINVAL;
+ d->static_tr.bstcnt =
+ (tr_req->icnt0 * tr_req->icnt1) / dev_width;
} else {
d->static_tr.bstcnt = 0;
}
+ if (uc->config.dir == DMA_DEV_TO_MEM &&
+ d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+ return -EINVAL;
+
return 0;
}
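
With the range check hoisted out of the first branch, the statictr Z limit now also guards the burst count computed by the new BCDMA cyclic branch. A hedged worked example of that formula, with made-up TR geometry:

/* Per-TR burst count as the BCDMA cyclic branch computes it (sketch). */
static inline u32 example_bcdma_cyclic_bstcnt(u16 icnt0, u16 icnt1,
                                              u32 dev_width)
{
        /* icnt0 * icnt1 is the number of bytes one type-1 TR carries */
        return ((u32)icnt0 * icnt1) / dev_width;
}

/*
 * example_bcdma_cyclic_bstcnt(512, 2, 4) == 256 elements, well under the
 * 4095 (0xfff) Z mask the old error message hardcoded for UDMA.
 */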
@@ -3450,8 +3463,9 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@@ -3476,6 +3490,7 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
unsigned int i;
int num_tr;
+ u32 period_csf = 0;
num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
&tr0_cnt1, &tr1_cnt0);
@@ -3498,6 +3513,20 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
period_addr = buf_addr |
((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
+ /*
+ * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the
+ * last TR of a descriptor, to mark the packet as complete.
+ * This is required for getting the teardown completion message in case
+ * of TX, and to avoid short-packet error in case of RX.
+ *
+ * As we are in cyclic mode, we do not know which period might be the
+ * last one, so set the flag for each period.
+ */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
+ uc->ud->match_data->type == DMA_TYPE_BCDMA) {
+ period_csf = CPPI5_TR_CSF_EOP;
+ }
+
for (i = 0; i < periods; i++) {
int tr_idx = i * num_tr;
@@ -3525,8 +3554,10 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
}
if (!(flags & DMA_PREP_INTERRUPT))
- cppi5_tr_csf_set(&tr_req[tr_idx].flags,
- CPPI5_TR_CSF_SUPR_EVT);
+ period_csf |= CPPI5_TR_CSF_SUPR_EVT;
+
+ if (period_csf)
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, period_csf);
period_addr += period_len;
}
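
Collecting the per-period flags in period_csf before a single cppi5_tr_csf_set() call matters on the assumption (consistent with the helper's field-update style) that the call writes the whole CSF field rather than OR-ing into it; one call per flag would let SUPR_EVT clobber the EOP bit set for the BCDMA <-> PDMA case. A reduced sketch of the pattern, with assumed predicates:

        u32 csf = 0;

        if (bcdma_to_pdma)                      /* assumed predicate */
                csf |= CPPI5_TR_CSF_EOP;        /* close the packet after each TR */
        if (!(flags & DMA_PREP_INTERRUPT))
                csf |= CPPI5_TR_CSF_SUPR_EVT;   /* suppress the per-TR event */
        if (csf)
                cppi5_tr_csf_set(&tr->flags, csf);      /* one combined write */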
@@ -3655,8 +3686,9 @@ udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@@ -4372,6 +4404,18 @@ static struct udma_match_data j721s2_bcdma_csi_data = {
.soc_data = &j721s2_bcdma_csi_soc_data,
};
+static struct udma_match_data j722s_bcdma_csi_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x3100,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &j721s2_bcdma_csi_soc_data,
+};
+
static const struct of_device_id udma_of_match[] = {
{
.compatible = "ti,am654-navss-main-udmap",
@@ -4403,8 +4447,13 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,j721s2-dmss-bcdma-csi",
.data = &j721s2_bcdma_csi_data,
},
+ {
+ .compatible = "ti,j722s-dmss-bcdma-csi",
+ .data = &j722s_bcdma_csi_data,
+ },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, udma_of_match);
static struct udma_soc_data am654_soc_data = {
.oes = {
@@ -4472,7 +4521,9 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
break;
case DMA_TYPE_BCDMA:
- ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
+ ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2) +
+ BCDMA_CAP3_HBCHAN_CNT(cap3) +
+ BCDMA_CAP3_UBCHAN_CNT(cap3);
ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
ud->rflow_cnt = ud->rchan_cnt;
@@ -5621,6 +5672,7 @@ static struct platform_driver udma_driver = {
};
module_platform_driver(udma_driver);
+MODULE_DESCRIPTION("Texas Instruments UDMA support");
MODULE_LICENSE("GPL v2");
/* Private interfaces to UDMA */
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
index d349c6d482ae..9062a237cd16 100644
--- a/drivers/dma/ti/k3-udma.h
+++ b/drivers/dma/ti/k3-udma.h
@@ -131,7 +131,6 @@ int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
struct device *xudma_get_device(struct udma_dev *ud);
struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud);
-void xudma_dev_put(struct udma_dev *ud);
u32 xudma_dev_get_psil_base(struct udma_dev *ud);
struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index b9e0e22383b7..8c023c6e623a 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1186,10 +1186,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
d->dev_addr = dev_addr;
d->fi = burst;
d->es = es;
+ d->sglen = 1;
d->sg[0].addr = buf_addr;
d->sg[0].en = period_len / es_bytes[es];
d->sg[0].fn = buf_len / period_len;
- d->sglen = 1;
d->ccr = c->ccr;
if (dir == DMA_DEV_TO_MEM)
@@ -1258,10 +1258,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
d->dev_addr = src;
d->fi = 0;
d->es = data_type;
+ d->sglen = 1;
d->sg[0].en = len / BIT(data_type);
d->sg[0].fn = 1;
d->sg[0].addr = dest;
- d->sglen = 1;
d->ccr = c->ccr;
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
@@ -1309,6 +1309,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
if (data_type > CSDP_DATA_TYPE_32)
data_type = CSDP_DATA_TYPE_32;
+ d->sglen = 1;
sg = &d->sg[0];
d->dir = DMA_MEM_TO_MEM;
d->dev_addr = xt->src_start;
@@ -1316,7 +1317,6 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
sg->en = xt->sgl[0].size / BIT(data_type);
sg->fn = xt->numf;
sg->addr = xt->dst_start;
- d->sglen = 1;
d->ccr = c->ccr;
src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
@@ -1915,7 +1915,7 @@ MODULE_DEVICE_TABLE(of, omap_dma_match);
static struct platform_driver omap_dma_driver = {
.probe = omap_dma_probe,
- .remove_new = omap_dma_remove,
+ .remove = omap_dma_remove,
.driver = {
.name = "omap-dma-engine",
.of_match_table = omap_dma_match,
@@ -1950,4 +1950,5 @@ static void __exit omap_dma_exit(void)
module_exit(omap_dma_exit);
MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("Texas Instruments sDMA DMAengine support");
MODULE_LICENSE("GPL");
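
The three omap-dma hunks all hoist d->sglen = 1 above the first write to d->sg[0]. That ordering is what __counted_by-style bounds checking requires: once a flexible array is annotated with its counter field, the counter must be initialized before the array is indexed. A hedged sketch of the shape involved (the annotation is assumed here, mirroring the driver's descriptor; it is not shown in these hunks):

struct example_desc {                   /* hypothetical mirror of omap_desc */
        unsigned int sglen;
        struct omap_sg sg[] __counted_by(sglen);
};

static void example_fill(struct example_desc *d, dma_addr_t addr)
{
        d->sglen = 1;           /* set the counter first ... */
        d->sg[0].addr = addr;   /* ... then touch the checked array */
}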