path: root/drivers/spi/spi-s3c64xx.c
author		Adithya K V <adithya.kv@samsung.com>	2022-05-24 19:31:32 +0530
committer	Mark Brown <broonie@kernel.org>	2022-06-06 12:39:17 +0100
commit		f52b03c707444c5a3d1a0b9c5724f93ddc3c588e (patch)
tree		fbbd7feb679912eea3fe8551449cbe95500392b1	/drivers/spi/spi-s3c64xx.c
parent		8e3ca32f46994e74b7f43c57731150b2aedb2630 (diff)
spi: s3c64xx: requests spi-dma channel only during data transfer
The current s3c64xx SPI driver acquires its DMA channels at driver probe time and holds on to them even when they are not in use (no DMA transfer in progress). This is a problem especially when all channels of the DMA controller are exhausted (other IPs on the same controller also acquire channels): a new IP/device that then requests a channel on that controller cannot get one allocated. The issue can be avoided if the s3c64xx driver requests and releases its DMA channels around each data transfer, so modify the driver to request the channels before and release them after every DMA-mode transfer.

Signed-off-by: Adithya K V <adithya.kv@samsung.com>
Link: https://lore.kernel.org/r/20220524140132.59300-1-adithya.kv@samsung.com
Signed-off-by: Mark Brown <broonie@kernel.org>
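For readers coming to this from outside the kernel tree, the on-demand pattern the patch adopts can be sketched roughly as follows, using the generic dmaengine API (dma_request_chan() / dma_release_channel()). This is a minimal illustration only; the my_spi_ctx structure and the my_spi_prepare()/my_spi_finish() helpers are hypothetical names, not code from spi-s3c64xx.c.

/*
 * Illustrative sketch only (hypothetical names, not spi-s3c64xx code):
 * request the DMA channels just before a transfer and hand them back as
 * soon as it completes, falling back to PIO when no channel is free.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

struct my_spi_ctx {
	struct device *dev;
	struct dma_chan *rx_ch;
	struct dma_chan *tx_ch;
};

/* Request both channels; on failure leave them NULL so the caller can
 * fall back to PIO instead of failing the transfer. */
static void my_spi_prepare(struct my_spi_ctx *ctx)
{
	ctx->rx_ch = dma_request_chan(ctx->dev, "rx");
	if (IS_ERR(ctx->rx_ch)) {
		dev_warn(ctx->dev, "no RX DMA channel, using PIO\n");
		ctx->rx_ch = NULL;
		return;
	}

	ctx->tx_ch = dma_request_chan(ctx->dev, "tx");
	if (IS_ERR(ctx->tx_ch)) {
		dev_warn(ctx->dev, "no TX DMA channel, using PIO\n");
		dma_release_channel(ctx->rx_ch);
		ctx->rx_ch = NULL;
		ctx->tx_ch = NULL;
	}
}

/* Release the channels right after the transfer so other users of the
 * same DMA controller can allocate them. */
static void my_spi_finish(struct my_spi_ctx *ctx)
{
	if (ctx->rx_ch && ctx->tx_ch) {
		dma_release_channel(ctx->rx_ch);
		dma_release_channel(ctx->tx_ch);
		ctx->rx_ch = NULL;
		ctx->tx_ch = NULL;
	}
}

In the patch itself the equivalent logic lives in s3c64xx_spi_prepare_transfer() (request) and at the end of s3c64xx_spi_transfer_one() (release), with s3c64xx_spi_can_dma() returning false when the channels could not be obtained.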
Diffstat (limited to 'drivers/spi/spi-s3c64xx.c')
-rw-r--r--	drivers/spi/spi-s3c64xx.c	56
1 file changed, 32 insertions, 24 deletions
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index c26440e9058d..82558e37c735 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -350,6 +350,23 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 	if (is_polling(sdd))
 		return 0;
+	/* Requests DMA channels */
+	sdd->rx_dma.ch = dma_request_chan(&sdd->pdev->dev, "rx");
+	if (IS_ERR(sdd->rx_dma.ch)) {
+		dev_err(&sdd->pdev->dev, "Failed to get RX DMA channel\n");
+		sdd->rx_dma.ch = 0;
+		return 0;
+	}
+
+	sdd->tx_dma.ch = dma_request_chan(&sdd->pdev->dev, "tx");
+	if (IS_ERR(sdd->tx_dma.ch)) {
+		dev_err(&sdd->pdev->dev, "Failed to get TX DMA channel\n");
+		dma_release_channel(sdd->rx_dma.ch);
+		sdd->tx_dma.ch = 0;
+		sdd->rx_dma.ch = 0;
+		return 0;
+	}
+
 	spi->dma_rx = sdd->rx_dma.ch;
 	spi->dma_tx = sdd->tx_dma.ch;
@@ -362,7 +379,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
 {
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
-	return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
+	if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
+		return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
+	} else {
+		return 0;
+	}
+
 }
static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
@@ -697,7 +719,7 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
 	    sdd->rx_dma.ch && sdd->tx_dma.ch) {
 		use_dma = 1;
-	} else if (is_polling(sdd) && xfer->len > fifo_len) {
+	} else if (xfer->len > fifo_len) {
 		tx_buf = xfer->tx_buf;
 		rx_buf = xfer->rx_buf;
 		origin_len = xfer->len;
@@ -782,6 +804,14 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
 		xfer->len = origin_len;
 	}
+	/* Releases DMA channels after data transfer is completed */
+	if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
+		dma_release_channel(sdd->rx_dma.ch);
+		dma_release_channel(sdd->tx_dma.ch);
+		sdd->rx_dma.ch = 0;
+		sdd->tx_dma.ch = 0;
+	}
+
 	return status;
 }
@@ -1167,22 +1197,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 		}
 	}
-	if (!is_polling(sdd)) {
-		/* Acquire DMA channels */
-		sdd->rx_dma.ch = dma_request_chan(&pdev->dev, "rx");
-		if (IS_ERR(sdd->rx_dma.ch)) {
-			dev_err(&pdev->dev, "Failed to get RX DMA channel\n");
-			ret = PTR_ERR(sdd->rx_dma.ch);
-			goto err_disable_io_clk;
-		}
-		sdd->tx_dma.ch = dma_request_chan(&pdev->dev, "tx");
-		if (IS_ERR(sdd->tx_dma.ch)) {
-			dev_err(&pdev->dev, "Failed to get TX DMA channel\n");
-			ret = PTR_ERR(sdd->tx_dma.ch);
-			goto err_release_rx_dma;
-		}
-	}
-
 	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
 	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_set_active(&pdev->dev);
@@ -1228,12 +1242,6 @@ err_pm_put:
 	pm_runtime_disable(&pdev->dev);
 	pm_runtime_set_suspended(&pdev->dev);
-	if (!is_polling(sdd))
-		dma_release_channel(sdd->tx_dma.ch);
-err_release_rx_dma:
-	if (!is_polling(sdd))
-		dma_release_channel(sdd->rx_dma.ch);
-err_disable_io_clk:
 	clk_disable_unprepare(sdd->ioclk);
 err_disable_src_clk:
 	clk_disable_unprepare(sdd->src_clk);