Diffstat (limited to 'drivers/spi/spi-pxa2xx-dma.c')
-rw-r--r--	drivers/spi/spi-pxa2xx-dma.c	334
1 file changed, 83 insertions, 251 deletions
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 3c0b55125f1e..08cb6e96ac94 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -1,167 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * PXA2xx SPI DMA engine support.
  *
- * Copyright (C) 2013, Intel Corporation
+ * Copyright (C) 2013, 2021 Intel Corporation
  * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
-#include <linux/init.h>
-#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/dev_printk.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/pxa2xx_ssp.h>
+#include <linux/errno.h>
+#include <linux/irqreturn.h>
 #include <linux/scatterlist.h>
-#include <linux/sizes.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
 #include <linux/spi/spi.h>
-#include <linux/spi/pxa2xx_spi.h>
 
 #include "spi-pxa2xx.h"
 
-static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
-				     enum dma_data_direction dir)
-{
-	int i, nents, len = drv_data->len;
-	struct scatterlist *sg;
-	struct device *dmadev;
-	struct sg_table *sgt;
-	void *buf, *pbuf;
-
-	/*
-	 * Some DMA controllers have problems transferring buffers that are
-	 * not multiple of 4 bytes. So we truncate the transfer so that it
-	 * is suitable for such controllers, and handle the trailing bytes
-	 * manually after the DMA completes.
-	 *
-	 * REVISIT: It would be better if this information could be
-	 * retrieved directly from the DMA device in a similar way than
-	 * ->copy_align etc. is done.
-	 */
-	len = ALIGN(drv_data->len, 4);
-
-	if (dir == DMA_TO_DEVICE) {
-		dmadev = drv_data->tx_chan->device->dev;
-		sgt = &drv_data->tx_sgt;
-		buf = drv_data->tx;
-		drv_data->tx_map_len = len;
-	} else {
-		dmadev = drv_data->rx_chan->device->dev;
-		sgt = &drv_data->rx_sgt;
-		buf = drv_data->rx;
-		drv_data->rx_map_len = len;
-	}
-
-	nents = DIV_ROUND_UP(len, SZ_2K);
-	if (nents != sgt->nents) {
-		int ret;
-
-		sg_free_table(sgt);
-		ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
-		if (ret)
-			return ret;
-	}
-
-	pbuf = buf;
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		size_t bytes = min_t(size_t, len, SZ_2K);
-
-		if (buf)
-			sg_set_buf(sg, pbuf, bytes);
-		else
-			sg_set_buf(sg, drv_data->dummy, bytes);
-
-		pbuf += bytes;
-		len -= bytes;
-	}
-
-	nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir);
-	if (!nents)
-		return -ENOMEM;
-
-	return nents;
-}
-
-static void pxa2xx_spi_unmap_dma_buffer(struct driver_data *drv_data,
-					enum dma_data_direction dir)
-{
-	struct device *dmadev;
-	struct sg_table *sgt;
-
-	if (dir == DMA_TO_DEVICE) {
-		dmadev = drv_data->tx_chan->device->dev;
-		sgt = &drv_data->tx_sgt;
-	} else {
-		dmadev = drv_data->rx_chan->device->dev;
-		sgt = &drv_data->rx_sgt;
-	}
-
-	dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir);
-}
-
-static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
-{
-	if (!drv_data->dma_mapped)
-		return;
-
-	pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE);
-	pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
-
-	drv_data->dma_mapped = 0;
-}
+struct device;
 
 static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
 					     bool error)
 {
-	struct spi_message *msg = drv_data->cur_msg;
+	struct spi_message *msg = drv_data->controller->cur_msg;
 
 	/*
 	 * It is possible that one CPU is handling ROR interrupt and other
 	 * just gets DMA completion. Calling pump_transfers() twice for the
 	 * same transfer leads to problems thus we prevent concurrent calls
-	 * by using ->dma_running.
+	 * by using dma_running.
 	 */
 	if (atomic_dec_and_test(&drv_data->dma_running)) {
-		void __iomem *reg = drv_data->ioaddr;
-
 		/*
 		 * If the other CPU is still handling the ROR interrupt we
 		 * might not know about the error yet. So we re-check the
 		 * ROR bit here before we clear the status register.
 		 */
-		if (!error) {
-			u32 status = read_SSSR(reg) & drv_data->mask_sr;
-			error = status & SSSR_ROR;
-		}
+		if (!error)
+			error = read_SSSR_bits(drv_data, drv_data->mask_sr) & SSSR_ROR;
 
 		/* Clear status & disable interrupts */
-		write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+		clear_SSCR1_bits(drv_data, drv_data->dma_cr1);
 		write_SSSR_CS(drv_data, drv_data->clear_sr);
 		if (!pxa25x_ssp_comp(drv_data))
-			write_SSTO(0, reg);
-
-		if (!error) {
-			pxa2xx_spi_unmap_dma_buffers(drv_data);
-
-			/* Handle the last bytes of unaligned transfer */
-			drv_data->tx += drv_data->tx_map_len;
-			drv_data->write(drv_data);
-
-			drv_data->rx += drv_data->rx_map_len;
-			drv_data->read(drv_data);
+			pxa2xx_spi_write(drv_data, SSTO, 0);
 
-			msg->actual_length += drv_data->len;
-			msg->state = pxa2xx_spi_next_transfer(drv_data);
-		} else {
+		if (error) {
 			/* In case we got an error we disable the SSP now */
-			write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
-
-			msg->state = ERROR_STATE;
+			pxa_ssp_disable(drv_data->ssp);
+			msg->status = -EIO;
 		}
 
-		tasklet_schedule(&drv_data->pump_transfers);
+		spi_finalize_current_transfer(drv_data->controller);
 	}
 }
@@ -172,15 +65,14 @@ static void pxa2xx_spi_dma_callback(void *data)
 
 static struct dma_async_tx_descriptor *
 pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
-			   enum dma_transfer_direction dir)
+			   enum dma_transfer_direction dir,
+			   struct spi_transfer *xfer)
 {
-	struct pxa2xx_spi_master *pdata = drv_data->master_info;
-	struct chip_data *chip = drv_data->cur_chip;
 	enum dma_slave_buswidth width;
 	struct dma_slave_config cfg;
 	struct dma_chan *chan;
 	struct sg_table *sgt;
-	int nents, ret;
+	int ret;
 
 	switch (drv_data->n_bytes) {
 	case 1:
@@ -198,89 +90,41 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
 	cfg.direction = dir;
 
 	if (dir == DMA_MEM_TO_DEV) {
-		cfg.dst_addr = drv_data->ssdr_physical;
+		cfg.dst_addr = drv_data->ssp->phys_base + SSDR;
 		cfg.dst_addr_width = width;
-		cfg.dst_maxburst = chip->dma_burst_size;
-		cfg.slave_id = pdata->tx_slave_id;
+		cfg.dst_maxburst = drv_data->controller_info->dma_burst_size;
 
-		sgt = &drv_data->tx_sgt;
-		nents = drv_data->tx_nents;
-		chan = drv_data->tx_chan;
+		sgt = &xfer->tx_sg;
+		chan = drv_data->controller->dma_tx;
 	} else {
-		cfg.src_addr = drv_data->ssdr_physical;
+		cfg.src_addr = drv_data->ssp->phys_base + SSDR;
 		cfg.src_addr_width = width;
-		cfg.src_maxburst = chip->dma_burst_size;
-		cfg.slave_id = pdata->rx_slave_id;
+		cfg.src_maxburst = drv_data->controller_info->dma_burst_size;
 
-		sgt = &drv_data->rx_sgt;
-		nents = drv_data->rx_nents;
-		chan = drv_data->rx_chan;
+		sgt = &xfer->rx_sg;
+		chan = drv_data->controller->dma_rx;
 	}
 
 	ret = dmaengine_slave_config(chan, &cfg);
 	if (ret) {
-		dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
+		dev_warn(drv_data->ssp->dev, "DMA slave config failed\n");
 		return NULL;
 	}
 
-	return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
+	return dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 }
 
-static bool pxa2xx_spi_dma_filter(struct dma_chan *chan, void *param)
-{
-	const struct pxa2xx_spi_master *pdata = param;
-
-	return chan->chan_id == pdata->tx_chan_id ||
-	       chan->chan_id == pdata->rx_chan_id;
-}
-
-bool pxa2xx_spi_dma_is_possible(size_t len)
-{
-	return len <= MAX_DMA_LEN;
-}
-
-int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
-{
-	const struct chip_data *chip = drv_data->cur_chip;
-	int ret;
-
-	if (!chip->enable_dma)
-		return 0;
-
-	/* Don't bother with DMA if we can't do even a single burst */
-	if (drv_data->len < chip->dma_burst_size)
-		return 0;
-
-	ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE);
-	if (ret <= 0) {
-		dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n");
-		return 0;
-	}
-
-	drv_data->tx_nents = ret;
-
-	ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE);
-	if (ret <= 0) {
-		pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
-		dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n");
-		return 0;
-	}
-
-	drv_data->rx_nents = ret;
-	return 1;
-}
-
 irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
 {
 	u32 status;
 
-	status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr;
+	status = read_SSSR_bits(drv_data, drv_data->mask_sr);
 	if (status & SSSR_ROR) {
-		dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
+		dev_err(drv_data->ssp->dev, "FIFO overrun\n");
 
-		dmaengine_terminate_all(drv_data->rx_chan);
-		dmaengine_terminate_all(drv_data->tx_chan);
+		dmaengine_terminate_async(drv_data->controller->dma_rx);
+		dmaengine_terminate_async(drv_data->controller->dma_tx);
 
 		pxa2xx_spi_dma_transfer_complete(drv_data, true);
 		return IRQ_HANDLED;
@@ -289,22 +133,24 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
 	return IRQ_NONE;
 }
 
-int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
+int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
+			   struct spi_transfer *xfer)
 {
 	struct dma_async_tx_descriptor *tx_desc, *rx_desc;
+	int err;
 
-	tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
+	tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
 	if (!tx_desc) {
-		dev_err(&drv_data->pdev->dev,
-			"failed to get DMA TX descriptor\n");
-		return -EBUSY;
+		dev_err(drv_data->ssp->dev, "failed to get DMA TX descriptor\n");
+		err = -EBUSY;
+		goto err_tx;
 	}
 
-	rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
+	rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
 	if (!rx_desc) {
-		dev_err(&drv_data->pdev->dev,
-			"failed to get DMA RX descriptor\n");
-		return -EBUSY;
+		dev_err(drv_data->ssp->dev, "failed to get DMA RX descriptor\n");
+		err = -EBUSY;
+		goto err_rx;
 	}
 
 	/* We are ready when RX completes */
@@ -314,39 +160,48 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
 	dmaengine_submit(rx_desc);
 	dmaengine_submit(tx_desc);
 	return 0;
+
+err_rx:
+	dmaengine_terminate_async(drv_data->controller->dma_tx);
+err_tx:
+	return err;
 }
 
 void pxa2xx_spi_dma_start(struct driver_data *drv_data)
 {
-	dma_async_issue_pending(drv_data->rx_chan);
-	dma_async_issue_pending(drv_data->tx_chan);
+	dma_async_issue_pending(drv_data->controller->dma_rx);
+	dma_async_issue_pending(drv_data->controller->dma_tx);
 
 	atomic_set(&drv_data->dma_running, 1);
 }
 
+void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
+{
+	atomic_set(&drv_data->dma_running, 0);
+	dmaengine_terminate_sync(drv_data->controller->dma_rx);
+	dmaengine_terminate_sync(drv_data->controller->dma_tx);
+}
+
 int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
 {
-	struct pxa2xx_spi_master *pdata = drv_data->master_info;
-	struct device *dev = &drv_data->pdev->dev;
+	struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
+	struct spi_controller *controller = drv_data->controller;
+	struct device *dev = drv_data->ssp->dev;
 	dma_cap_mask_t mask;
 
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
 
-	drv_data->dummy = devm_kzalloc(dev, SZ_2K, GFP_KERNEL);
-	if (!drv_data->dummy)
-		return -ENOMEM;
-
-	drv_data->tx_chan = dma_request_slave_channel_compat(mask,
-				pxa2xx_spi_dma_filter, pdata, dev, "tx");
-	if (!drv_data->tx_chan)
+	controller->dma_tx = dma_request_slave_channel_compat(mask,
+				pdata->dma_filter, pdata->tx_param, dev, "tx");
+	if (!controller->dma_tx)
 		return -ENODEV;
 
-	drv_data->rx_chan = dma_request_slave_channel_compat(mask,
-				pxa2xx_spi_dma_filter, pdata, dev, "rx");
-	if (!drv_data->rx_chan) {
-		dma_release_channel(drv_data->tx_chan);
-		drv_data->tx_chan = NULL;
+	controller->dma_rx = dma_request_slave_channel_compat(mask,
+				pdata->dma_filter, pdata->rx_param, dev, "rx");
+	if (!controller->dma_rx) {
+		dma_release_channel(controller->dma_tx);
+		controller->dma_tx = NULL;
 		return -ENODEV;
 	}
@@ -355,39 +210,16 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
 
 void pxa2xx_spi_dma_release(struct driver_data *drv_data)
 {
-	if (drv_data->rx_chan) {
-		dmaengine_terminate_all(drv_data->rx_chan);
-		dma_release_channel(drv_data->rx_chan);
-		sg_free_table(&drv_data->rx_sgt);
-		drv_data->rx_chan = NULL;
+	struct spi_controller *controller = drv_data->controller;
+
+	if (controller->dma_rx) {
+		dmaengine_terminate_sync(controller->dma_rx);
+		dma_release_channel(controller->dma_rx);
+		controller->dma_rx = NULL;
 	}
-	if (drv_data->tx_chan) {
-		dmaengine_terminate_all(drv_data->tx_chan);
-		dma_release_channel(drv_data->tx_chan);
-		sg_free_table(&drv_data->tx_sgt);
-		drv_data->tx_chan = NULL;
+	if (controller->dma_tx) {
+		dmaengine_terminate_sync(controller->dma_tx);
+		dma_release_channel(controller->dma_tx);
+		controller->dma_tx = NULL;
 	}
 }
-
-void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
-{
-}
-
-int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
-					   struct spi_device *spi,
-					   u8 bits_per_word, u32 *burst_code,
-					   u32 *threshold)
-{
-	struct pxa2xx_spi_chip *chip_info = spi->controller_data;
-
-	/*
-	 * If the DMA burst size is given in chip_info we use that,
-	 * otherwise we use the default. Also we use the default FIFO
-	 * thresholds for now.
-	 */
-	*burst_code = chip_info ? chip_info->dma_burst_size : 16;
-	*threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
-		   | SSCR1_TxTresh(TX_THRESH_DFLT);
-
-	return 0;
-}
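Two patterns in the patch above are worth calling out. First, the completion path: the ROR (receive FIFO overrun) interrupt and the DMA completion callback can fire on different CPUs for the same transfer, and both funnel into pxa2xx_spi_dma_transfer_complete(), which uses atomic_dec_and_test() on dma_running so that exactly one caller finalizes the transfer. A minimal standalone sketch of that guard follows; xfer_state and xfer_finalize are hypothetical names standing in for the driver's real types and for spi_finalize_current_transfer():

/* Sketch only, not driver code: the dma_running guard in isolation. */
#include <linux/atomic.h>
#include <linux/types.h>

struct xfer_state {
	atomic_t dma_running;	/* mirrors drv_data->dma_running */
};

/* Hypothetical stand-in for the real completion work. */
static void xfer_finalize(bool error)
{
}

/* Called from both the ROR interrupt path and the DMA callback. */
static void xfer_complete(struct xfer_state *st, bool error)
{
	/*
	 * The counter is set to 1 when DMA starts. Exactly one of the
	 * racing callers sees it drop to zero and runs the completion;
	 * the other sees -1 and returns without touching the message.
	 */
	if (atomic_dec_and_test(&st->dma_running))
		xfer_finalize(error);
}

This is why pxa2xx_spi_dma_start() re-arms the counter with atomic_set(&drv_data->dma_running, 1) once per transfer, and why the new pxa2xx_spi_dma_stop() clears it before terminating the channels.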
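Second, pxa2xx_spi_dma_prepare() and pxa2xx_spi_dma_start() follow the standard dmaengine slave sequence: dmaengine_slave_config(), dmaengine_prep_slave_sg(), attach the completion callback to the RX descriptor (the transfer is only done once the last RX byte has landed), dmaengine_submit(), and finally dma_async_issue_pending(). A condensed sketch of that sequence for a single RX channel, assuming the channel and a DMA-mapped sg table are already in hand (sketch_dma_rx and its parameters are illustrative, not from this file):

/* Sketch only: the generic dmaengine slave flow used by the driver. */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

static int sketch_dma_rx(struct dma_chan *chan, struct sg_table *sgt,
			 dma_addr_t fifo, u32 burst,
			 dma_async_tx_callback done, void *arg)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = fifo,		/* device FIFO address */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst = burst,
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	desc = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
				       DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	/* Completion callback fires when the last RX byte lands. */
	desc->callback = done;
	desc->callback_param = arg;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* nothing moves until this */
	return 0;
}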
