Diffstat (limited to 'drivers/dma/tegra20-apb-dma.c')
-rw-r--r--   drivers/dma/tegra20-apb-dma.c | 1022
1 file changed, 590 insertions(+), 432 deletions(-)
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index f137914d7b16..14a61e53a41b 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * DMA driver for Nvidia's Tegra20 APB DMA controller. * - * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. + * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved. */ #include <linux/bitops.h> @@ -28,15 +17,19 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/of_dma.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pm_runtime.h> +#include <linux/reset.h> #include <linux/slab.h> -#include <linux/clk/tegra.h> +#include <linux/wait.h> #include "dmaengine.h" +#define CREATE_TRACE_POINTS +#include <trace/events/tegra_apb_dma.h> + #define TEGRA_APBDMA_GENERAL 0x0 #define TEGRA_APBDMA_GENERAL_ENABLE BIT(31) @@ -53,6 +46,7 @@ #define TEGRA_APBDMA_CSR_ONCE BIT(27) #define TEGRA_APBDMA_CSR_FLOW BIT(21) #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16 +#define TEGRA_APBDMA_CSR_REQ_SEL_MASK 0x1F #define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC /* STATUS register */ @@ -65,7 +59,7 @@ #define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC #define TEGRA_APBDMA_CHAN_CSRE 0x00C -#define TEGRA_APBDMA_CHAN_CSRE_PAUSE (1 << 31) +#define TEGRA_APBDMA_CHAN_CSRE_PAUSE BIT(31) /* AHB memory address */ #define TEGRA_APBDMA_CHAN_AHBPTR 0x010 @@ -99,6 +93,11 @@ #define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27) #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16) +/* Tegra148 specific registers */ +#define TEGRA_APBDMA_CHAN_WCOUNT 0x20 + +#define TEGRA_APBDMA_CHAN_WORD_TRANSFER 0x24 + /* * If any burst is in flight and DMA paused then this is the time to complete * on-flight burst and update DMA status register. @@ -108,34 +107,38 @@ /* Channel base address offset from APBDMA base address */ #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000 -/* DMA channel register space size */ -#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20 +#define TEGRA_APBDMA_SLAVE_ID_INVALID (TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1) struct tegra_dma; /* * tegra_dma_chip_data Tegra chip specific DMA data * @nr_channels: Number of channels available in the controller. + * @channel_reg_size: Channel register size/stride. * @max_dma_count: Maximum DMA transfer count supported by DMA controller. * @support_channel_pause: Support channel wise pause of dma. + * @support_separate_wcount_reg: Support separate word count register. 
*/ struct tegra_dma_chip_data { - int nr_channels; - int max_dma_count; + unsigned int nr_channels; + unsigned int channel_reg_size; + unsigned int max_dma_count; bool support_channel_pause; + bool support_separate_wcount_reg; }; /* DMA channel registers */ struct tegra_dma_channel_regs { - unsigned long csr; - unsigned long ahb_ptr; - unsigned long apb_ptr; - unsigned long ahb_seq; - unsigned long apb_seq; + u32 csr; + u32 ahb_ptr; + u32 apb_ptr; + u32 ahb_seq; + u32 apb_seq; + u32 wcount; }; /* - * tegra_dma_sg_req: Dma request details to configure hardware. This + * tegra_dma_sg_req: DMA request details to configure hardware. This * contains the details for one transfer to configure DMA hw. * The client's request for data transfer can be broken into multiple * sub-transfer as per requester details and hw support. @@ -144,12 +147,12 @@ struct tegra_dma_channel_regs { */ struct tegra_dma_sg_req { struct tegra_dma_channel_regs ch_regs; - int req_len; + unsigned int req_len; bool configured; bool last_sg; - bool half_done; struct list_head node; struct tegra_dma_desc *dma_desc; + unsigned int words_xferred; }; /* @@ -159,13 +162,13 @@ struct tegra_dma_sg_req { */ struct tegra_dma_desc { struct dma_async_tx_descriptor txd; - int bytes_requested; - int bytes_transferred; + unsigned int bytes_requested; + unsigned int bytes_transferred; enum dma_status dma_status; struct list_head node; struct list_head tx_list; struct list_head cb_node; - int cb_count; + unsigned int cb_count; }; struct tegra_dma_channel; @@ -176,11 +179,10 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc, /* tegra_dma_channel: Channel specific information */ struct tegra_dma_channel { struct dma_chan dma_chan; - char name[30]; + char name[12]; bool config_init; - int id; - int irq; - unsigned long chan_base_offset; + unsigned int id; + void __iomem *chan_addr; spinlock_t lock; bool busy; struct tegra_dma *tdma; @@ -195,12 +197,13 @@ struct tegra_dma_channel { /* ISR handler and tasklet for bottom half of isr handling */ dma_isr_handler isr_handler; struct tasklet_struct tasklet; - dma_async_tx_callback callback; - void *callback_param; /* Channel-slave specific configuration */ + unsigned int slave_id; struct dma_slave_config dma_sconfig; - struct tegra_dma_channel_regs channel_reg; + struct tegra_dma_channel_regs channel_reg; + + struct wait_queue_head wq; }; /* tegra_dma: Tegra DMA specific information */ @@ -208,15 +211,20 @@ struct tegra_dma { struct dma_device dma_dev; struct device *dev; struct clk *dma_clk; + struct reset_control *rst; spinlock_t global_lock; void __iomem *base_addr; const struct tegra_dma_chip_data *chip_data; - /* Some register need to be cache before suspend */ - u32 reg_gen; + /* + * Counter for managing global pausing of the DMA controller. + * Only applicable for devices that don't support individual + * channel pausing. 
+ */ + u32 global_pause_count; /* Last member of the structure */ - struct tegra_dma_channel channels[0]; + struct tegra_dma_channel channels[]; }; static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val) @@ -224,20 +232,15 @@ static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val) writel(val, tdma->base_addr + reg); } -static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg) -{ - return readl(tdma->base_addr + reg); -} - static inline void tdc_write(struct tegra_dma_channel *tdc, - u32 reg, u32 val) + u32 reg, u32 val) { - writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg); + writel(val, tdc->chan_addr + reg); } static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg) { - return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg); + return readl(tdc->chan_addr + reg); } static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc) @@ -245,8 +248,8 @@ static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc) return container_of(dc, struct tegra_dma_channel, dma_chan); } -static inline struct tegra_dma_desc *txd_to_tegra_dma_desc( - struct dma_async_tx_descriptor *td) +static inline struct tegra_dma_desc * +txd_to_tegra_dma_desc(struct dma_async_tx_descriptor *td) { return container_of(td, struct tegra_dma_desc, txd); } @@ -257,12 +260,9 @@ static inline struct device *tdc2dev(struct tegra_dma_channel *tdc) } static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx); -static int tegra_dma_runtime_suspend(struct device *dev); -static int tegra_dma_runtime_resume(struct device *dev); /* Get DMA desc from free list, if not there then allocate it. */ -static struct tegra_dma_desc *tegra_dma_desc_get( - struct tegra_dma_channel *tdc) +static struct tegra_dma_desc *tegra_dma_desc_get(struct tegra_dma_channel *tdc) { struct tegra_dma_desc *dma_desc; unsigned long flags; @@ -271,7 +271,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get( /* Do not allocate if desc are waiting for ack */ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { - if (async_tx_test_ack(&dma_desc->txd)) { + if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) { list_del(&dma_desc->node); spin_unlock_irqrestore(&tdc->lock, flags); dma_desc->txd.flags = 0; @@ -282,20 +282,19 @@ static struct tegra_dma_desc *tegra_dma_desc_get( spin_unlock_irqrestore(&tdc->lock, flags); /* Allocate DMA desc */ - dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC); - if (!dma_desc) { - dev_err(tdc2dev(tdc), "dma_desc alloc failed\n"); + dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT); + if (!dma_desc) return NULL; - } dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan); dma_desc->txd.tx_submit = tegra_dma_tx_submit; dma_desc->txd.flags = 0; + return dma_desc; } static void tegra_dma_desc_put(struct tegra_dma_channel *tdc, - struct tegra_dma_desc *dma_desc) + struct tegra_dma_desc *dma_desc) { unsigned long flags; @@ -306,30 +305,29 @@ static void tegra_dma_desc_put(struct tegra_dma_channel *tdc, spin_unlock_irqrestore(&tdc->lock, flags); } -static struct tegra_dma_sg_req *tegra_dma_sg_req_get( - struct tegra_dma_channel *tdc) +static struct tegra_dma_sg_req * +tegra_dma_sg_req_get(struct tegra_dma_channel *tdc) { - struct tegra_dma_sg_req *sg_req = NULL; + struct tegra_dma_sg_req *sg_req; unsigned long flags; spin_lock_irqsave(&tdc->lock, flags); if (!list_empty(&tdc->free_sg_req)) { - sg_req = list_first_entry(&tdc->free_sg_req, - typeof(*sg_req), node); + sg_req = 
list_first_entry(&tdc->free_sg_req, typeof(*sg_req), + node); list_del(&sg_req->node); spin_unlock_irqrestore(&tdc->lock, flags); return sg_req; } spin_unlock_irqrestore(&tdc->lock, flags); - sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC); - if (!sg_req) - dev_err(tdc2dev(tdc), "sg_req alloc failed\n"); + sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT); + return sg_req; } static int tegra_dma_slave_config(struct dma_chan *dc, - struct dma_slave_config *sconfig) + struct dma_slave_config *sconfig) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); @@ -340,36 +338,53 @@ static int tegra_dma_slave_config(struct dma_chan *dc, memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); tdc->config_init = true; + return 0; } static void tegra_dma_global_pause(struct tegra_dma_channel *tdc, - bool wait_for_burst_complete) + bool wait_for_burst_complete) { struct tegra_dma *tdma = tdc->tdma; spin_lock(&tdma->global_lock); - tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0); - if (wait_for_burst_complete) - udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); + + if (tdc->tdma->global_pause_count == 0) { + tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0); + if (wait_for_burst_complete) + udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); + } + + tdc->tdma->global_pause_count++; + + spin_unlock(&tdma->global_lock); } static void tegra_dma_global_resume(struct tegra_dma_channel *tdc) { struct tegra_dma *tdma = tdc->tdma; - tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); + spin_lock(&tdma->global_lock); + + if (WARN_ON(tdc->tdma->global_pause_count == 0)) + goto out; + + if (--tdc->tdma->global_pause_count == 0) + tdma_write(tdma, TEGRA_APBDMA_GENERAL, + TEGRA_APBDMA_GENERAL_ENABLE); + +out: spin_unlock(&tdma->global_lock); } static void tegra_dma_pause(struct tegra_dma_channel *tdc, - bool wait_for_burst_complete) + bool wait_for_burst_complete) { struct tegra_dma *tdma = tdc->tdma; if (tdma->chip_data->support_channel_pause) { tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, - TEGRA_APBDMA_CHAN_CSRE_PAUSE); + TEGRA_APBDMA_CHAN_CSRE_PAUSE); if (wait_for_burst_complete) udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); } else { @@ -381,17 +396,15 @@ static void tegra_dma_resume(struct tegra_dma_channel *tdc) { struct tegra_dma *tdma = tdc->tdma; - if (tdma->chip_data->support_channel_pause) { + if (tdma->chip_data->support_channel_pause) tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0); - } else { + else tegra_dma_global_resume(tdc); - } } static void tegra_dma_stop(struct tegra_dma_channel *tdc) { - u32 csr; - u32 status; + u32 csr, status; /* Disable interrupts */ csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); @@ -412,7 +425,7 @@ static void tegra_dma_stop(struct tegra_dma_channel *tdc) } static void tegra_dma_start(struct tegra_dma_channel *tdc, - struct tegra_dma_sg_req *sg_req) + struct tegra_dma_sg_req *sg_req) { struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs; @@ -421,14 +434,16 @@ static void tegra_dma_start(struct tegra_dma_channel *tdc, tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr); tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq); tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr); + if (tdc->tdma->chip_data->support_separate_wcount_reg) + tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount); /* Start DMA */ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, - ch_regs->csr | TEGRA_APBDMA_CSR_ENB); + ch_regs->csr | TEGRA_APBDMA_CSR_ENB); } static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, - struct tegra_dma_sg_req *nsg_req) + struct tegra_dma_sg_req *nsg_req) { 
unsigned long status; @@ -444,11 +459,11 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, * load new configuration. */ tegra_dma_pause(tdc, false); - status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); + status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); /* * If interrupt is pending then do nothing as the ISR will handle - * the programing for new request. + * the programming for new request. */ if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { dev_err(tdc2dev(tdc), @@ -460,9 +475,13 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, /* Safe to program new configuration */ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr); tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr); + if (tdc->tdma->chip_data->support_separate_wcount_reg) + tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, + nsg_req->ch_regs.wcount); tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, - nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); + nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); nsg_req->configured = true; + nsg_req->words_xferred = 0; tegra_dma_resume(tdc); } @@ -471,46 +490,41 @@ static void tdc_start_head_req(struct tegra_dma_channel *tdc) { struct tegra_dma_sg_req *sg_req; - if (list_empty(&tdc->pending_sg_req)) - return; - - sg_req = list_first_entry(&tdc->pending_sg_req, - typeof(*sg_req), node); + sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node); tegra_dma_start(tdc, sg_req); sg_req->configured = true; + sg_req->words_xferred = 0; tdc->busy = true; } static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc) { - struct tegra_dma_sg_req *hsgreq; - struct tegra_dma_sg_req *hnsgreq; - - if (list_empty(&tdc->pending_sg_req)) - return; + struct tegra_dma_sg_req *hsgreq, *hnsgreq; hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) { - hnsgreq = list_first_entry(&hsgreq->node, - typeof(*hnsgreq), node); + hnsgreq = list_first_entry(&hsgreq->node, typeof(*hnsgreq), + node); tegra_dma_configure_for_next(tdc, hnsgreq); } } -static inline int get_current_xferred_count(struct tegra_dma_channel *tdc, - struct tegra_dma_sg_req *sg_req, unsigned long status) +static inline unsigned int +get_current_xferred_count(struct tegra_dma_channel *tdc, + struct tegra_dma_sg_req *sg_req, + unsigned long status) { return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4; } static void tegra_dma_abort_all(struct tegra_dma_channel *tdc) { - struct tegra_dma_sg_req *sgreq; struct tegra_dma_desc *dma_desc; + struct tegra_dma_sg_req *sgreq; while (!list_empty(&tdc->pending_sg_req)) { - sgreq = list_first_entry(&tdc->pending_sg_req, - typeof(*sgreq), node); + sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), + node); list_move_tail(&sgreq->node, &tdc->free_sg_req); if (sgreq->last_sg) { dma_desc = sgreq->dma_desc; @@ -520,7 +534,7 @@ static void tegra_dma_abort_all(struct tegra_dma_channel *tdc) /* Add in cb list if it is not there. 
*/ if (!dma_desc->cb_count) list_add_tail(&dma_desc->cb_node, - &tdc->cb_desc); + &tdc->cb_desc); dma_desc->cb_count++; } } @@ -528,15 +542,9 @@ static void tegra_dma_abort_all(struct tegra_dma_channel *tdc) } static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, - struct tegra_dma_sg_req *last_sg_req, bool to_terminate) + bool to_terminate) { - struct tegra_dma_sg_req *hsgreq = NULL; - - if (list_empty(&tdc->pending_sg_req)) { - dev_err(tdc2dev(tdc), "Dma is running without req\n"); - tegra_dma_stop(tdc); - return false; - } + struct tegra_dma_sg_req *hsgreq; /* * Check that head req on list should be in flight. @@ -546,7 +554,8 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); if (!hsgreq->configured) { tegra_dma_stop(tdc); - dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n"); + pm_runtime_put(tdc->tdma->dev); + dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA\n"); tegra_dma_abort_all(tdc); return false; } @@ -554,14 +563,15 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, /* Configure next request */ if (!to_terminate) tdc_configure_next_head_desc(tdc); + return true; } static void handle_once_dma_done(struct tegra_dma_channel *tdc, - bool to_terminate) + bool to_terminate) { - struct tegra_dma_sg_req *sgreq; struct tegra_dma_desc *dma_desc; + struct tegra_dma_sg_req *sgreq; tdc->busy = false; sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); @@ -570,7 +580,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc, list_del(&sgreq->node); if (sgreq->last_sg) { - dma_desc->dma_status = DMA_SUCCESS; + dma_desc->dma_status = DMA_COMPLETE; dma_cookie_complete(&dma_desc->txd); if (!dma_desc->cb_count) list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); @@ -580,61 +590,69 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc, list_add_tail(&sgreq->node, &tdc->free_sg_req); /* Do not start DMA if it is going to be terminate */ - if (to_terminate || list_empty(&tdc->pending_sg_req)) + if (to_terminate) + return; + + if (list_empty(&tdc->pending_sg_req)) { + pm_runtime_put(tdc->tdma->dev); return; + } tdc_start_head_req(tdc); - return; } static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, - bool to_terminate) + bool to_terminate) { - struct tegra_dma_sg_req *sgreq; struct tegra_dma_desc *dma_desc; + struct tegra_dma_sg_req *sgreq; bool st; sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); dma_desc = sgreq->dma_desc; - dma_desc->bytes_transferred += sgreq->req_len; + /* if we dma for long enough the transfer count will wrap */ + dma_desc->bytes_transferred = + (dma_desc->bytes_transferred + sgreq->req_len) % + dma_desc->bytes_requested; /* Callback need to be call */ if (!dma_desc->cb_count) list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); dma_desc->cb_count++; + sgreq->words_xferred = 0; + /* If not last req then put at end of pending list */ if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) { list_move_tail(&sgreq->node, &tdc->pending_sg_req); sgreq->configured = false; - st = handle_continuous_head_request(tdc, sgreq, to_terminate); + st = handle_continuous_head_request(tdc, to_terminate); if (!st) dma_desc->dma_status = DMA_ERROR; } - return; } -static void tegra_dma_tasklet(unsigned long data) +static void tegra_dma_tasklet(struct tasklet_struct *t) { - struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data; - 
dma_async_tx_callback callback = NULL; - void *callback_param = NULL; + struct tegra_dma_channel *tdc = from_tasklet(tdc, t, tasklet); + struct dmaengine_desc_callback cb; struct tegra_dma_desc *dma_desc; + unsigned int cb_count; unsigned long flags; - int cb_count; spin_lock_irqsave(&tdc->lock, flags); while (!list_empty(&tdc->cb_desc)) { - dma_desc = list_first_entry(&tdc->cb_desc, - typeof(*dma_desc), cb_node); + dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc), + cb_node); list_del(&dma_desc->cb_node); - callback = dma_desc->txd.callback; - callback_param = dma_desc->txd.callback_param; + dmaengine_desc_get_callback(&dma_desc->txd, &cb); cb_count = dma_desc->cb_count; dma_desc->cb_count = 0; + trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count, + cb.callback); spin_unlock_irqrestore(&tdc->lock, flags); - while (cb_count-- && callback) - callback(callback_param); + while (cb_count--) + dmaengine_desc_callback_invoke(&cb, NULL); spin_lock_irqsave(&tdc->lock, flags); } spin_unlock_irqrestore(&tdc->lock, flags); @@ -643,23 +661,25 @@ static void tegra_dma_tasklet(unsigned long data) static irqreturn_t tegra_dma_isr(int irq, void *dev_id) { struct tegra_dma_channel *tdc = dev_id; - unsigned long status; - unsigned long flags; + u32 status; - spin_lock_irqsave(&tdc->lock, flags); + spin_lock(&tdc->lock); + trace_tegra_dma_isr(&tdc->dma_chan, irq); status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); tdc->isr_handler(tdc, false); tasklet_schedule(&tdc->tasklet); - spin_unlock_irqrestore(&tdc->lock, flags); + wake_up_all(&tdc->wq); + spin_unlock(&tdc->lock); return IRQ_HANDLED; } - spin_unlock_irqrestore(&tdc->lock, flags); - dev_info(tdc2dev(tdc), - "Interrupt already served status 0x%08lx\n", status); + spin_unlock(&tdc->lock); + dev_info(tdc2dev(tdc), "Interrupt already served status 0x%08x\n", + status); + return IRQ_NONE; } @@ -675,6 +695,7 @@ static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd) cookie = dma_cookie_assign(&dma_desc->txd); list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req); spin_unlock_irqrestore(&tdc->lock, flags); + return cookie; } @@ -682,6 +703,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); unsigned long flags; + int err; spin_lock_irqsave(&tdc->lock, flags); if (list_empty(&tdc->pending_sg_req)) { @@ -689,6 +711,12 @@ static void tegra_dma_issue_pending(struct dma_chan *dc) goto end; } if (!tdc->busy) { + err = pm_runtime_resume_and_get(tdc->tdma->dev); + if (err < 0) { + dev_err(tdc2dev(tdc), "Failed to enable DMA\n"); + goto end; + } + tdc_start_head_req(tdc); /* Continuous single mode: Configure next req */ @@ -703,23 +731,18 @@ static void tegra_dma_issue_pending(struct dma_chan *dc) } end: spin_unlock_irqrestore(&tdc->lock, flags); - return; } -static void tegra_dma_terminate_all(struct dma_chan *dc) +static int tegra_dma_terminate_all(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); - struct tegra_dma_sg_req *sgreq; struct tegra_dma_desc *dma_desc; + struct tegra_dma_sg_req *sgreq; unsigned long flags; - unsigned long status; + u32 status, wcount; bool was_busy; spin_lock_irqsave(&tdc->lock, flags); - if (list_empty(&tdc->pending_sg_req)) { - spin_unlock_irqrestore(&tdc->lock, flags); - return; - } if (!tdc->busy) goto skip_dma_stop; @@ -733,32 +756,136 @@ static void tegra_dma_terminate_all(struct dma_chan *dc) 
tdc->isr_handler(tdc, true); status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); } + if (tdc->tdma->chip_data->support_separate_wcount_reg) + wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER); + else + wcount = status; was_busy = tdc->busy; tegra_dma_stop(tdc); if (!list_empty(&tdc->pending_sg_req) && was_busy) { - sgreq = list_first_entry(&tdc->pending_sg_req, - typeof(*sgreq), node); + sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), + node); sgreq->dma_desc->bytes_transferred += - get_current_xferred_count(tdc, sgreq, status); + get_current_xferred_count(tdc, sgreq, wcount); } tegra_dma_resume(tdc); + pm_runtime_put(tdc->tdma->dev); + wake_up_all(&tdc->wq); + skip_dma_stop: tegra_dma_abort_all(tdc); while (!list_empty(&tdc->cb_desc)) { - dma_desc = list_first_entry(&tdc->cb_desc, - typeof(*dma_desc), cb_node); + dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc), + cb_node); list_del(&dma_desc->cb_node); dma_desc->cb_count = 0; } spin_unlock_irqrestore(&tdc->lock, flags); + + return 0; +} + +static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc) +{ + unsigned long flags; + u32 status; + + spin_lock_irqsave(&tdc->lock, flags); + status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); + spin_unlock_irqrestore(&tdc->lock, flags); + + return !(status & TEGRA_APBDMA_STATUS_ISE_EOC); +} + +static void tegra_dma_synchronize(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + int err; + + err = pm_runtime_resume_and_get(tdc->tdma->dev); + if (err < 0) { + dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err); + return; + } + + /* + * CPU, which handles interrupt, could be busy in + * uninterruptible state, in this case sibling CPU + * should wait until interrupt is handled. + */ + wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc)); + + tasklet_kill(&tdc->tasklet); + + pm_runtime_put(tdc->tdma->dev); +} + +static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc, + struct tegra_dma_sg_req *sg_req) +{ + u32 status, wcount = 0; + + if (!list_is_first(&sg_req->node, &tdc->pending_sg_req)) + return 0; + + if (tdc->tdma->chip_data->support_separate_wcount_reg) + wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER); + + status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); + + if (!tdc->tdma->chip_data->support_separate_wcount_reg) + wcount = status; + + if (status & TEGRA_APBDMA_STATUS_ISE_EOC) + return sg_req->req_len; + + wcount = get_current_xferred_count(tdc, sg_req, wcount); + + if (!wcount) { + /* + * If wcount wasn't ever polled for this SG before, then + * simply assume that transfer hasn't started yet. + * + * Otherwise it's the end of the transfer. + * + * The alternative would be to poll the status register + * until EOC bit is set or wcount goes UP. That's so + * because EOC bit is getting set only after the last + * burst's completion and counter is less than the actual + * transfer size by 4 bytes. The counter value wraps around + * in a cyclic mode before EOC is set(!), so we can't easily + * distinguish start of transfer from its end. + */ + if (sg_req->words_xferred) + wcount = sg_req->req_len - 4; + + } else if (wcount < sg_req->words_xferred) { + /* + * This case will never happen for a non-cyclic transfer. 
+ * + * For a cyclic transfer, although it is possible for the + * next transfer to have already started (resetting the word + * count), this case should still not happen because we should + * have detected that the EOC bit is set and hence the transfer + * was completed. + */ + WARN_ON_ONCE(1); + + wcount = sg_req->req_len - 4; + } else { + sg_req->words_xferred = wcount; + } + + return wcount; } static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, - dma_cookie_t cookie, struct dma_tx_state *txstate) + dma_cookie_t cookie, + struct dma_tx_state *txstate) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_desc *dma_desc; @@ -766,25 +893,19 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, enum dma_status ret; unsigned long flags; unsigned int residual; - - spin_lock_irqsave(&tdc->lock, flags); + unsigned int bytes = 0; ret = dma_cookie_status(dc, cookie, txstate); - if (ret == DMA_SUCCESS) { - spin_unlock_irqrestore(&tdc->lock, flags); + if (ret == DMA_COMPLETE) return ret; - } + + spin_lock_irqsave(&tdc->lock, flags); /* Check on wait_ack desc status */ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { if (dma_desc->txd.cookie == cookie) { - residual = dma_desc->bytes_requested - - (dma_desc->bytes_transferred % - dma_desc->bytes_requested); - dma_set_residue(txstate, residual); ret = dma_desc->dma_status; - spin_unlock_irqrestore(&tdc->lock, flags); - return ret; + goto found; } } @@ -792,42 +913,31 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, list_for_each_entry(sg_req, &tdc->pending_sg_req, node) { dma_desc = sg_req->dma_desc; if (dma_desc->txd.cookie == cookie) { - residual = dma_desc->bytes_requested - - (dma_desc->bytes_transferred % - dma_desc->bytes_requested); - dma_set_residue(txstate, residual); + bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req); ret = dma_desc->dma_status; - spin_unlock_irqrestore(&tdc->lock, flags); - return ret; + goto found; } } - dev_dbg(tdc2dev(tdc), "cookie %d does not found\n", cookie); - spin_unlock_irqrestore(&tdc->lock, flags); - return ret; -} - -static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - switch (cmd) { - case DMA_SLAVE_CONFIG: - return tegra_dma_slave_config(dc, - (struct dma_slave_config *)arg); + dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie); + dma_desc = NULL; - case DMA_TERMINATE_ALL: - tegra_dma_terminate_all(dc); - return 0; - - default: - break; +found: + if (dma_desc && txstate) { + residual = dma_desc->bytes_requested - + ((dma_desc->bytes_transferred + bytes) % + dma_desc->bytes_requested); + dma_set_residue(txstate, residual); } - return -ENXIO; + trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate); + spin_unlock_irqrestore(&tdc->lock, flags); + + return ret; } -static inline int get_bus_width(struct tegra_dma_channel *tdc, - enum dma_slave_buswidth slave_bw) +static inline unsigned int get_bus_width(struct tegra_dma_channel *tdc, + enum dma_slave_buswidth slave_bw) { switch (slave_bw) { case DMA_SLAVE_BUSWIDTH_1_BYTE: @@ -840,16 +950,17 @@ static inline int get_bus_width(struct tegra_dma_channel *tdc, return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64; default: dev_warn(tdc2dev(tdc), - "slave bw is not supported, using 32bits\n"); + "slave bw is not supported, using 32bits\n"); return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32; } } -static inline int get_burst_size(struct tegra_dma_channel *tdc, - u32 burst_size, enum dma_slave_buswidth slave_bw, int len) +static inline unsigned int 
get_burst_size(struct tegra_dma_channel *tdc, + u32 burst_size, + enum dma_slave_buswidth slave_bw, + u32 len) { - int burst_byte; - int burst_ahb_width; + unsigned int burst_byte, burst_ahb_width; /* * burst_size from client is in terms of the bus_width. @@ -876,11 +987,13 @@ static inline int get_burst_size(struct tegra_dma_channel *tdc, } static int get_transfer_param(struct tegra_dma_channel *tdc, - enum dma_transfer_direction direction, unsigned long *apb_addr, - unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size, - enum dma_slave_buswidth *slave_bw) + enum dma_transfer_direction direction, + u32 *apb_addr, + u32 *apb_seq, + u32 *csr, + unsigned int *burst_size, + enum dma_slave_buswidth *slave_bw) { - switch (direction) { case DMA_MEM_TO_DEV: *apb_addr = tdc->dma_sconfig.dst_addr; @@ -899,30 +1012,45 @@ static int get_transfer_param(struct tegra_dma_channel *tdc, return 0; default: - dev_err(tdc2dev(tdc), "Dma direction is not supported\n"); - return -EINVAL; + dev_err(tdc2dev(tdc), "DMA direction is not supported\n"); + break; } + return -EINVAL; } -static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( - struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len, - enum dma_transfer_direction direction, unsigned long flags, - void *context) +static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc, + struct tegra_dma_channel_regs *ch_regs, + u32 len) +{ + u32 len_field = (len - 4) & 0xFFFC; + + if (tdc->tdma->chip_data->support_separate_wcount_reg) + ch_regs->wcount = len_field; + else + ch_regs->csr |= len_field; +} + +static struct dma_async_tx_descriptor * +tegra_dma_prep_slave_sg(struct dma_chan *dc, + struct scatterlist *sgl, + unsigned int sg_len, + enum dma_transfer_direction direction, + unsigned long flags, + void *context) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma_sg_req *sg_req = NULL; + u32 csr, ahb_seq, apb_ptr, apb_seq; + enum dma_slave_buswidth slave_bw; struct tegra_dma_desc *dma_desc; - unsigned int i; - struct scatterlist *sg; - unsigned long csr, ahb_seq, apb_ptr, apb_seq; struct list_head req_list; - struct tegra_dma_sg_req *sg_req = NULL; - u32 burst_size; - enum dma_slave_buswidth slave_bw; - int ret; + struct scatterlist *sg; + unsigned int burst_size; + unsigned int i; if (!tdc->config_init) { - dev_err(tdc2dev(tdc), "dma channel is not configured\n"); + dev_err(tdc2dev(tdc), "DMA channel is not configured\n"); return NULL; } if (sg_len < 1) { @@ -930,9 +1058,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( return NULL; } - ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, - &burst_size, &slave_bw); - if (ret < 0) + if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, + &burst_size, &slave_bw) < 0) return NULL; INIT_LIST_HEAD(&req_list); @@ -942,16 +1069,25 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; - csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW; - csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; - if (flags & DMA_PREP_INTERRUPT) + csr |= TEGRA_APBDMA_CSR_ONCE; + + if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) { + csr |= TEGRA_APBDMA_CSR_FLOW; + csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; + } + + if (flags & DMA_PREP_INTERRUPT) { csr |= TEGRA_APBDMA_CSR_IE_EOC; + } else { + WARN_ON_ONCE(1); + return NULL; + } apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; dma_desc = tegra_dma_desc_get(tdc); if 
(!dma_desc) { - dev_err(tdc2dev(tdc), "Dma descriptors not available\n"); + dev_err(tdc2dev(tdc), "DMA descriptors not available\n"); return NULL; } INIT_LIST_HEAD(&dma_desc->tx_list); @@ -969,16 +1105,16 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( len = sg_dma_len(sg); if ((len & 3) || (mem & 3) || - (len > tdc->tdma->chip_data->max_dma_count)) { + len > tdc->tdma->chip_data->max_dma_count) { dev_err(tdc2dev(tdc), - "Dma length/memory address is not supported\n"); + "DMA length/memory address is not supported\n"); tegra_dma_desc_put(tdc, dma_desc); return NULL; } sg_req = tegra_dma_sg_req_get(tdc); if (!sg_req) { - dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); + dev_err(tdc2dev(tdc), "DMA sg-req not available\n"); tegra_dma_desc_put(tdc, dma_desc); return NULL; } @@ -988,7 +1124,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( sg_req->ch_regs.apb_ptr = apb_ptr; sg_req->ch_regs.ahb_ptr = mem; - sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); + sg_req->ch_regs.csr = csr; + tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len); sg_req->ch_regs.apb_seq = apb_seq; sg_req->ch_regs.ahb_seq = ahb_seq; sg_req->configured = false; @@ -1020,21 +1157,21 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( return &dma_desc->txd; } -struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( - struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, - size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) +static struct dma_async_tx_descriptor * +tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, + size_t buf_len, + size_t period_len, + enum dma_transfer_direction direction, + unsigned long flags) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); - struct tegra_dma_desc *dma_desc = NULL; - struct tegra_dma_sg_req *sg_req = NULL; - unsigned long csr, ahb_seq, apb_ptr, apb_seq; - int len; - size_t remain_len; - dma_addr_t mem = buf_addr; - u32 burst_size; + struct tegra_dma_sg_req *sg_req = NULL; + u32 csr, ahb_seq, apb_ptr, apb_seq; enum dma_slave_buswidth slave_bw; - int ret; + struct tegra_dma_desc *dma_desc; + dma_addr_t mem = buf_addr; + unsigned int burst_size; + size_t len, remain_len; if (!buf_len || !period_len) { dev_err(tdc2dev(tdc), "Invalid buffer/period len\n"); @@ -1053,7 +1190,7 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( * terminating the DMA. 
*/ if (tdc->busy) { - dev_err(tdc2dev(tdc), "Request not allowed when dma running\n"); + dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n"); return NULL; } @@ -1068,26 +1205,31 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( len = period_len; if ((len & 3) || (buf_addr & 3) || - (len > tdc->tdma->chip_data->max_dma_count)) { + len > tdc->tdma->chip_data->max_dma_count) { dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n"); return NULL; } - ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, - &burst_size, &slave_bw); - if (ret < 0) + if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, + &burst_size, &slave_bw) < 0) return NULL; - ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; - csr |= TEGRA_APBDMA_CSR_FLOW; - if (flags & DMA_PREP_INTERRUPT) + if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) { + csr |= TEGRA_APBDMA_CSR_FLOW; + csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; + } + + if (flags & DMA_PREP_INTERRUPT) { csr |= TEGRA_APBDMA_CSR_IE_EOC; - csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; + } else { + WARN_ON_ONCE(1); + return NULL; + } apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; @@ -1109,7 +1251,7 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( while (remain_len) { sg_req = tegra_dma_sg_req_get(tdc); if (!sg_req) { - dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); + dev_err(tdc2dev(tdc), "DMA sg-req not available\n"); tegra_dma_desc_put(tdc, dma_desc); return NULL; } @@ -1117,11 +1259,11 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); sg_req->ch_regs.apb_ptr = apb_ptr; sg_req->ch_regs.ahb_ptr = mem; - sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); + sg_req->ch_regs.csr = csr; + tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len); sg_req->ch_regs.apb_seq = apb_seq; sg_req->ch_regs.ahb_seq = ahb_seq; sg_req->configured = false; - sg_req->half_done = false; sg_req->last_sg = false; sg_req->dma_desc = dma_desc; sg_req->req_len = len; @@ -1155,48 +1297,38 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); - struct tegra_dma *tdma = tdc->tdma; - int ret; dma_cookie_init(&tdc->dma_chan); - tdc->config_init = false; - ret = clk_prepare_enable(tdma->dma_clk); - if (ret < 0) - dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret); - return ret; + + return 0; } static void tegra_dma_free_chan_resources(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); - struct tegra_dma *tdma = tdc->tdma; - struct tegra_dma_desc *dma_desc; struct tegra_dma_sg_req *sg_req; struct list_head dma_desc_list; struct list_head sg_req_list; - unsigned long flags; INIT_LIST_HEAD(&dma_desc_list); INIT_LIST_HEAD(&sg_req_list); dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); - if (tdc->busy) - tegra_dma_terminate_all(dc); + tegra_dma_terminate_all(dc); + tasklet_kill(&tdc->tasklet); - spin_lock_irqsave(&tdc->lock, flags); list_splice_init(&tdc->pending_sg_req, &sg_req_list); list_splice_init(&tdc->free_sg_req, &sg_req_list); list_splice_init(&tdc->free_dma_desc, &dma_desc_list); INIT_LIST_HEAD(&tdc->cb_desc); tdc->config_init = false; tdc->isr_handler = NULL; - spin_unlock_irqrestore(&tdc->lock, flags); while (!list_empty(&dma_desc_list)) { - dma_desc = 
list_first_entry(&dma_desc_list, - typeof(*dma_desc), node); + dma_desc = list_first_entry(&dma_desc_list, typeof(*dma_desc), + node); list_del(&dma_desc->node); kfree(dma_desc); } @@ -1206,75 +1338,118 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) list_del(&sg_req->node); kfree(sg_req); } - clk_disable_unprepare(tdma->dma_clk); + + tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID; +} + +static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct tegra_dma *tdma = ofdma->of_dma_data; + struct tegra_dma_channel *tdc; + struct dma_chan *chan; + + if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) { + dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]); + return NULL; + } + + chan = dma_get_any_slave_channel(&tdma->dma_dev); + if (!chan) + return NULL; + + tdc = to_tegra_dma_chan(chan); + tdc->slave_id = dma_spec->args[0]; + + return chan; } /* Tegra20 specific DMA controller information */ static const struct tegra_dma_chip_data tegra20_dma_chip_data = { .nr_channels = 16, + .channel_reg_size = 0x20, .max_dma_count = 1024UL * 64, .support_channel_pause = false, + .support_separate_wcount_reg = false, }; /* Tegra30 specific DMA controller information */ static const struct tegra_dma_chip_data tegra30_dma_chip_data = { .nr_channels = 32, + .channel_reg_size = 0x20, .max_dma_count = 1024UL * 64, .support_channel_pause = false, + .support_separate_wcount_reg = false, }; /* Tegra114 specific DMA controller information */ static const struct tegra_dma_chip_data tegra114_dma_chip_data = { .nr_channels = 32, + .channel_reg_size = 0x20, .max_dma_count = 1024UL * 64, .support_channel_pause = true, + .support_separate_wcount_reg = false, }; - -static const struct of_device_id tegra_dma_of_match[] = { - { - .compatible = "nvidia,tegra114-apbdma", - .data = &tegra114_dma_chip_data, - }, { - .compatible = "nvidia,tegra30-apbdma", - .data = &tegra30_dma_chip_data, - }, { - .compatible = "nvidia,tegra20-apbdma", - .data = &tegra20_dma_chip_data, - }, { - }, +/* Tegra148 specific DMA controller information */ +static const struct tegra_dma_chip_data tegra148_dma_chip_data = { + .nr_channels = 32, + .channel_reg_size = 0x40, + .max_dma_count = 1024UL * 64, + .support_channel_pause = true, + .support_separate_wcount_reg = true, }; -MODULE_DEVICE_TABLE(of, tegra_dma_of_match); + +static int tegra_dma_init_hw(struct tegra_dma *tdma) +{ + int err; + + err = reset_control_assert(tdma->rst); + if (err) { + dev_err(tdma->dev, "failed to assert reset: %d\n", err); + return err; + } + + err = clk_enable(tdma->dma_clk); + if (err) { + dev_err(tdma->dev, "failed to enable clk: %d\n", err); + return err; + } + + /* reset DMA controller */ + udelay(2); + reset_control_deassert(tdma->rst); + + /* enable global DMA registers */ + tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); + tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); + tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFF); + + clk_disable(tdma->dma_clk); + + return 0; +} static int tegra_dma_probe(struct platform_device *pdev) { - struct resource *res; + const struct tegra_dma_chip_data *cdata; struct tegra_dma *tdma; + unsigned int i; + size_t size; int ret; - int i; - const struct tegra_dma_chip_data *cdata = NULL; - const struct of_device_id *match; - - match = of_match_device(tegra_dma_of_match, &pdev->dev); - if (!match) { - dev_err(&pdev->dev, "Error: No device match found\n"); - return -ENODEV; - } - cdata = match->data; - tdma = devm_kzalloc(&pdev->dev, 
sizeof(*tdma) + cdata->nr_channels * - sizeof(struct tegra_dma_channel), GFP_KERNEL); - if (!tdma) { - dev_err(&pdev->dev, "Error: memory allocation failed\n"); + cdata = of_device_get_match_data(&pdev->dev); + size = struct_size(tdma, channels, cdata->nr_channels); + + tdma = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!tdma) return -ENOMEM; - } tdma->dev = &pdev->dev; tdma->chip_data = cdata; platform_set_drvdata(pdev, tdma); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - tdma->base_addr = devm_ioremap_resource(&pdev->dev, res); + tdma->base_addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(tdma->base_addr)) return PTR_ERR(tdma->base_addr); @@ -1284,71 +1459,61 @@ static int tegra_dma_probe(struct platform_device *pdev) return PTR_ERR(tdma->dma_clk); } - spin_lock_init(&tdma->global_lock); - - pm_runtime_enable(&pdev->dev); - if (!pm_runtime_enabled(&pdev->dev)) { - ret = tegra_dma_runtime_resume(&pdev->dev); - if (ret) { - dev_err(&pdev->dev, "dma_runtime_resume failed %d\n", - ret); - goto err_pm_disable; - } + tdma->rst = devm_reset_control_get(&pdev->dev, "dma"); + if (IS_ERR(tdma->rst)) { + dev_err(&pdev->dev, "Error: Missing reset\n"); + return PTR_ERR(tdma->rst); } - /* Enable clock before accessing registers */ - ret = clk_prepare_enable(tdma->dma_clk); - if (ret < 0) { - dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret); - goto err_pm_disable; - } + spin_lock_init(&tdma->global_lock); - /* Reset DMA controller */ - tegra_periph_reset_assert(tdma->dma_clk); - udelay(2); - tegra_periph_reset_deassert(tdma->dma_clk); + ret = clk_prepare(tdma->dma_clk); + if (ret) + return ret; - /* Enable global DMA registers */ - tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); - tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); - tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); + ret = tegra_dma_init_hw(tdma); + if (ret) + goto err_clk_unprepare; - clk_disable_unprepare(tdma->dma_clk); + pm_runtime_irq_safe(&pdev->dev); + pm_runtime_enable(&pdev->dev); INIT_LIST_HEAD(&tdma->dma_dev.channels); for (i = 0; i < cdata->nr_channels; i++) { struct tegra_dma_channel *tdc = &tdma->channels[i]; + int irq; - tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + - i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; + tdc->chan_addr = tdma->base_addr + + TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + + (i * cdata->channel_reg_size); - res = platform_get_resource(pdev, IORESOURCE_IRQ, i); - if (!res) { - ret = -EINVAL; - dev_err(&pdev->dev, "No irq resource for chan %d\n", i); - goto err_irq; + irq = platform_get_irq(pdev, i); + if (irq < 0) { + ret = irq; + goto err_pm_disable; } - tdc->irq = res->start; + snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i); - ret = devm_request_irq(&pdev->dev, tdc->irq, - tegra_dma_isr, 0, tdc->name, tdc); + ret = devm_request_irq(&pdev->dev, irq, tegra_dma_isr, 0, + tdc->name, tdc); if (ret) { dev_err(&pdev->dev, "request_irq failed with err %d channel %d\n", ret, i); - goto err_irq; + goto err_pm_disable; } tdc->dma_chan.device = &tdma->dma_dev; dma_cookie_init(&tdc->dma_chan); list_add_tail(&tdc->dma_chan.device_node, - &tdma->dma_dev.channels); + &tdma->dma_dev.channels); tdc->tdma = tdma; tdc->id = i; + tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID; - tasklet_init(&tdc->tasklet, tegra_dma_tasklet, - (unsigned long)tdc); + tasklet_setup(&tdc->tasklet, tegra_dma_tasklet); spin_lock_init(&tdc->lock); + init_waitqueue_head(&tdc->wq); INIT_LIST_HEAD(&tdc->pending_sg_req); INIT_LIST_HEAD(&tdc->free_sg_req); @@ -1360,6 
+1525,7 @@ static int tegra_dma_probe(struct platform_device *pdev) dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); + tdma->global_pause_count = 0; tdma->dma_dev.dev = &pdev->dev; tdma->dma_dev.device_alloc_chan_resources = tegra_dma_alloc_chan_resources; @@ -1367,7 +1533,19 @@ static int tegra_dma_probe(struct platform_device *pdev) tegra_dma_free_chan_resources; tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; - tdma->dma_dev.device_control = tegra_dma_device_control; + tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); + tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); + tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + tdma->dma_dev.device_config = tegra_dma_slave_config; + tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all; + tdma->dma_dev.device_synchronize = tegra_dma_synchronize; tdma->dma_dev.device_tx_status = tegra_dma_tx_status; tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; @@ -1375,143 +1553,124 @@ static int tegra_dma_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "Tegra20 APB DMA driver registration failed %d\n", ret); - goto err_irq; + goto err_pm_disable; + } + + ret = of_dma_controller_register(pdev->dev.of_node, + tegra_dma_of_xlate, tdma); + if (ret < 0) { + dev_err(&pdev->dev, + "Tegra20 APB DMA OF registration failed %d\n", ret); + goto err_unregister_dma_dev; } - dev_info(&pdev->dev, "Tegra20 APB DMA driver register %d channels\n", - cdata->nr_channels); + dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %u channels\n", + cdata->nr_channels); + return 0; -err_irq: - while (--i >= 0) { - struct tegra_dma_channel *tdc = &tdma->channels[i]; - tasklet_kill(&tdc->tasklet); - } +err_unregister_dma_dev: + dma_async_device_unregister(&tdma->dma_dev); err_pm_disable: pm_runtime_disable(&pdev->dev); - if (!pm_runtime_status_suspended(&pdev->dev)) - tegra_dma_runtime_suspend(&pdev->dev); + +err_clk_unprepare: + clk_unprepare(tdma->dma_clk); + return ret; } -static int tegra_dma_remove(struct platform_device *pdev) +static void tegra_dma_remove(struct platform_device *pdev) { struct tegra_dma *tdma = platform_get_drvdata(pdev); - int i; - struct tegra_dma_channel *tdc; + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&tdma->dma_dev); - - for (i = 0; i < tdma->chip_data->nr_channels; ++i) { - tdc = &tdma->channels[i]; - tasklet_kill(&tdc->tasklet); - } - pm_runtime_disable(&pdev->dev); - if (!pm_runtime_status_suspended(&pdev->dev)) - tegra_dma_runtime_suspend(&pdev->dev); - - return 0; + clk_unprepare(tdma->dma_clk); } -static int tegra_dma_runtime_suspend(struct device *dev) +static int __maybe_unused tegra_dma_runtime_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct tegra_dma *tdma = platform_get_drvdata(pdev); + struct tegra_dma *tdma = dev_get_drvdata(dev); + + clk_disable(tdma->dma_clk); - clk_disable_unprepare(tdma->dma_clk); return 0; } -static int tegra_dma_runtime_resume(struct device *dev) +static int __maybe_unused tegra_dma_runtime_resume(struct device *dev) { - struct platform_device 
*pdev = to_platform_device(dev); - struct tegra_dma *tdma = platform_get_drvdata(pdev); - int ret; + struct tegra_dma *tdma = dev_get_drvdata(dev); - ret = clk_prepare_enable(tdma->dma_clk); - if (ret < 0) { - dev_err(dev, "clk_enable failed: %d\n", ret); - return ret; - } - return 0; + return clk_enable(tdma->dma_clk); } -#ifdef CONFIG_PM_SLEEP -static int tegra_dma_pm_suspend(struct device *dev) +static int __maybe_unused tegra_dma_dev_suspend(struct device *dev) { struct tegra_dma *tdma = dev_get_drvdata(dev); - int i; - int ret; - - /* Enable clock before accessing register */ - ret = tegra_dma_runtime_resume(dev); - if (ret < 0) - return ret; + unsigned long flags; + unsigned int i; + bool busy; - tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL); for (i = 0; i < tdma->chip_data->nr_channels; i++) { struct tegra_dma_channel *tdc = &tdma->channels[i]; - struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; - ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); - ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR); - ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR); - ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ); - ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ); + tasklet_kill(&tdc->tasklet); + + spin_lock_irqsave(&tdc->lock, flags); + busy = tdc->busy; + spin_unlock_irqrestore(&tdc->lock, flags); + + if (busy) { + dev_err(tdma->dev, "channel %u busy\n", i); + return -EBUSY; + } } - /* Disable clock */ - tegra_dma_runtime_suspend(dev); - return 0; + return pm_runtime_force_suspend(dev); } -static int tegra_dma_pm_resume(struct device *dev) +static int __maybe_unused tegra_dma_dev_resume(struct device *dev) { struct tegra_dma *tdma = dev_get_drvdata(dev); - int i; - int ret; + int err; - /* Enable clock before accessing register */ - ret = tegra_dma_runtime_resume(dev); - if (ret < 0) - return ret; - - tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen); - tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); - tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); - - for (i = 0; i < tdma->chip_data->nr_channels; i++) { - struct tegra_dma_channel *tdc = &tdma->channels[i]; - struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; - - tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq); - tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr); - tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq); - tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr); - tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, - (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB)); - } + err = tegra_dma_init_hw(tdma); + if (err) + return err; - /* Disable clock */ - tegra_dma_runtime_suspend(dev); - return 0; + return pm_runtime_force_resume(dev); } -#endif static const struct dev_pm_ops tegra_dma_dev_pm_ops = { -#ifdef CONFIG_PM_RUNTIME - .runtime_suspend = tegra_dma_runtime_suspend, - .runtime_resume = tegra_dma_runtime_resume, -#endif - SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) + SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume, + NULL) + SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_dev_suspend, tegra_dma_dev_resume) }; +static const struct of_device_id tegra_dma_of_match[] = { + { + .compatible = "nvidia,tegra148-apbdma", + .data = &tegra148_dma_chip_data, + }, { + .compatible = "nvidia,tegra114-apbdma", + .data = &tegra114_dma_chip_data, + }, { + .compatible = "nvidia,tegra30-apbdma", + .data = &tegra30_dma_chip_data, + }, { + .compatible = "nvidia,tegra20-apbdma", + .data = &tegra20_dma_chip_data, + }, { + }, +}; 
+MODULE_DEVICE_TABLE(of, tegra_dma_of_match); + static struct platform_driver tegra_dmac_driver = { .driver = { .name = "tegra-apbdma", - .owner = THIS_MODULE, .pm = &tegra_dma_dev_pm_ops, .of_match_table = tegra_dma_of_match, }, @@ -1521,7 +1680,6 @@ static struct platform_driver tegra_dmac_driver = { module_platform_driver(tegra_dmac_driver); -MODULE_ALIAS("platform:tegra20-apbdma"); MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver"); MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); MODULE_LICENSE("GPL v2"); |
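Not part of the change itself, but for context: below is a hypothetical consumer-side sketch of how the reworked dmaengine hooks introduced above are exercised. The device, the "rx" dma-names entry, the FIFO address and the function names are made up for illustration; the calls themselves are the generic dmaengine APIs this patch wires up — OF channel lookup via tegra_dma_of_xlate(), the DMA_PREP_INTERRUPT requirement in the prep hooks, burst-granularity residue from tegra_dma_tx_status(), and the new device_synchronize callback reached through dmaengine_terminate_sync().

/*
 * Hypothetical consumer-side sketch (not part of the patch above): a cyclic
 * DEV_TO_MEM transfer driven through the generic dmaengine API.
 */
#include <linux/dmaengine.h>

static void example_period_done(void *arg)
{
	/* Called from tegra_dma_tasklet() once per completed period */
}

static int example_start_cyclic_rx(struct device *dev, dma_addr_t buf,
				   size_t buf_len, size_t period_len,
				   dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.src_addr = fifo_addr,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_tx_state state;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	/* Resolved via tegra_dma_of_xlate(); dma_spec->args[0] is the slave id */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto err_release;

	/* DMA_PREP_INTERRUPT is mandatory; the prep hooks refuse to run without it */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EIO;
		goto err_release;
	}

	desc->callback = example_period_done;
	desc->callback_param = NULL;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto err_release;

	/* Takes the runtime-PM reference and programs the head request */
	dma_async_issue_pending(chan);

	/* Residue is computed from the live word count, at burst granularity */
	dmaengine_tx_status(chan, cookie, &state);
	dev_dbg(dev, "cyclic rx residue: %u\n", state.residue);

	/* ... later: stop and wait for the EOC interrupt/tasklet to drain */
	dmaengine_terminate_sync(chan);
err_release:
	dma_release_channel(chan);
	return ret;
}

The runtime-PM reference taken in tegra_dma_issue_pending() is dropped again either when the pending list drains (handle_once_dma_done()) or from tegra_dma_terminate_all(), so a consumer like the sketch above never manages the controller clock itself.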
