Diffstat (limited to 'drivers/dma/pl330.c')
| -rw-r--r-- | drivers/dma/pl330.c | 2219 |
1 file changed, 1211 insertions(+), 1008 deletions(-)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 593827b3fdd4..82a9fe88ad54 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1,16 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Copyright (C) 2010 Samsung Electronics Co. Ltd. * Jaswinder Singh <jassi.brar@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ +#include <linux/debugfs.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/init.h> @@ -22,37 +19,32 @@ #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/amba/bus.h> -#include <linux/amba/pl330.h> #include <linux/scatterlist.h> #include <linux/of.h> #include <linux/of_dma.h> #include <linux/err.h> +#include <linux/pm_runtime.h> +#include <linux/bug.h> +#include <linux/reset.h> #include "dmaengine.h" #define PL330_MAX_CHAN 8 #define PL330_MAX_IRQS 32 #define PL330_MAX_PERI 32 - -enum pl330_srccachectrl { - SCCTRL0, /* Noncacheable and nonbufferable */ - SCCTRL1, /* Bufferable only */ - SCCTRL2, /* Cacheable, but do not allocate */ - SCCTRL3, /* Cacheable and bufferable, but do not allocate */ - SINVALID1, - SINVALID2, - SCCTRL6, /* Cacheable write-through, allocate on reads only */ - SCCTRL7, /* Cacheable write-back, allocate on reads only */ -}; - -enum pl330_dstcachectrl { - DCCTRL0, /* Noncacheable and nonbufferable */ - DCCTRL1, /* Bufferable only */ - DCCTRL2, /* Cacheable, but do not allocate */ - DCCTRL3, /* Cacheable and bufferable, but do not allocate */ - DINVALID1, /* AWCACHE = 0x1000 */ - DINVALID2, - DCCTRL6, /* Cacheable write-through, allocate on writes only */ - DCCTRL7, /* Cacheable write-back, allocate on writes only */ +#define PL330_MAX_BURST 16 + +#define PL330_QUIRK_BROKEN_NO_FLUSHP BIT(0) +#define PL330_QUIRK_PERIPH_BURST BIT(1) + +enum pl330_cachectrl { + CCTRL0, /* Noncacheable and nonbufferable */ + CCTRL1, /* Bufferable only */ + CCTRL2, /* Cacheable, but do not allocate */ + CCTRL3, /* Cacheable and bufferable, but do not allocate */ + INVALID1, /* AWCACHE = 0x1000 */ + INVALID2, + CCTRL6, /* Cacheable write-through, allocate on writes only */ + CCTRL7, /* Cacheable write-back, allocate on writes only */ }; enum pl330_byteswap { @@ -63,13 +55,6 @@ enum pl330_byteswap { SWAP_16, }; -enum pl330_reqtype { - MEMTOMEM, - MEMTODEV, - DEVTOMEM, - DEVTODEV, -}; - /* Register and Bit field Definitions */ #define DS 0x0 #define DS_ST_STOP 0x0 @@ -263,9 +248,6 @@ enum pl330_reqtype { */ #define MCODE_BUFF_PER_REQ 256 -/* If the _pl330_req is available to the client */ -#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND) - /* Use this _only_ to wait on transient states */ #define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax(); @@ -273,7 +255,7 @@ enum pl330_reqtype { static unsigned cmd_line; #define PL330_DBGCMD_DUMP(off, x...) 
do { \ printk("%x:", cmd_line); \ - printk(x); \ + printk(KERN_CONT x); \ cmd_line += off; \ } while (0) #define PL330_DBGMC_START(addr) (cmd_line = addr) @@ -286,13 +268,16 @@ static unsigned cmd_line; #define NR_DEFAULT_DESC 16 +/* Delay for runtime PM autosuspend, ms */ +#define PL330_AUTOSUSPEND_DELAY 20 + /* Populated by the PL330 core driver for DMA API driver's info */ struct pl330_config { u32 periph_id; #define DMAC_MODE_NS (1 << 0) unsigned int mode; unsigned int data_bus_width:10; /* In number of bits */ - unsigned int data_buf_dep:10; + unsigned int data_buf_dep:11; unsigned int num_chan:4; unsigned int num_peri:6; u32 peri_ns; @@ -300,28 +285,7 @@ struct pl330_config { u32 irq_ns; }; -/* Handle to the DMAC provided to the PL330 core */ -struct pl330_info { - /* Owning device */ - struct device *dev; - /* Size of MicroCode buffers for each channel. */ - unsigned mcbufsz; - /* ioremap'ed address of PL330 registers. */ - void __iomem *base; - /* Client can freely use it. */ - void *client_data; - /* PL330 core data, Client must not touch it. */ - void *pl330_data; - /* Populated by the PL330 core driver during pl330_add */ - struct pl330_config pcfg; - /* - * If the DMAC has some reset mechanism, then the - * client may want to provide pointer to the method. - */ - void (*dmac_reset)(struct pl330_info *pi); -}; - -/** +/* * Request Configuration. * The PL330 core does not modify this and uses the last * working configuration if the request doesn't provide any. @@ -344,8 +308,8 @@ struct pl330_reqcfg { unsigned brst_len:5; unsigned brst_size:3; /* in power of 2 */ - enum pl330_dstcachectrl dcctl; - enum pl330_srccachectrl scctl; + enum pl330_cachectrl dcctl; + enum pl330_cachectrl scctl; enum pl330_byteswap swap; struct pl330_config *pcfg; }; @@ -359,11 +323,6 @@ struct pl330_xfer { u32 dst_addr; /* Size to xfer */ u32 bytes; - /* - * Pointer to next xfer in the list. - * The last xfer in the req must point to NULL. - */ - struct pl330_xfer *next; }; /* The xfer callbacks are made with one of these arguments. */ @@ -376,67 +335,6 @@ enum pl330_op_err { PL330_ERR_FAIL, }; -/* A request defining Scatter-Gather List ending with NULL xfer. */ -struct pl330_req { - enum pl330_reqtype rqtype; - /* Index of peripheral for the xfer. */ - unsigned peri:5; - /* Unique token for this xfer, set by the client. */ - void *token; - /* Callback to be called after xfer. */ - void (*xfer_cb)(void *token, enum pl330_op_err err); - /* If NULL, req will be done at last set parameters. */ - struct pl330_reqcfg *cfg; - /* Pointer to first xfer in the request. */ - struct pl330_xfer *x; - /* Hook to attach to DMAC's list of reqs with due callback */ - struct list_head rqd; -}; - -/* - * To know the status of the channel and DMAC, the client - * provides a pointer to this structure. The PL330 core - * fills it with current information. - */ -struct pl330_chanstatus { - /* - * If the DMAC engine halted due to some error, - * the client should remove-add DMAC. - */ - bool dmac_halted; - /* - * If channel is halted due to some error, - * the client should ABORT/FLUSH and START the channel. - */ - bool faulting; - /* Location of last load */ - u32 src_addr; - /* Location of last store */ - u32 dst_addr; - /* - * Pointer to the currently active req, NULL if channel is - * inactive, even though the requests may be present. - */ - struct pl330_req *top_req; - /* Pointer to req waiting second in the queue if any. 
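An aside on the pl330_config hunk above: data_buf_dep widens from a 10-bit to an 11-bit field, evidently because the CRD register encodes the FIFO depth minus one in a 10-bit field (see read_dmac_config() further down, which stores val + 1), so a maximal depth of 1024 would truncate to 0 in a 10-bit bit-field. A minimal stand-alone sketch of that truncation, using hypothetical cfg10/cfg11 structs:

#include <stdio.h>

struct cfg10 { unsigned int data_buf_dep:10; };
struct cfg11 { unsigned int data_buf_dep:11; };

int main(void)
{
	unsigned int crd_code = 1023;	/* largest depth-1 code in CRD */
	struct cfg10 a = { .data_buf_dep = crd_code + 1 };	/* 1024 -> 0 */
	struct cfg11 b = { .data_buf_dep = crd_code + 1 };	/* 1024 fits */

	printf("10-bit: %u, 11-bit: %u\n", a.data_buf_dep, b.data_buf_dep);
	return 0;
}
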
*/ - struct pl330_req *wait_req; -}; - -enum pl330_chan_op { - /* Start the channel */ - PL330_OP_START, - /* Abort the active xfer */ - PL330_OP_ABORT, - /* Stop xfer and flush queue */ - PL330_OP_FLUSH, -}; - -struct _xfer_spec { - u32 ccr; - struct pl330_req *r; - struct pl330_xfer *x; -}; - enum dmamov_dst { SAR = 0, CCR, @@ -454,12 +352,12 @@ enum pl330_cond { ALWAYS, }; +struct dma_pl330_desc; + struct _pl330_req { u32 mc_bus; void *mc_cpu; - /* Number of bytes taken to setup MC for the req */ - u32 mc_len; - struct pl330_req *r; + struct dma_pl330_desc *desc; }; /* ToBeDone for tasklet */ @@ -491,30 +389,6 @@ enum pl330_dmac_state { DYING, }; -/* A DMAC */ -struct pl330_dmac { - spinlock_t lock; - /* Holds list of reqs with due callbacks */ - struct list_head req_done; - /* Pointer to platform specific stuff */ - struct pl330_info *pinfo; - /* Maximum possible events/irqs */ - int events[32]; - /* BUS address of MicroCode buffer */ - dma_addr_t mcode_bus; - /* CPU address of MicroCode buffer */ - void *mcode_cpu; - /* List of all Channel threads */ - struct pl330_thread *channels; - /* Pointer to the MANAGER thread */ - struct pl330_thread *manager; - /* To handle bad news in interrupt */ - struct tasklet_struct tasks; - struct _pl330_tbd dmac_tbd; - /* State of DMAC operation */ - enum pl330_dmac_state state; -}; - enum desc_status { /* In the DMAC pool */ FREE, @@ -530,6 +404,12 @@ enum desc_status { */ BUSY, /* + * Pause was called while descriptor was BUSY. Due to hardware + * limitations, only termination is possible for descriptors + * that have been paused. + */ + PAUSED, + /* * Sitting on the channel work_list but xfer done * by PL330 core */ @@ -543,36 +423,46 @@ struct dma_pl330_chan { /* DMA-Engine Channel */ struct dma_chan chan; - /* List of to be xfered descriptors */ + /* List of submitted descriptors */ + struct list_head submitted_list; + /* List of issued descriptors */ struct list_head work_list; + /* List of completed descriptors */ + struct list_head completed_list; /* Pointer to the DMAC that manages this channel, * NULL if the channel is available to be acquired. * As the parent, this DMAC also provides descriptors * to the channel. */ - struct dma_pl330_dmac *dmac; + struct pl330_dmac *dmac; /* To protect channel manipulation */ spinlock_t lock; - /* Token of a hardware channel thread of PL330 DMAC - * NULL if the channel is available to be acquired. + /* + * Hardware channel thread of PL330 DMAC. NULL if the channel is + * available. */ - void *pl330_chid; + struct pl330_thread *thread; /* For D-to-M and M-to-D channels */ int burst_sz; /* the peripheral fifo width */ int burst_len; /* the number of burst */ - dma_addr_t fifo_addr; + phys_addr_t fifo_addr; + /* DMA-mapped view of the FIFO; may differ if an IOMMU is present */ + dma_addr_t fifo_dma; + enum dma_data_direction dir; + struct dma_slave_config slave_config; /* for cyclic capability */ bool cyclic; -}; -struct dma_pl330_dmac { - struct pl330_info pif; + /* for runtime pm tracking */ + bool active; +}; +struct pl330_dmac { /* DMA-Engine Device */ struct dma_device ddma; @@ -581,8 +471,53 @@ struct dma_pl330_dmac { /* To protect desc_pool manipulation */ spinlock_t pool_lock; + /* Size of MicroCode buffers for each channel. */ + unsigned mcbufsz; + /* ioremap'ed address of PL330 registers. 
*/ + void __iomem *base; + /* Populated by the PL330 core driver during pl330_add */ + struct pl330_config pcfg; + + spinlock_t lock; + /* Maximum possible events/irqs */ + int events[32]; + /* BUS address of MicroCode buffer */ + dma_addr_t mcode_bus; + /* CPU address of MicroCode buffer */ + void *mcode_cpu; + /* List of all Channel threads */ + struct pl330_thread *channels; + /* Pointer to the MANAGER thread */ + struct pl330_thread *manager; + /* To handle bad news in interrupt */ + struct tasklet_struct tasks; + struct _pl330_tbd dmac_tbd; + /* State of DMAC operation */ + enum pl330_dmac_state state; + /* Holds list of reqs with due callbacks */ + struct list_head req_done; + /* Peripheral channels connected to this DMAC */ + unsigned int num_peripherals; struct dma_pl330_chan *peripherals; /* keep at end */ + int quirks; + + struct reset_control *rstc; + struct reset_control *rstc_ocp; +}; + +static struct pl330_of_quirks { + char *quirk; + int id; +} of_quirks[] = { + { + .quirk = "arm,pl330-broken-no-flushp", + .id = PL330_QUIRK_BROKEN_NO_FLUSHP, + }, + { + .quirk = "arm,pl330-periph-burst", + .id = PL330_QUIRK_PERIPH_BURST, + } }; struct dma_pl330_desc { @@ -596,54 +531,45 @@ struct dma_pl330_desc { struct pl330_xfer px; struct pl330_reqcfg rqcfg; - struct pl330_req req; enum desc_status status; + int bytes_requested; + bool last; + /* The channel which currently holds this desc */ struct dma_pl330_chan *pchan; -}; -struct dma_pl330_filter_args { - struct dma_pl330_dmac *pdmac; - unsigned int chan_id; + enum dma_transfer_direction rqtype; + /* Index of peripheral for the xfer. */ + unsigned peri:5; + /* Hook to attach to DMAC's list of reqs with due callback */ + struct list_head rqd; }; -static inline void _callback(struct pl330_req *r, enum pl330_op_err err) -{ - if (r && r->xfer_cb) - r->xfer_cb(r->token, err); -} +struct _xfer_spec { + u32 ccr; + struct dma_pl330_desc *desc; +}; -static inline bool _queue_empty(struct pl330_thread *thrd) -{ - return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1])) - ? true : false; -} +static int pl330_config_write(struct dma_chan *chan, + struct dma_slave_config *slave_config, + enum dma_transfer_direction direction); static inline bool _queue_full(struct pl330_thread *thrd) { - return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1])) - ? false : true; + return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL; } static inline bool is_manager(struct pl330_thread *thrd) { - struct pl330_dmac *pl330 = thrd->dmac; - - /* MANAGER is indexed at the end */ - if (thrd->id == pl330->pinfo->pcfg.num_chan) - return true; - else - return false; + return thrd->dmac->manager == thrd; } /* If manager of the thread is in Non-Secure mode */ static inline bool _manager_ns(struct pl330_thread *thrd) { - struct pl330_dmac *pl330 = thrd->dmac; - - return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false; + return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false; } static inline u32 get_revision(u32 periph_id) @@ -651,22 +577,6 @@ static inline u32 get_revision(u32 periph_id) return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK; } -static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[], - enum pl330_dst da, u16 val) -{ - if (dry_run) - return SZ_DMAADDH; - - buf[0] = CMD_DMAADDH; - buf[0] |= (da << 1); - *((u16 *)&buf[1]) = val; - - PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n", - da == 1 ? 
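The of_quirks[] table above is consumed at probe time; the probe hunks fall outside the quoted range, but the usual pattern is a sketch like the following, assuming a struct device_node *np and the pl330 pointer are in scope:

	int i;

	for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
		if (of_property_read_bool(np, of_quirks[i].quirk))
			pl330->quirks |= of_quirks[i].id;
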
"DA" : "SA", val); - - return SZ_DMAADDH; -} - static inline u32 _emit_END(unsigned dry_run, u8 buf[]) { if (dry_run) @@ -813,7 +723,10 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[], buf[0] = CMD_DMAMOV; buf[1] = dst; - *((u32 *)&buf[2]) = val; + buf[2] = val; + buf[3] = val >> 8; + buf[4] = val >> 16; + buf[5] = val >> 24; PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n", dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val); @@ -821,18 +734,6 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[], return SZ_DMAMOV; } -static inline u32 _emit_NOP(unsigned dry_run, u8 buf[]) -{ - if (dry_run) - return SZ_DMANOP; - - buf[0] = CMD_DMANOP; - - PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n"); - - return SZ_DMANOP; -} - static inline u32 _emit_RMB(unsigned dry_run, u8 buf[]) { if (dry_run) @@ -900,39 +801,6 @@ static inline u32 _emit_STP(unsigned dry_run, u8 buf[], return SZ_DMASTP; } -static inline u32 _emit_STZ(unsigned dry_run, u8 buf[]) -{ - if (dry_run) - return SZ_DMASTZ; - - buf[0] = CMD_DMASTZ; - - PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n"); - - return SZ_DMASTZ; -} - -static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev, - unsigned invalidate) -{ - if (dry_run) - return SZ_DMAWFE; - - buf[0] = CMD_DMAWFE; - - ev &= 0x1f; - ev <<= 3; - buf[1] = ev; - - if (invalidate) - buf[1] |= (1 << 1); - - PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n", - ev >> 3, invalidate ? ", I" : ""); - - return SZ_DMAWFE; -} - static inline u32 _emit_WFP(unsigned dry_run, u8 buf[], enum pl330_cond cond, u8 peri) { @@ -988,10 +856,11 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[], buf[0] = CMD_DMAGO; buf[0] |= (ns << 1); - buf[1] = chan & 0x7; - - *((u32 *)&buf[2]) = addr; + buf[2] = addr; + buf[3] = addr >> 8; + buf[4] = addr >> 16; + buf[5] = addr >> 24; return SZ_DMAGO; } @@ -1001,7 +870,7 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[], /* Returns Time-Out */ static bool _until_dmac_idle(struct pl330_thread *thrd) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; unsigned long loops = msecs_to_loops(5); do { @@ -1021,9 +890,15 @@ static bool _until_dmac_idle(struct pl330_thread *thrd) static inline void _execute_DBGINSN(struct pl330_thread *thrd, u8 insn[], bool as_manager) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; u32 val; + /* If timed out due to halted state-machine */ + if (_until_dmac_idle(thrd)) { + dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n"); + return; + } + val = (insn[0] << 16) | (insn[1] << 24); if (!as_manager) { val |= (1 << 0); @@ -1031,38 +906,16 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd, } writel(val, regs + DBGINST0); - val = *((u32 *)&insn[2]); + val = le32_to_cpu(*((__le32 *)&insn[2])); writel(val, regs + DBGINST1); - /* If timed out due to halted state-machine */ - if (_until_dmac_idle(thrd)) { - dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n"); - return; - } - /* Get going */ writel(0, regs + DBGCMD); } -/* - * Mark a _pl330_req as free. - * We do it by writing DMAEND as the first instruction - * because no valid request is going to have DMAEND as - * its first instruction to execute. 
- */ -static void mark_free(struct pl330_thread *thrd, int idx) -{ - struct _pl330_req *req = &thrd->req[idx]; - - _emit_END(0, req->mc_cpu); - req->mc_len = 0; - - thrd->req_running = -1; -} - static inline u32 _state(struct pl330_thread *thrd) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; u32 val; if (is_manager(thrd)) @@ -1120,8 +973,9 @@ static inline u32 _state(struct pl330_thread *thrd) static void _stop(struct pl330_thread *thrd) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; u8 insn[6] = {0, 0, 0, 0, 0, 0}; + u32 inten = readl(regs + INTEN); if (_state(thrd) == PL330_STATE_FAULT_COMPLETING) UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING); @@ -1134,18 +988,21 @@ static void _stop(struct pl330_thread *thrd) _emit_KILL(0, insn); - /* Stop generating interrupts for SEV */ - writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN); - _execute_DBGINSN(thrd, insn, is_manager(thrd)); + + /* clear the event */ + if (inten & (1 << thrd->ev)) + writel(1 << thrd->ev, regs + INTCLR); + /* Stop generating interrupts for SEV */ + writel(inten & ~(1 << thrd->ev), regs + INTEN); } /* Start doing req 'idx' of thread 'thrd' */ static bool _trigger(struct pl330_thread *thrd) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; struct _pl330_req *req; - struct pl330_req *r; + struct dma_pl330_desc *desc; struct _arg_GO go; unsigned ns; u8 insn[6] = {0, 0, 0, 0, 0, 0}; @@ -1156,32 +1013,31 @@ static bool _trigger(struct pl330_thread *thrd) return true; idx = 1 - thrd->lstenq; - if (!IS_FREE(&thrd->req[idx])) + if (thrd->req[idx].desc != NULL) { req = &thrd->req[idx]; - else { + } else { idx = thrd->lstenq; - if (!IS_FREE(&thrd->req[idx])) + if (thrd->req[idx].desc != NULL) req = &thrd->req[idx]; else req = NULL; } /* Return if no request */ - if (!req || !req->r) + if (!req) return true; - r = req->r; + /* Return if req is running */ + if (idx == thrd->req_running) + return true; - if (r->cfg) - ns = r->cfg->nonsecure ? 1 : 0; - else if (readl(regs + CS(thrd->id)) & CS_CNS) - ns = 1; - else - ns = 0; + desc = req->desc; + + ns = desc->rqcfg.nonsecure ? 
1 : 0; /* See 'Abort Sources' point-4 at Page 2-25 */ if (_manager_ns(thrd) && !ns) - dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n", + dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n", __func__, __LINE__); go.chan = thrd->id; @@ -1200,7 +1056,7 @@ static bool _trigger(struct pl330_thread *thrd) return true; } -static bool _start(struct pl330_thread *thrd) +static bool pl330_start_thread(struct pl330_thread *thrd) { switch (_state(thrd)) { case PL330_STATE_FAULT_COMPLETING: @@ -1208,13 +1064,16 @@ static bool _start(struct pl330_thread *thrd) if (_state(thrd) == PL330_STATE_KILLING) UNTIL(thrd, PL330_STATE_STOPPED) + fallthrough; case PL330_STATE_FAULTING: _stop(thrd); + fallthrough; case PL330_STATE_KILLING: case PL330_STATE_COMPLETING: UNTIL(thrd, PL330_STATE_STOPPED) + fallthrough; case PL330_STATE_STOPPED: return _trigger(thrd); @@ -1237,7 +1096,7 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[], const struct _xfer_spec *pxs, int cyc) { int off = 0; - struct pl330_config *pcfg = pxs->r->cfg->pcfg; + struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg; /* check lock-up free version */ if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) { @@ -1257,53 +1116,170 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[], return off; } -static inline int _ldst_devtomem(unsigned dry_run, u8 buf[], - const struct _xfer_spec *pxs, int cyc) +static u32 _emit_load(unsigned int dry_run, u8 buf[], + enum pl330_cond cond, enum dma_transfer_direction direction, + u8 peri) { int off = 0; - while (cyc--) { - off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); - off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri); - off += _emit_ST(dry_run, &buf[off], ALWAYS); - off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); + switch (direction) { + case DMA_MEM_TO_MEM: + case DMA_MEM_TO_DEV: + off += _emit_LD(dry_run, &buf[off], cond); + break; + + case DMA_DEV_TO_MEM: + if (cond == ALWAYS) { + off += _emit_LDP(dry_run, &buf[off], SINGLE, + peri); + off += _emit_LDP(dry_run, &buf[off], BURST, + peri); + } else { + off += _emit_LDP(dry_run, &buf[off], cond, + peri); + } + break; + + default: + /* this code should be unreachable */ + WARN_ON(1); + break; } return off; } -static inline int _ldst_memtodev(unsigned dry_run, u8 buf[], - const struct _xfer_spec *pxs, int cyc) +static inline u32 _emit_store(unsigned int dry_run, u8 buf[], + enum pl330_cond cond, enum dma_transfer_direction direction, + u8 peri) +{ + int off = 0; + + switch (direction) { + case DMA_MEM_TO_MEM: + case DMA_DEV_TO_MEM: + off += _emit_ST(dry_run, &buf[off], cond); + break; + + case DMA_MEM_TO_DEV: + if (cond == ALWAYS) { + off += _emit_STP(dry_run, &buf[off], SINGLE, + peri); + off += _emit_STP(dry_run, &buf[off], BURST, + peri); + } else { + off += _emit_STP(dry_run, &buf[off], cond, + peri); + } + break; + + default: + /* this code should be unreachable */ + WARN_ON(1); + break; + } + + return off; +} + +static inline int _ldst_peripheral(struct pl330_dmac *pl330, + unsigned dry_run, u8 buf[], + const struct _xfer_spec *pxs, int cyc, + enum pl330_cond cond) { int off = 0; + /* + * do FLUSHP at beginning to clear any stale dma requests before the + * first WFP. 
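pl330_start_thread() above gains explicit fallthrough annotations between its case labels; in the kernel the macro comes from <linux/compiler_attributes.h> and tells -Wimplicit-fallthrough that the missing break is intentional. A minimal stand-alone approximation of the construct:

#define fallthrough __attribute__((__fallthrough__))

static int settle_to_stopped(int state)
{
	switch (state) {
	case 2:			/* e.g. KILLING: drain, then act as STOPPED */
		state = 1;
		fallthrough;
	case 1:			/* e.g. STOPPED: ready to trigger */
		return 1;
	default:
		return 0;
	}
}
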
+ */ + if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)) + off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); while (cyc--) { - off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); - off += _emit_LD(dry_run, &buf[off], ALWAYS); - off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri); - off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); + off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri); + off += _emit_load(dry_run, &buf[off], cond, pxs->desc->rqtype, + pxs->desc->peri); + off += _emit_store(dry_run, &buf[off], cond, pxs->desc->rqtype, + pxs->desc->peri); } return off; } -static int _bursts(unsigned dry_run, u8 buf[], +static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[], const struct _xfer_spec *pxs, int cyc) { int off = 0; + enum pl330_cond cond = BRST_LEN(pxs->ccr) > 1 ? BURST : SINGLE; - switch (pxs->r->rqtype) { - case MEMTODEV: - off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc); - break; - case DEVTOMEM: - off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc); + if (pl330->quirks & PL330_QUIRK_PERIPH_BURST) + cond = BURST; + + switch (pxs->desc->rqtype) { + case DMA_MEM_TO_DEV: + case DMA_DEV_TO_MEM: + off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc, + cond); break; - case MEMTOMEM: + + case DMA_MEM_TO_MEM: off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); break; + default: - off += 0x40000000; /* Scare off the Client */ + /* this code should be unreachable */ + WARN_ON(1); + break; + } + + return off; +} + +/* + * only the unaligned burst transfers have the dregs. + * so, still transfer dregs with a reduced size burst + * for mem-to-mem, mem-to-dev or dev-to-mem. + */ +static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[], + const struct _xfer_spec *pxs, int transfer_length) +{ + int off = 0; + int dregs_ccr; + + if (transfer_length == 0) + return off; + + /* + * dregs_len = (total bytes - BURST_TO_BYTE(bursts, ccr)) / + * BRST_SIZE(ccr) + * the dregs len must be smaller than burst len, + * so, for higher efficiency, we can modify CCR + * to use a reduced size burst len for the dregs. 
+ */ + dregs_ccr = pxs->ccr; + dregs_ccr &= ~((0xf << CC_SRCBRSTLEN_SHFT) | + (0xf << CC_DSTBRSTLEN_SHFT)); + dregs_ccr |= (((transfer_length - 1) & 0xf) << + CC_SRCBRSTLEN_SHFT); + dregs_ccr |= (((transfer_length - 1) & 0xf) << + CC_DSTBRSTLEN_SHFT); + + switch (pxs->desc->rqtype) { + case DMA_MEM_TO_DEV: + case DMA_DEV_TO_MEM: + off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr); + off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, 1, + BURST); + break; + + case DMA_MEM_TO_MEM: + off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr); + off += _ldst_memtomem(dry_run, &buf[off], pxs, 1); + break; + + default: + /* this code should be unreachable */ + WARN_ON(1); break; } @@ -1311,13 +1287,16 @@ static int _bursts(unsigned dry_run, u8 buf[], } /* Returns bytes consumed and updates bursts */ -static inline int _loop(unsigned dry_run, u8 buf[], +static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[], unsigned long *bursts, const struct _xfer_spec *pxs) { int cyc, cycmax, szlp, szlpend, szbrst, off; unsigned lcnt0, lcnt1, ljmp0, ljmp1; struct _arg_LPEND lpend; + if (*bursts == 1) + return _bursts(pl330, dry_run, buf, pxs, 1); + /* Max iterations possible in DMALP is 256 */ if (*bursts >= 256*256) { lcnt1 = 256; @@ -1334,7 +1313,7 @@ static inline int _loop(unsigned dry_run, u8 buf[], } szlp = _emit_LP(1, buf, 0, 0); - szbrst = _bursts(1, buf, pxs, 1); + szbrst = _bursts(pl330, 1, buf, pxs, 1); lpend.cond = ALWAYS; lpend.forever = false; @@ -1366,7 +1345,7 @@ static inline int _loop(unsigned dry_run, u8 buf[], off += _emit_LP(dry_run, &buf[off], 1, lcnt1); ljmp1 = off; - off += _bursts(dry_run, &buf[off], pxs, cyc); + off += _bursts(pl330, dry_run, &buf[off], pxs, cyc); lpend.cond = ALWAYS; lpend.forever = false; @@ -1389,27 +1368,32 @@ static inline int _loop(unsigned dry_run, u8 buf[], return off; } -static inline int _setup_loops(unsigned dry_run, u8 buf[], - const struct _xfer_spec *pxs) +static inline int _setup_loops(struct pl330_dmac *pl330, + unsigned dry_run, u8 buf[], + const struct _xfer_spec *pxs) { - struct pl330_xfer *x = pxs->x; + struct pl330_xfer *x = &pxs->desc->px; u32 ccr = pxs->ccr; unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr); + int num_dregs = (x->bytes - BURST_TO_BYTE(bursts, ccr)) / + BRST_SIZE(ccr); int off = 0; while (bursts) { c = bursts; - off += _loop(dry_run, &buf[off], &c, pxs); + off += _loop(pl330, dry_run, &buf[off], &c, pxs); bursts -= c; } + off += _dregs(pl330, dry_run, &buf[off], pxs, num_dregs); return off; } -static inline int _setup_xfer(unsigned dry_run, u8 buf[], - const struct _xfer_spec *pxs) +static inline int _setup_xfer(struct pl330_dmac *pl330, + unsigned dry_run, u8 buf[], + const struct _xfer_spec *pxs) { - struct pl330_xfer *x = pxs->x; + struct pl330_xfer *x = &pxs->desc->px; int off = 0; /* DMAMOV SAR, x->src_addr */ @@ -1418,7 +1402,7 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[], off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr); /* Setup Loop(s) */ - off += _setup_loops(dry_run, &buf[off], pxs); + off += _setup_loops(pl330, dry_run, &buf[off], pxs); return off; } @@ -1427,11 +1411,11 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[], * A req is a sequence of one or more xfer units. * Returns the number of bytes taken to setup the MC for the req. 
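A worked example of the _dregs() arithmetic above, assuming a CCR programmed for 4-byte beats and 16-beat bursts (a stand-alone sketch, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned brst_size = 4, brst_len = 16, bytes = 100;
	unsigned burst_bytes = brst_size * brst_len;	/* 64 bytes per burst */
	unsigned bursts = bytes / burst_bytes;		/* 1 full burst */
	unsigned dregs = (bytes - bursts * burst_bytes) / brst_size; /* 9 beats */

	/* _dregs() rewrites both CCR burst-length fields to dregs - 1,
	 * so the 36 leftover bytes go out as one reduced 9-beat burst. */
	printf("bursts=%u dregs=%u ccr_len_field=%u\n",
	       bursts, dregs, dregs - 1);
	return 0;
}
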
*/ -static int _setup_req(unsigned dry_run, struct pl330_thread *thrd, - unsigned index, struct _xfer_spec *pxs) +static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run, + struct pl330_thread *thrd, unsigned index, + struct _xfer_spec *pxs) { struct _pl330_req *req = &thrd->req[index]; - struct pl330_xfer *x; u8 *buf = req->mc_cpu; int off = 0; @@ -1440,17 +1424,7 @@ static int _setup_req(unsigned dry_run, struct pl330_thread *thrd, /* DMAMOV CCR, ccr */ off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr); - x = pxs->r->x; - do { - /* Error if xfer length is not aligned at burst size */ - if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) - return -EINVAL; - - pxs->x = x; - off += _setup_xfer(dry_run, &buf[off], pxs); - - x = x->next; - } while (x); + off += _setup_xfer(pl330, dry_run, &buf[off], pxs); /* DMASEV peripheral/event */ off += _emit_SEV(dry_run, &buf[off], thrd->ev); @@ -1492,58 +1466,48 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) return ccr; } -static inline bool _is_valid(u32 ccr) -{ - enum pl330_dstcachectrl dcctl; - enum pl330_srccachectrl scctl; - - dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK; - scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK; - - if (dcctl == DINVALID1 || dcctl == DINVALID2 - || scctl == SINVALID1 || scctl == SINVALID2) - return false; - else - return true; -} - /* * Submit a list of xfers after which the client wants notification. * Client is not notified after each xfer unit, just once after all * xfer units are done or some error occurs. */ -static int pl330_submit_req(void *ch_id, struct pl330_req *r) +static int pl330_submit_req(struct pl330_thread *thrd, + struct dma_pl330_desc *desc) { - struct pl330_thread *thrd = ch_id; - struct pl330_dmac *pl330; - struct pl330_info *pi; + struct pl330_dmac *pl330 = thrd->dmac; struct _xfer_spec xs; unsigned long flags; - void __iomem *regs; unsigned idx; u32 ccr; int ret = 0; - /* No Req or Unacquired Channel or DMAC */ - if (!r || !thrd || thrd->free) - return -EINVAL; + switch (desc->rqtype) { + case DMA_MEM_TO_DEV: + break; - pl330 = thrd->dmac; - pi = pl330->pinfo; - regs = pi->base; + case DMA_DEV_TO_MEM: + break; + + case DMA_MEM_TO_MEM: + break; + + default: + return -ENOTSUPP; + } if (pl330->state == DYING || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { - dev_info(thrd->dmac->pinfo->dev, "%s:%d\n", + dev_info(thrd->dmac->ddma.dev, "%s:%d\n", __func__, __LINE__); return -EAGAIN; } /* If request for non-existing peripheral */ - if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) { - dev_info(thrd->dmac->pinfo->dev, + if (desc->rqtype != DMA_MEM_TO_MEM && + desc->peri >= pl330->pcfg.num_peri) { + dev_info(thrd->dmac->ddma.dev, "%s:%d Invalid peripheral(%u)!\n", - __func__, __LINE__, r->peri); + __func__, __LINE__, desc->peri); return -EINVAL; } @@ -1554,50 +1518,33 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r) goto xfer_exit; } + /* Prefer Secure Channel */ + if (!_manager_ns(thrd)) + desc->rqcfg.nonsecure = 0; + else + desc->rqcfg.nonsecure = 1; - /* Use last settings, if not provided */ - if (r->cfg) { - /* Prefer Secure Channel */ - if (!_manager_ns(thrd)) - r->cfg->nonsecure = 0; - else - r->cfg->nonsecure = 1; + ccr = _prepare_ccr(&desc->rqcfg); - ccr = _prepare_ccr(r->cfg); - } else { - ccr = readl(regs + CC(thrd->id)); - } - - /* If this req doesn't have valid xfer settings */ - if (!_is_valid(ccr)) { - ret = -EINVAL; - dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n", - __func__, __LINE__, ccr); - goto 
xfer_exit; - } - - idx = IS_FREE(&thrd->req[0]) ? 0 : 1; + idx = thrd->req[0].desc == NULL ? 0 : 1; xs.ccr = ccr; - xs.r = r; + xs.desc = desc; /* First dry run to check if req is acceptable */ - ret = _setup_req(1, thrd, idx, &xs); - if (ret < 0) - goto xfer_exit; + ret = _setup_req(pl330, 1, thrd, idx, &xs); - if (ret > pi->mcbufsz / 2) { - dev_info(thrd->dmac->pinfo->dev, - "%s:%d Trying increasing mcbufsz\n", - __func__, __LINE__); + if (ret > pl330->mcbufsz / 2) { + dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n", + __func__, __LINE__, ret, pl330->mcbufsz / 2); ret = -ENOMEM; goto xfer_exit; } /* Hook the request */ thrd->lstenq = idx; - thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs); - thrd->req[idx].r = r; + thrd->req[idx].desc = desc; + _setup_req(pl330, 0, thrd, idx, &xs); ret = 0; @@ -1607,10 +1554,32 @@ xfer_exit: return ret; } -static void pl330_dotask(unsigned long data) +static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err) +{ + struct dma_pl330_chan *pch; + unsigned long flags; + + if (!desc) + return; + + pch = desc->pchan; + + /* If desc aborted */ + if (!pch) + return; + + spin_lock_irqsave(&pch->lock, flags); + + desc->status = DONE; + + spin_unlock_irqrestore(&pch->lock, flags); + + tasklet_schedule(&pch->task); +} + +static void pl330_dotask(struct tasklet_struct *t) { - struct pl330_dmac *pl330 = (struct pl330_dmac *) data; - struct pl330_info *pi = pl330->pinfo; + struct pl330_dmac *pl330 = from_tasklet(pl330, t, tasks); unsigned long flags; int i; @@ -1628,16 +1597,16 @@ static void pl330_dotask(unsigned long data) if (pl330->dmac_tbd.reset_mngr) { _stop(pl330->manager); /* Reset all channels */ - pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1; + pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1; /* Clear the reset flag */ pl330->dmac_tbd.reset_mngr = false; } - for (i = 0; i < pi->pcfg.num_chan; i++) { + for (i = 0; i < pl330->pcfg.num_chan; i++) { if (pl330->dmac_tbd.reset_chan & (1 << i)) { struct pl330_thread *thrd = &pl330->channels[i]; - void __iomem *regs = pi->base; + void __iomem *regs = pl330->base; enum pl330_op_err err; _stop(thrd); @@ -1648,16 +1617,13 @@ static void pl330_dotask(unsigned long data) err = PL330_ERR_ABORT; spin_unlock_irqrestore(&pl330->lock, flags); - - _callback(thrd->req[1 - thrd->lstenq].r, err); - _callback(thrd->req[thrd->lstenq].r, err); - + dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err); + dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err); spin_lock_irqsave(&pl330->lock, flags); - thrd->req[0].r = NULL; - thrd->req[1].r = NULL; - mark_free(thrd, 0); - mark_free(thrd, 1); + thrd->req[0].desc = NULL; + thrd->req[1].desc = NULL; + thrd->req_running = -1; /* Clear the reset flag */ pl330->dmac_tbd.reset_chan &= ~(1 << i); @@ -1670,20 +1636,15 @@ static void pl330_dotask(unsigned long data) } /* Returns 1 if state was updated, 0 otherwise */ -static int pl330_update(const struct pl330_info *pi) +static int pl330_update(struct pl330_dmac *pl330) { - struct pl330_req *rqdone, *tmp; - struct pl330_dmac *pl330; + struct dma_pl330_desc *descdone; unsigned long flags; void __iomem *regs; u32 val; int id, ev, ret = 0; - if (!pi || !pi->pl330_data) - return 0; - - regs = pi->base; - pl330 = pi->pl330_data; + regs = pl330->base; spin_lock_irqsave(&pl330->lock, flags); @@ -1693,13 +1654,13 @@ static int pl330_update(const struct pl330_info *pi) else pl330->dmac_tbd.reset_mngr = false; - val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1); + val = readl(regs + 
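Note the two-pass pattern in pl330_submit_req() above: _setup_req() is first run with dry_run set, purely to measure how much microcode the request needs against half of mcbufsz, and only then run for real. A stand-alone sketch of that measure-then-emit idiom, with hypothetical gen()/emit_checked() names:

#include <stddef.h>
#include <errno.h>

static size_t gen(int dry_run, unsigned char *buf, size_t insns)
{
	size_t off = 0;

	while (insns--) {
		if (!dry_run)
			buf[off] = 0x34;	/* some opcode byte */
		off++;				/* size is counted either way */
	}
	return off;
}

static int emit_checked(unsigned char *buf, size_t bufsz, size_t insns)
{
	if (gen(1, NULL, insns) > bufsz)	/* pass 1: measure only */
		return -ENOMEM;
	gen(0, buf, insns);			/* pass 2: really emit */
	return 0;
}
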
FSC) & ((1 << pl330->pcfg.num_chan) - 1); pl330->dmac_tbd.reset_chan |= val; if (val) { int i = 0; - while (i < pi->pcfg.num_chan) { + while (i < pl330->pcfg.num_chan) { if (val & (1 << i)) { - dev_info(pi->dev, + dev_info(pl330->ddma.dev, "Reset Channel-%d\t CS-%x FTC-%x\n", i, readl(regs + CS(i)), readl(regs + FTC(i))); @@ -1711,15 +1672,16 @@ static int pl330_update(const struct pl330_info *pi) /* Check which event happened i.e, thread notified */ val = readl(regs + ES); - if (pi->pcfg.num_events < 32 - && val & ~((1 << pi->pcfg.num_events) - 1)) { + if (pl330->pcfg.num_events < 32 + && val & ~((1 << pl330->pcfg.num_events) - 1)) { pl330->dmac_tbd.reset_dmac = true; - dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__); + dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__, + __LINE__); ret = 1; goto updt_exit; } - for (ev = 0; ev < pi->pcfg.num_events; ev++) { + for (ev = 0; ev < pl330->pcfg.num_events; ev++) { if (val & (1 << ev)) { /* Event occurred */ struct pl330_thread *thrd; u32 inten = readl(regs + INTEN); @@ -1740,25 +1702,26 @@ static int pl330_update(const struct pl330_info *pi) continue; /* Detach the req */ - rqdone = thrd->req[active].r; - thrd->req[active].r = NULL; + descdone = thrd->req[active].desc; + thrd->req[active].desc = NULL; - mark_free(thrd, active); + thrd->req_running = -1; /* Get going again ASAP */ - _start(thrd); + pl330_start_thread(thrd); /* For now, just make a list of callbacks to be done */ - list_add_tail(&rqdone->rqd, &pl330->req_done); + list_add_tail(&descdone->rqd, &pl330->req_done); } } /* Now that we are in no hurry, do the callbacks */ - list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) { - list_del(&rqdone->rqd); - + while (!list_empty(&pl330->req_done)) { + descdone = list_first_entry(&pl330->req_done, + struct dma_pl330_desc, rqd); + list_del(&descdone->rqd); spin_unlock_irqrestore(&pl330->lock, flags); - _callback(rqdone, PL330_ERR_NONE); + dma_pl330_rqcb(descdone, PL330_ERR_NONE); spin_lock_irqsave(&pl330->lock, flags); } @@ -1775,65 +1738,13 @@ updt_exit: return ret; } -static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op) -{ - struct pl330_thread *thrd = ch_id; - struct pl330_dmac *pl330; - unsigned long flags; - int ret = 0, active; - - if (!thrd || thrd->free || thrd->dmac->state == DYING) - return -EINVAL; - - pl330 = thrd->dmac; - active = thrd->req_running; - - spin_lock_irqsave(&pl330->lock, flags); - - switch (op) { - case PL330_OP_FLUSH: - /* Make sure the channel is stopped */ - _stop(thrd); - - thrd->req[0].r = NULL; - thrd->req[1].r = NULL; - mark_free(thrd, 0); - mark_free(thrd, 1); - break; - - case PL330_OP_ABORT: - /* Make sure the channel is stopped */ - _stop(thrd); - - /* ABORT is only for the active req */ - if (active == -1) - break; - - thrd->req[active].r = NULL; - mark_free(thrd, active); - - /* Start the next */ - case PL330_OP_START: - if ((active == -1) && !_start(thrd)) - ret = -EIO; - break; - - default: - ret = -EINVAL; - } - - spin_unlock_irqrestore(&pl330->lock, flags); - return ret; -} - /* Reserve an event */ static inline int _alloc_event(struct pl330_thread *thrd) { struct pl330_dmac *pl330 = thrd->dmac; - struct pl330_info *pi = pl330->pinfo; int ev; - for (ev = 0; ev < pi->pcfg.num_events; ev++) + for (ev = 0; ev < pl330->pcfg.num_events; ev++) if (pl330->events[ev] == -1) { pl330->events[ev] = thrd->id; return ev; @@ -1842,53 +1753,41 @@ static inline int _alloc_event(struct pl330_thread *thrd) return -1; } -static bool _chan_ns(const struct pl330_info *pi, int i) 
+static bool _chan_ns(const struct pl330_dmac *pl330, int i) { - return pi->pcfg.irq_ns & (1 << i); + return pl330->pcfg.irq_ns & (1 << i); } /* Upon success, returns IdentityToken for the * allocated channel, NULL otherwise. */ -static void *pl330_request_channel(const struct pl330_info *pi) +static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) { struct pl330_thread *thrd = NULL; - struct pl330_dmac *pl330; - unsigned long flags; int chans, i; - if (!pi || !pi->pl330_data) - return NULL; - - pl330 = pi->pl330_data; - if (pl330->state == DYING) return NULL; - chans = pi->pcfg.num_chan; - - spin_lock_irqsave(&pl330->lock, flags); + chans = pl330->pcfg.num_chan; for (i = 0; i < chans; i++) { thrd = &pl330->channels[i]; if ((thrd->free) && (!_manager_ns(thrd) || - _chan_ns(pi, i))) { + _chan_ns(pl330, i))) { thrd->ev = _alloc_event(thrd); if (thrd->ev >= 0) { thrd->free = false; thrd->lstenq = 1; - thrd->req[0].r = NULL; - mark_free(thrd, 0); - thrd->req[1].r = NULL; - mark_free(thrd, 1); + thrd->req[0].desc = NULL; + thrd->req[1].desc = NULL; + thrd->req_running = -1; break; } } thrd = NULL; } - spin_unlock_irqrestore(&pl330->lock, flags); - return thrd; } @@ -1896,110 +1795,99 @@ static void *pl330_request_channel(const struct pl330_info *pi) static inline void _free_event(struct pl330_thread *thrd, int ev) { struct pl330_dmac *pl330 = thrd->dmac; - struct pl330_info *pi = pl330->pinfo; /* If the event is valid and was held by the thread */ - if (ev >= 0 && ev < pi->pcfg.num_events + if (ev >= 0 && ev < pl330->pcfg.num_events && pl330->events[ev] == thrd->id) pl330->events[ev] = -1; } -static void pl330_release_channel(void *ch_id) +static void pl330_release_channel(struct pl330_thread *thrd) { - struct pl330_thread *thrd = ch_id; - struct pl330_dmac *pl330; - unsigned long flags; - if (!thrd || thrd->free) return; _stop(thrd); - _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT); - _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT); - - pl330 = thrd->dmac; + dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT); + dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT); - spin_lock_irqsave(&pl330->lock, flags); _free_event(thrd, thrd->ev); thrd->free = true; - spin_unlock_irqrestore(&pl330->lock, flags); } /* Initialize the structure for PL330 configuration, that can be used * by the client driver the make best use of the DMAC */ -static void read_dmac_config(struct pl330_info *pi) +static void read_dmac_config(struct pl330_dmac *pl330) { - void __iomem *regs = pi->base; + void __iomem *regs = pl330->base; u32 val; val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT; val &= CRD_DATA_WIDTH_MASK; - pi->pcfg.data_bus_width = 8 * (1 << val); + pl330->pcfg.data_bus_width = 8 * (1 << val); val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT; val &= CRD_DATA_BUFF_MASK; - pi->pcfg.data_buf_dep = val + 1; + pl330->pcfg.data_buf_dep = val + 1; val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT; val &= CR0_NUM_CHANS_MASK; val += 1; - pi->pcfg.num_chan = val; + pl330->pcfg.num_chan = val; val = readl(regs + CR0); if (val & CR0_PERIPH_REQ_SET) { val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK; val += 1; - pi->pcfg.num_peri = val; - pi->pcfg.peri_ns = readl(regs + CR4); + pl330->pcfg.num_peri = val; + pl330->pcfg.peri_ns = readl(regs + CR4); } else { - pi->pcfg.num_peri = 0; + pl330->pcfg.num_peri = 0; } val = readl(regs + CR0); if (val & CR0_BOOT_MAN_NS) - pi->pcfg.mode |= DMAC_MODE_NS; + pl330->pcfg.mode |= DMAC_MODE_NS; else - pi->pcfg.mode &= 
~DMAC_MODE_NS; + pl330->pcfg.mode &= ~DMAC_MODE_NS; val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT; val &= CR0_NUM_EVENTS_MASK; val += 1; - pi->pcfg.num_events = val; + pl330->pcfg.num_events = val; - pi->pcfg.irq_ns = readl(regs + CR3); + pl330->pcfg.irq_ns = readl(regs + CR3); } static inline void _reset_thread(struct pl330_thread *thrd) { struct pl330_dmac *pl330 = thrd->dmac; - struct pl330_info *pi = pl330->pinfo; thrd->req[0].mc_cpu = pl330->mcode_cpu - + (thrd->id * pi->mcbufsz); + + (thrd->id * pl330->mcbufsz); thrd->req[0].mc_bus = pl330->mcode_bus - + (thrd->id * pi->mcbufsz); - thrd->req[0].r = NULL; - mark_free(thrd, 0); + + (thrd->id * pl330->mcbufsz); + thrd->req[0].desc = NULL; thrd->req[1].mc_cpu = thrd->req[0].mc_cpu - + pi->mcbufsz / 2; + + pl330->mcbufsz / 2; thrd->req[1].mc_bus = thrd->req[0].mc_bus - + pi->mcbufsz / 2; - thrd->req[1].r = NULL; - mark_free(thrd, 1); + + pl330->mcbufsz / 2; + thrd->req[1].desc = NULL; + + thrd->req_running = -1; } static int dmac_alloc_threads(struct pl330_dmac *pl330) { - struct pl330_info *pi = pl330->pinfo; - int chans = pi->pcfg.num_chan; + int chans = pl330->pcfg.num_chan; struct pl330_thread *thrd; int i; /* Allocate 1 Manager and 'chans' Channel threads */ - pl330->channels = kzalloc((1 + chans) * sizeof(*thrd), + pl330->channels = kcalloc(1 + chans, sizeof(*thrd), GFP_KERNEL); if (!pl330->channels) return -ENOMEM; @@ -2025,105 +1913,77 @@ static int dmac_alloc_threads(struct pl330_dmac *pl330) static int dmac_alloc_resources(struct pl330_dmac *pl330) { - struct pl330_info *pi = pl330->pinfo; - int chans = pi->pcfg.num_chan; + int chans = pl330->pcfg.num_chan; int ret; /* * Alloc MicroCode buffer for 'chans' Channel threads. * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) */ - pl330->mcode_cpu = dma_alloc_coherent(pi->dev, - chans * pi->mcbufsz, - &pl330->mcode_bus, GFP_KERNEL); + pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev, + chans * pl330->mcbufsz, + &pl330->mcode_bus, GFP_KERNEL, + DMA_ATTR_PRIVILEGED); if (!pl330->mcode_cpu) { - dev_err(pi->dev, "%s:%d Can't allocate memory!\n", + dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n", __func__, __LINE__); return -ENOMEM; } ret = dmac_alloc_threads(pl330); if (ret) { - dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n", + dev_err(pl330->ddma.dev, "%s:%d Can't to create channels for DMAC!\n", __func__, __LINE__); - dma_free_coherent(pi->dev, - chans * pi->mcbufsz, - pl330->mcode_cpu, pl330->mcode_bus); + dma_free_attrs(pl330->ddma.dev, + chans * pl330->mcbufsz, + pl330->mcode_cpu, pl330->mcode_bus, + DMA_ATTR_PRIVILEGED); return ret; } return 0; } -static int pl330_add(struct pl330_info *pi) +static int pl330_add(struct pl330_dmac *pl330) { - struct pl330_dmac *pl330; - void __iomem *regs; int i, ret; - if (!pi || !pi->dev) - return -EINVAL; - - /* If already added */ - if (pi->pl330_data) - return -EINVAL; - - /* - * If the SoC can perform reset on the DMAC, then do it - * before reading its configuration. 
- */ - if (pi->dmac_reset) - pi->dmac_reset(pi); - - regs = pi->base; - /* Check if we can handle this DMAC */ - if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { - dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id); + if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { + dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n", + pl330->pcfg.periph_id); return -EINVAL; } /* Read the configuration of the DMAC */ - read_dmac_config(pi); + read_dmac_config(pl330); - if (pi->pcfg.num_events == 0) { - dev_err(pi->dev, "%s:%d Can't work without events!\n", + if (pl330->pcfg.num_events == 0) { + dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n", __func__, __LINE__); return -EINVAL; } - pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL); - if (!pl330) { - dev_err(pi->dev, "%s:%d Can't allocate memory!\n", - __func__, __LINE__); - return -ENOMEM; - } - - /* Assign the info structure and private data */ - pl330->pinfo = pi; - pi->pl330_data = pl330; - spin_lock_init(&pl330->lock); INIT_LIST_HEAD(&pl330->req_done); /* Use default MC buffer size if not provided */ - if (!pi->mcbufsz) - pi->mcbufsz = MCODE_BUFF_PER_REQ * 2; + if (!pl330->mcbufsz) + pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2; /* Mark all events as free */ - for (i = 0; i < pi->pcfg.num_events; i++) + for (i = 0; i < pl330->pcfg.num_events; i++) pl330->events[i] = -1; /* Allocate resources needed by the DMAC */ ret = dmac_alloc_resources(pl330); if (ret) { - dev_err(pi->dev, "Unable to create channels for DMAC\n"); - kfree(pl330); + dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n"); return ret; } - tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330); + tasklet_setup(&pl330->tasks, pl330_dotask); pl330->state = INIT; @@ -2132,15 +1992,13 @@ static int pl330_add(struct pl330_info *pi) static int dmac_free_threads(struct pl330_dmac *pl330) { - struct pl330_info *pi = pl330->pinfo; - int chans = pi->pcfg.num_chan; struct pl330_thread *thrd; int i; /* Release Channel threads */ - for (i = 0; i < chans; i++) { + for (i = 0; i < pl330->pcfg.num_chan; i++) { thrd = &pl330->channels[i]; - pl330_release_channel((void *)thrd); + pl330_release_channel(thrd); } /* Free memory */ @@ -2149,35 +2007,18 @@ static int dmac_free_threads(struct pl330_dmac *pl330) return 0; } -static void dmac_free_resources(struct pl330_dmac *pl330) -{ - struct pl330_info *pi = pl330->pinfo; - int chans = pi->pcfg.num_chan; - - dmac_free_threads(pl330); - - dma_free_coherent(pi->dev, chans * pi->mcbufsz, - pl330->mcode_cpu, pl330->mcode_bus); -} - -static void pl330_del(struct pl330_info *pi) +static void pl330_del(struct pl330_dmac *pl330) { - struct pl330_dmac *pl330; - - if (!pi || !pi->pl330_data) - return; - - pl330 = pi->pl330_data; - pl330->state = UNINIT; tasklet_kill(&pl330->tasks); /* Free DMAC resources */ - dmac_free_resources(pl330); + dmac_free_threads(pl330); - kfree(pl330); - pi->pl330_data = NULL; + dma_free_attrs(pl330->ddma.dev, + pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu, + pl330->mcode_bus, DMA_ATTR_PRIVILEGED); } /* forward declaration */ @@ -2198,66 +2039,6 @@ to_desc(struct dma_async_tx_descriptor *tx) return container_of(tx, struct dma_pl330_desc, txd); } -static inline void free_desc_list(struct list_head *list) -{ - struct dma_pl330_dmac *pdmac; - struct dma_pl330_desc *desc; - struct dma_pl330_chan *pch = NULL; - unsigned long flags; - - /* Finish off the work list */ - list_for_each_entry(desc, list, node) { - dma_async_tx_callback callback; - void *param; - - /* All desc in a list belong to 
same channel */ - pch = desc->pchan; - callback = desc->txd.callback; - param = desc->txd.callback_param; - - if (callback) - callback(param); - - desc->pchan = NULL; - } - - /* pch will be unset if list was empty */ - if (!pch) - return; - - pdmac = pch->dmac; - - spin_lock_irqsave(&pdmac->pool_lock, flags); - list_splice_tail_init(list, &pdmac->desc_pool); - spin_unlock_irqrestore(&pdmac->pool_lock, flags); -} - -static inline void handle_cyclic_desc_list(struct list_head *list) -{ - struct dma_pl330_desc *desc; - struct dma_pl330_chan *pch = NULL; - unsigned long flags; - - list_for_each_entry(desc, list, node) { - dma_async_tx_callback callback; - - /* Change status to reload it */ - desc->status = PREP; - pch = desc->pchan; - callback = desc->txd.callback; - if (callback) - callback(desc->txd.callback_param); - } - - /* pch will be unset if list was empty */ - if (!pch) - return; - - spin_lock_irqsave(&pch->lock, flags); - list_splice_tail_init(list, &pch->work_list); - spin_unlock_irqrestore(&pch->lock, flags); -} - static inline void fill_queue(struct dma_pl330_chan *pch) { struct dma_pl330_desc *desc; @@ -2266,11 +2047,10 @@ static inline void fill_queue(struct dma_pl330_chan *pch) list_for_each_entry(desc, &pch->work_list, node) { /* If already submitted */ - if (desc->status == BUSY) + if (desc->status == BUSY || desc->status == PAUSED) continue; - ret = pl330_submit_req(pch->pl330_chid, - &desc->req); + ret = pl330_submit_req(pch->thread, desc); if (!ret) { desc->status = BUSY; } else if (ret == -EAGAIN) { @@ -2279,19 +2059,19 @@ static inline void fill_queue(struct dma_pl330_chan *pch) } else { /* Unacceptable request */ desc->status = DONE; - dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n", + dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n", __func__, __LINE__, desc->txd.cookie); tasklet_schedule(&pch->task); } } } -static void pl330_tasklet(unsigned long data) +static void pl330_tasklet(struct tasklet_struct *t) { - struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data; + struct dma_pl330_chan *pch = from_tasklet(pch, t, task); struct dma_pl330_desc *desc, *_dt; unsigned long flags; - LIST_HEAD(list); + bool power_down = false; spin_lock_irqsave(&pch->lock, flags); @@ -2300,160 +2080,275 @@ static void pl330_tasklet(unsigned long data) if (desc->status == DONE) { if (!pch->cyclic) dma_cookie_complete(&desc->txd); - list_move_tail(&desc->node, &list); + list_move_tail(&desc->node, &pch->completed_list); } /* Try to submit a req imm. 
next to the last completed cookie */ fill_queue(pch); - /* Make sure the PL330 Channel thread is active */ - pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START); + if (list_empty(&pch->work_list)) { + spin_lock(&pch->thread->dmac->lock); + _stop(pch->thread); + spin_unlock(&pch->thread->dmac->lock); + power_down = true; + pch->active = false; + } else { + /* Make sure the PL330 Channel thread is active */ + spin_lock(&pch->thread->dmac->lock); + pl330_start_thread(pch->thread); + spin_unlock(&pch->thread->dmac->lock); + } + + while (!list_empty(&pch->completed_list)) { + struct dmaengine_desc_callback cb; + + desc = list_first_entry(&pch->completed_list, + struct dma_pl330_desc, node); + + dmaengine_desc_get_callback(&desc->txd, &cb); + + if (pch->cyclic) { + desc->status = PREP; + list_move_tail(&desc->node, &pch->work_list); + if (power_down) { + pch->active = true; + spin_lock(&pch->thread->dmac->lock); + pl330_start_thread(pch->thread); + spin_unlock(&pch->thread->dmac->lock); + power_down = false; + } + } else { + desc->status = FREE; + list_move_tail(&desc->node, &pch->dmac->desc_pool); + } + dma_descriptor_unmap(&desc->txd); + + if (dmaengine_desc_callback_valid(&cb)) { + spin_unlock_irqrestore(&pch->lock, flags); + dmaengine_desc_callback_invoke(&cb, NULL); + spin_lock_irqsave(&pch->lock, flags); + } + } spin_unlock_irqrestore(&pch->lock, flags); - if (pch->cyclic) - handle_cyclic_desc_list(&list); - else - free_desc_list(&list); + /* If work list empty, power down */ + if (power_down) { + pm_runtime_mark_last_busy(pch->dmac->ddma.dev); + pm_runtime_put_autosuspend(pch->dmac->ddma.dev); + } } -static void dma_pl330_rqcb(void *token, enum pl330_op_err err) +static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) { - struct dma_pl330_desc *desc = token; - struct dma_pl330_chan *pch = desc->pchan; + int count = dma_spec->args_count; + struct pl330_dmac *pl330 = ofdma->of_dma_data; + unsigned int chan_id; + + if (!pl330) + return NULL; + + if (count != 1) + return NULL; + + chan_id = dma_spec->args[0]; + if (chan_id >= pl330->num_peripherals) + return NULL; + + return dma_get_slave_channel(&pl330->peripherals[chan_id].chan); +} + +static int pl330_alloc_chan_resources(struct dma_chan *chan) +{ + struct dma_pl330_chan *pch = to_pchan(chan); + struct pl330_dmac *pl330 = pch->dmac; unsigned long flags; - /* If desc aborted */ - if (!pch) - return; + spin_lock_irqsave(&pl330->lock, flags); - spin_lock_irqsave(&pch->lock, flags); + dma_cookie_init(chan); + pch->cyclic = false; - desc->status = DONE; + pch->thread = pl330_request_channel(pl330); + if (!pch->thread) { + spin_unlock_irqrestore(&pl330->lock, flags); + return -ENOMEM; + } - spin_unlock_irqrestore(&pch->lock, flags); + tasklet_setup(&pch->task, pl330_tasklet); - tasklet_schedule(&pch->task); + spin_unlock_irqrestore(&pl330->lock, flags); + + return 1; } -static bool pl330_dt_filter(struct dma_chan *chan, void *param) +/* + * We need the data direction between the DMAC (the dma-mapping "device") and + * the FIFO (the dmaengine "dev"), from the FIFO's point of view. Confusing! 
+ */ +static enum dma_data_direction +pl330_dma_slave_map_dir(enum dma_transfer_direction dir) { - struct dma_pl330_filter_args *fargs = param; - - if (chan->device != &fargs->pdmac->ddma) - return false; + switch (dir) { + case DMA_MEM_TO_DEV: + return DMA_FROM_DEVICE; + case DMA_DEV_TO_MEM: + return DMA_TO_DEVICE; + case DMA_DEV_TO_DEV: + return DMA_BIDIRECTIONAL; + default: + return DMA_NONE; + } +} - return (chan->chan_id == fargs->chan_id); +static void pl330_unprep_slave_fifo(struct dma_pl330_chan *pch) +{ + if (pch->dir != DMA_NONE) + dma_unmap_resource(pch->chan.device->dev, pch->fifo_dma, + 1 << pch->burst_sz, pch->dir, 0); + pch->dir = DMA_NONE; } -bool pl330_filter(struct dma_chan *chan, void *param) + +static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch, + enum dma_transfer_direction dir) { - u8 *peri_id; + struct device *dev = pch->chan.device->dev; + enum dma_data_direction dma_dir = pl330_dma_slave_map_dir(dir); + + /* Already mapped for this config? */ + if (pch->dir == dma_dir) + return true; - if (chan->device->dev->driver != &pl330_driver.drv) + pl330_unprep_slave_fifo(pch); + pch->fifo_dma = dma_map_resource(dev, pch->fifo_addr, + 1 << pch->burst_sz, dma_dir, 0); + if (dma_mapping_error(dev, pch->fifo_dma)) return false; - peri_id = chan->private; - return *peri_id == (unsigned)param; + pch->dir = dma_dir; + return true; } -EXPORT_SYMBOL(pl330_filter); -static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec, - struct of_dma *ofdma) +static int fixup_burst_len(int max_burst_len, int quirks) { - int count = dma_spec->args_count; - struct dma_pl330_dmac *pdmac = ofdma->of_dma_data; - struct dma_pl330_filter_args fargs; - dma_cap_mask_t cap; + if (max_burst_len > PL330_MAX_BURST) + return PL330_MAX_BURST; + else if (max_burst_len < 1) + return 1; + else + return max_burst_len; +} - if (!pdmac) - return NULL; +static int pl330_config_write(struct dma_chan *chan, + struct dma_slave_config *slave_config, + enum dma_transfer_direction direction) +{ + struct dma_pl330_chan *pch = to_pchan(chan); - if (count != 1) - return NULL; + pl330_unprep_slave_fifo(pch); + if (direction == DMA_MEM_TO_DEV) { + if (slave_config->dst_addr) + pch->fifo_addr = slave_config->dst_addr; + if (slave_config->dst_addr_width) + pch->burst_sz = __ffs(slave_config->dst_addr_width); + pch->burst_len = fixup_burst_len(slave_config->dst_maxburst, + pch->dmac->quirks); + } else if (direction == DMA_DEV_TO_MEM) { + if (slave_config->src_addr) + pch->fifo_addr = slave_config->src_addr; + if (slave_config->src_addr_width) + pch->burst_sz = __ffs(slave_config->src_addr_width); + pch->burst_len = fixup_burst_len(slave_config->src_maxburst, + pch->dmac->quirks); + } - fargs.pdmac = pdmac; - fargs.chan_id = dma_spec->args[0]; + return 0; +} + +static int pl330_config(struct dma_chan *chan, + struct dma_slave_config *slave_config) +{ + struct dma_pl330_chan *pch = to_pchan(chan); - dma_cap_zero(cap); - dma_cap_set(DMA_SLAVE, cap); - dma_cap_set(DMA_CYCLIC, cap); + memcpy(&pch->slave_config, slave_config, sizeof(*slave_config)); - return dma_request_channel(cap, pl330_dt_filter, &fargs); + return 0; } -static int pl330_alloc_chan_resources(struct dma_chan *chan) +static int pl330_terminate_all(struct dma_chan *chan) { struct dma_pl330_chan *pch = to_pchan(chan); - struct dma_pl330_dmac *pdmac = pch->dmac; + struct dma_pl330_desc *desc; unsigned long flags; + struct pl330_dmac *pl330 = pch->dmac; + bool power_down = false; + pm_runtime_get_sync(pl330->ddma.dev); 
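For context on the split configuration above: pl330_config() merely copies the dma_slave_config, and pl330_config_write() applies the direction-specific half of it when a transfer is prepared, since only then is the direction known. A hedged client-side sketch, where chan, fifo_phys, buf_dma and len are stand-in variables:

	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {
		.dst_addr	= fifo_phys,	/* stand-in: device FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};

	dmaengine_slave_config(chan, &cfg);	/* only stored by pl330_config() */
	desc = dmaengine_prep_slave_single(chan, buf_dma, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	/* the prep call is what reaches pl330_config_write() */
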
-static int pl330_alloc_chan_resources(struct dma_chan *chan)
+static int pl330_terminate_all(struct dma_chan *chan)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
-	struct dma_pl330_dmac *pdmac = pch->dmac;
+	struct dma_pl330_desc *desc;
 	unsigned long flags;
+	struct pl330_dmac *pl330 = pch->dmac;
+	bool power_down = false;
 
+	pm_runtime_get_sync(pl330->ddma.dev);
 	spin_lock_irqsave(&pch->lock, flags);
 
-	dma_cookie_init(chan);
-	pch->cyclic = false;
+	spin_lock(&pl330->lock);
+	_stop(pch->thread);
+	pch->thread->req[0].desc = NULL;
+	pch->thread->req[1].desc = NULL;
+	pch->thread->req_running = -1;
+	spin_unlock(&pl330->lock);
 
-	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
-	if (!pch->pl330_chid) {
-		spin_unlock_irqrestore(&pch->lock, flags);
-		return -ENOMEM;
+	power_down = pch->active;
+	pch->active = false;
+
+	/* Mark all desc done */
+	list_for_each_entry(desc, &pch->submitted_list, node) {
+		desc->status = FREE;
+		dma_cookie_complete(&desc->txd);
 	}
 
-	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
+	list_for_each_entry(desc, &pch->work_list , node) {
+		desc->status = FREE;
+		dma_cookie_complete(&desc->txd);
+	}
+
+	list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
+	list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
+	list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
 
 	spin_unlock_irqrestore(&pch->lock, flags);
+	pm_runtime_mark_last_busy(pl330->ddma.dev);
+	if (power_down)
+		pm_runtime_put_autosuspend(pl330->ddma.dev);
+	pm_runtime_put_autosuspend(pl330->ddma.dev);
 
-	return 1;
+	return 0;
 }
 
-static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
+/*
+ * We don't support DMA_RESUME command because of hardware
+ * limitations, so after pausing the channel we cannot restore
+ * it to active state. We have to terminate channel and setup
+ * DMA transfer again. This pause feature was implemented to
+ * allow safely reading the residue before channel termination.
+ */
+static int pl330_pause(struct dma_chan *chan)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
-	struct dma_pl330_desc *desc, *_dt;
+	struct pl330_dmac *pl330 = pch->dmac;
+	struct dma_pl330_desc *desc;
 	unsigned long flags;
-	struct dma_pl330_dmac *pdmac = pch->dmac;
-	struct dma_slave_config *slave_config;
-	LIST_HEAD(list);
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		spin_lock_irqsave(&pch->lock, flags);
 
-		/* FLUSH the PL330 Channel thread */
-		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+	pm_runtime_get_sync(pl330->ddma.dev);
+	spin_lock_irqsave(&pch->lock, flags);
 
-		/* Mark all desc done */
-		list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
-			desc->status = DONE;
-			list_move_tail(&desc->node, &list);
-		}
+	spin_lock(&pl330->lock);
+	_stop(pch->thread);
+	spin_unlock(&pl330->lock);
 
-		list_splice_tail_init(&list, &pdmac->desc_pool);
-		spin_unlock_irqrestore(&pch->lock, flags);
-		break;
-	case DMA_SLAVE_CONFIG:
-		slave_config = (struct dma_slave_config *)arg;
-
-		if (slave_config->direction == DMA_MEM_TO_DEV) {
-			if (slave_config->dst_addr)
-				pch->fifo_addr = slave_config->dst_addr;
-			if (slave_config->dst_addr_width)
-				pch->burst_sz = __ffs(slave_config->dst_addr_width);
-			if (slave_config->dst_maxburst)
-				pch->burst_len = slave_config->dst_maxburst;
-		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
-			if (slave_config->src_addr)
-				pch->fifo_addr = slave_config->src_addr;
-			if (slave_config->src_addr_width)
-				pch->burst_sz = __ffs(slave_config->src_addr_width);
-			if (slave_config->src_maxburst)
-				pch->burst_len = slave_config->src_maxburst;
-		}
-		break;
-	default:
-		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
-		return -ENXIO;
+	list_for_each_entry(desc, &pch->work_list, node) {
+		if (desc->status == BUSY)
+			desc->status = PAUSED;
 	}
 
+	spin_unlock_irqrestore(&pch->lock, flags);
+	pm_runtime_mark_last_busy(pl330->ddma.dev);
+	pm_runtime_put_autosuspend(pl330->ddma.dev);
 
 	return 0;
 }
 
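(Aside, not part of the patch: since the hardware cannot resume, the comment above implies a fixed client idiom — pause, read the residue, then terminate and re-prepare. Sketched with our names:)

	static size_t example_stop_and_read_residue(struct dma_chan *chan,
						    dma_cookie_t cookie)
	{
		struct dma_tx_state state;

		dmaengine_pause(chan);			/* -> pl330_pause(), no resume */
		dmaengine_tx_status(chan, cookie, &state);
		dmaengine_terminate_sync(chan);		/* must re-prep to continue */

		return state.residue;
	}
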
@@ -2461,31 +2356,147 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
 static void pl330_free_chan_resources(struct dma_chan *chan)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct pl330_dmac *pl330 = pch->dmac;
 	unsigned long flags;
 
 	tasklet_kill(&pch->task);
 
-	spin_lock_irqsave(&pch->lock, flags);
+	pm_runtime_get_sync(pch->dmac->ddma.dev);
+	spin_lock_irqsave(&pl330->lock, flags);
 
-	pl330_release_channel(pch->pl330_chid);
-	pch->pl330_chid = NULL;
+	pl330_release_channel(pch->thread);
+	pch->thread = NULL;
 
 	if (pch->cyclic)
 		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+	spin_unlock_irqrestore(&pl330->lock, flags);
+	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
+	pl330_unprep_slave_fifo(pch);
+}
+
+static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
+					   struct dma_pl330_desc *desc)
+{
+	struct pl330_thread *thrd = pch->thread;
+	struct pl330_dmac *pl330 = pch->dmac;
+	void __iomem *regs = thrd->dmac->base;
+	u32 val, addr;
+
+	pm_runtime_get_sync(pl330->ddma.dev);
+	val = addr = 0;
+	if (desc->rqcfg.src_inc) {
+		val = readl(regs + SA(thrd->id));
+		addr = desc->px.src_addr;
+	} else {
+		val = readl(regs + DA(thrd->id));
+		addr = desc->px.dst_addr;
+	}
+	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+	pm_runtime_put_autosuspend(pl330->ddma.dev);
+
+	/* If DMAMOV hasn't finished yet, SAR/DAR can be zero */
+	if (!val)
+		return 0;
+
+	return val - addr;
 }
 
 static enum dma_status
 pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		 struct dma_tx_state *txstate)
 {
-	return dma_cookie_status(chan, cookie, txstate);
+	enum dma_status ret;
+	unsigned long flags;
+	struct dma_pl330_desc *desc, *running = NULL, *last_enq = NULL;
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	unsigned int transferred, residual = 0;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	if (!txstate)
+		return ret;
+
+	if (ret == DMA_COMPLETE)
+		goto out;
+
+	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock(&pch->thread->dmac->lock);
+
+	if (pch->thread->req_running != -1)
+		running = pch->thread->req[pch->thread->req_running].desc;
+
+	last_enq = pch->thread->req[pch->thread->lstenq].desc;
+
+	/* Check in pending list */
+	list_for_each_entry(desc, &pch->work_list, node) {
+		if (desc->status == DONE)
+			transferred = desc->bytes_requested;
+		else if (running && desc == running)
+			transferred =
+				pl330_get_current_xferred_count(pch, desc);
+		else if (desc->status == BUSY || desc->status == PAUSED)
+			/*
+			 * Busy but not running means either just enqueued,
+			 * or finished and not yet marked done
+			 */
+			if (desc == last_enq)
+				transferred = 0;
+			else
+				transferred = desc->bytes_requested;
+		else
+			transferred = 0;
+
+		residual += desc->bytes_requested - transferred;
+		if (desc->txd.cookie == cookie) {
+			switch (desc->status) {
+			case DONE:
+				ret = DMA_COMPLETE;
+				break;
+			case PAUSED:
+				ret = DMA_PAUSED;
+				break;
+			case PREP:
+			case BUSY:
+				ret = DMA_IN_PROGRESS;
+				break;
+			default:
+				WARN_ON(1);
+			}
+			break;
+		}
+		if (desc->last)
+			residual = 0;
+	}
+	spin_unlock(&pch->thread->dmac->lock);
+	spin_unlock_irqrestore(&pch->lock, flags);
+
+out:
+	dma_set_residue(txstate, residual);
+
+	return ret;
 }
 
 static void pl330_issue_pending(struct dma_chan *chan)
 {
-	pl330_tasklet((unsigned long) to_pchan(chan));
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pch->lock, flags);
+	if (list_empty(&pch->work_list)) {
+		/*
+		 * Warn on nothing pending. Empty submitted_list may
+		 * break our pm_runtime usage counter as it is
+		 * updated on work_list emptiness status.
+		 */
+		WARN_ON(list_empty(&pch->submitted_list));
+		pch->active = true;
+		pm_runtime_get_sync(pch->dmac->ddma.dev);
+	}
+	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
+	spin_unlock_irqrestore(&pch->lock, flags);
+
+	pl330_tasklet(&pch->task);
 }
 
 /*
@@ -2505,14 +2516,20 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	/* Assign cookies to all nodes */
 	while (!list_empty(&last->node)) {
 		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
+
+		if (pch->cyclic) {
+			desc->txd.callback = last->txd.callback;
+			desc->txd.callback_param = last->txd.callback_param;
+		}
+		desc->last = false;
 
 		dma_cookie_assign(&desc->txd);
 
-		list_move_tail(&desc->node, &pch->work_list);
+		list_move_tail(&desc->node, &pch->submitted_list);
 	}
 
+	last->last = true;
+
 	cookie = dma_cookie_assign(&last->txd);
 
-	list_add_tail(&last->node, &pch->work_list);
+	list_add_tail(&last->node, &pch->submitted_list);
 
 	spin_unlock_irqrestore(&pch->lock, flags);
 
 	return cookie;
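(Aside, not part of the patch: tx_submit above only parks descriptors on submitted_list; nothing reaches the hardware until issue_pending splices them onto work_list and kicks the tasklet. The standard client sequence, sketched with our names:)

	static dma_cookie_t example_queue_sg(struct dma_chan *chan,
					     struct scatterlist *sgl,
					     unsigned int nents)
	{
		struct dma_async_tx_descriptor *desc;
		dma_cookie_t cookie;

		desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT);
		if (!desc)
			return -EIO;

		cookie = dmaengine_submit(desc);	/* -> pl330_tx_submit() */
		dma_async_issue_pending(chan);		/* -> pl330_issue_pending() */

		return cookie;
	}
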
@@ -2520,94 +2537,81 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 
 static inline void _init_desc(struct dma_pl330_desc *desc)
 {
-	desc->pchan = NULL;
-	desc->req.x = &desc->px;
-	desc->req.token = desc;
 	desc->rqcfg.swap = SWAP_NO;
-	desc->rqcfg.privileged = 0;
-	desc->rqcfg.insnaccess = 0;
-	desc->rqcfg.scctl = SCCTRL0;
-	desc->rqcfg.dcctl = DCCTRL0;
-	desc->req.cfg = &desc->rqcfg;
-	desc->req.xfer_cb = dma_pl330_rqcb;
+	desc->rqcfg.scctl = CCTRL0;
+	desc->rqcfg.dcctl = CCTRL0;
 	desc->txd.tx_submit = pl330_tx_submit;
 
 	INIT_LIST_HEAD(&desc->node);
 }
 
 /* Returns the number of descriptors added to the DMAC pool */
-static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
+static int add_desc(struct list_head *pool, spinlock_t *lock,
+		    gfp_t flg, int count)
 {
 	struct dma_pl330_desc *desc;
 	unsigned long flags;
 	int i;
 
-	if (!pdmac)
-		return 0;
-
-	desc = kmalloc(count * sizeof(*desc), flg);
+	desc = kcalloc(count, sizeof(*desc), flg);
 	if (!desc)
 		return 0;
 
-	spin_lock_irqsave(&pdmac->pool_lock, flags);
+	spin_lock_irqsave(lock, flags);
 
 	for (i = 0; i < count; i++) {
 		_init_desc(&desc[i]);
-		list_add_tail(&desc[i].node, &pdmac->desc_pool);
+		list_add_tail(&desc[i].node, pool);
 	}
 
-	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 
 	return count;
 }
 
-static struct dma_pl330_desc *
-pluck_desc(struct dma_pl330_dmac *pdmac)
+static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
+					 spinlock_t *lock)
 {
 	struct dma_pl330_desc *desc = NULL;
 	unsigned long flags;
 
-	if (!pdmac)
-		return NULL;
-
-	spin_lock_irqsave(&pdmac->pool_lock, flags);
+	spin_lock_irqsave(lock, flags);
 
-	if (!list_empty(&pdmac->desc_pool)) {
-		desc = list_entry(pdmac->desc_pool.next,
+	if (!list_empty(pool)) {
+		desc = list_entry(pool->next,
 				struct dma_pl330_desc, node);
 
 		list_del_init(&desc->node);
 
 		desc->status = PREP;
 		desc->txd.callback = NULL;
+		desc->txd.callback_result = NULL;
 	}
 
-	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 
 	return desc;
 }
 
 static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
 {
-	struct dma_pl330_dmac *pdmac = pch->dmac;
+	struct pl330_dmac *pl330 = pch->dmac;
 	u8 *peri_id = pch->chan.private;
 	struct dma_pl330_desc *desc;
 
 	/* Pluck one desc from the pool of DMAC */
-	desc = pluck_desc(pdmac);
+	desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock);
 
 	/* If the DMAC pool is empty, alloc new */
 	if (!desc) {
-		if (!add_desc(pdmac, GFP_ATOMIC, 1))
-			return NULL;
+		static DEFINE_SPINLOCK(lock);
+		LIST_HEAD(pool);
 
-		/* Try again */
-		desc = pluck_desc(pdmac);
-		if (!desc) {
-			dev_err(pch->dmac->pif.dev,
-				"%s:%d ALERT!\n", __func__, __LINE__);
+		if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
 			return NULL;
-		}
+
+		desc = pluck_desc(&pool, &lock);
+		WARN_ON(!desc || !list_empty(&pool));
 	}
 
 	/* Initialize the descriptor */
@@ -2615,8 +2619,8 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
 	desc->txd.cookie = 0;
 	async_tx_ack(&desc->txd);
 
-	desc->req.peri = peri_id ? pch->chan.chan_id : 0;
-	desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;
+	desc->peri = peri_id ? pch->chan.chan_id : 0;
+	desc->rqcfg.pcfg = &pch->dmac->pcfg;
 
 	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
 
@@ -2626,7 +2630,6 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
 static inline void fill_px(struct pl330_xfer *px, dma_addr_t dst,
 	dma_addr_t src, size_t len)
 {
-	px->next = NULL;
 	px->bytes = len;
 	px->dst_addr = dst;
 	px->src_addr = src;
@@ -2639,7 +2642,7 @@ __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
 	struct dma_pl330_desc *desc = pl330_get_desc(pch);
 
 	if (!desc) {
-		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+		dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
 			__func__, __LINE__);
 		return NULL;
 	}
@@ -2663,22 +2666,16 @@ __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
 static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
 {
 	struct dma_pl330_chan *pch = desc->pchan;
-	struct pl330_info *pi = &pch->dmac->pif;
+	struct pl330_dmac *pl330 = pch->dmac;
 	int burst_len;
 
-	burst_len = pi->pcfg.data_bus_width / 8;
-	burst_len *= pi->pcfg.data_buf_dep;
+	burst_len = pl330->pcfg.data_bus_width / 8;
+	burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan;
 	burst_len >>= desc->rqcfg.brst_size;
 
 	/* src/dst_burst_len can't be more than 16 */
-	if (burst_len > 16)
-		burst_len = 16;
-
-	while (burst_len > 1) {
-		if (!(len % (burst_len << desc->rqcfg.brst_size)))
-			break;
-		burst_len--;
-	}
+	if (burst_len > PL330_MAX_BURST)
+		burst_len = PL330_MAX_BURST;
 
 	return burst_len;
 }
 
@@ -2686,47 +2683,90 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
 static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
 		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
+		unsigned long flags)
 {
-	struct dma_pl330_desc *desc;
+	struct dma_pl330_desc *desc = NULL, *first = NULL;
 	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct pl330_dmac *pl330 = pch->dmac;
+	unsigned int i;
 	dma_addr_t dst;
 	dma_addr_t src;
 
-	desc = pl330_get_desc(pch);
-	if (!desc) {
-		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
-			__func__, __LINE__);
+	if (len % period_len != 0)
 		return NULL;
-	}
 
-	switch (direction) {
-	case DMA_MEM_TO_DEV:
-		desc->rqcfg.src_inc = 1;
-		desc->rqcfg.dst_inc = 0;
-		desc->req.rqtype = MEMTODEV;
-		src = dma_addr;
-		dst = pch->fifo_addr;
-		break;
-	case DMA_DEV_TO_MEM:
-		desc->rqcfg.src_inc = 0;
-		desc->rqcfg.dst_inc = 1;
-		desc->req.rqtype = DEVTOMEM;
-		src = pch->fifo_addr;
-		dst = dma_addr;
-		break;
-	default:
-		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
+	if (!is_slave_direction(direction)) {
+		dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
 			__func__, __LINE__);
 		return NULL;
 	}
dma direction\n", __func__, __LINE__); return NULL; } - desc->rqcfg.brst_size = pch->burst_sz; - desc->rqcfg.brst_len = 1; + pl330_config_write(chan, &pch->slave_config, direction); - pch->cyclic = true; + if (!pl330_prep_slave_fifo(pch, direction)) + return NULL; + + for (i = 0; i < len / period_len; i++) { + desc = pl330_get_desc(pch); + if (!desc) { + unsigned long iflags; + + dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", + __func__, __LINE__); + + if (!first) + return NULL; + + spin_lock_irqsave(&pl330->pool_lock, iflags); + + while (!list_empty(&first->node)) { + desc = list_entry(first->node.next, + struct dma_pl330_desc, node); + list_move_tail(&desc->node, &pl330->desc_pool); + } + + list_move_tail(&first->node, &pl330->desc_pool); + + spin_unlock_irqrestore(&pl330->pool_lock, iflags); - fill_px(&desc->px, dst, src, period_len); + return NULL; + } + + switch (direction) { + case DMA_MEM_TO_DEV: + desc->rqcfg.src_inc = 1; + desc->rqcfg.dst_inc = 0; + src = dma_addr; + dst = pch->fifo_dma; + break; + case DMA_DEV_TO_MEM: + desc->rqcfg.src_inc = 0; + desc->rqcfg.dst_inc = 1; + src = pch->fifo_dma; + dst = dma_addr; + break; + default: + break; + } + + desc->rqtype = direction; + desc->rqcfg.brst_size = pch->burst_sz; + desc->rqcfg.brst_len = pch->burst_len; + desc->bytes_requested = period_len; + fill_px(&desc->px, dst, src, period_len); + + if (!first) + first = desc; + else + list_add_tail(&desc->node, &first->node); + + dma_addr += period_len; + } + + if (!desc) + return NULL; + + pch->cyclic = true; return &desc->txd; } @@ -2737,13 +2777,13 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, { struct dma_pl330_desc *desc; struct dma_pl330_chan *pch = to_pchan(chan); - struct pl330_info *pi; + struct pl330_dmac *pl330; int burst; if (unlikely(!pch || !len)) return NULL; - pi = &pch->dmac->pif; + pl330 = pch->dmac; desc = __pl330_prep_dma_memcpy(pch, dst, src, len); if (!desc) @@ -2751,28 +2791,58 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, desc->rqcfg.src_inc = 1; desc->rqcfg.dst_inc = 1; - desc->req.rqtype = MEMTOMEM; + desc->rqtype = DMA_MEM_TO_MEM; /* Select max possible burst size */ - burst = pi->pcfg.data_bus_width / 8; + burst = pl330->pcfg.data_bus_width / 8; - while (burst > 1) { - if (!(len % burst)) - break; + /* + * Make sure we use a burst size that aligns with all the memcpy + * parameters because our DMA programming algorithm doesn't cope with + * transfers which straddle an entry in the DMA device's MFIFO. + */ + while ((src | dst | len) & (burst - 1)) burst /= 2; - } desc->rqcfg.brst_size = 0; while (burst != (1 << desc->rqcfg.brst_size)) desc->rqcfg.brst_size++; desc->rqcfg.brst_len = get_burst_len(desc, len); + /* + * If burst size is smaller than bus width then make sure we only + * transfer one at a time to avoid a burst stradling an MFIFO entry. 
@@ -2737,13 +2777,13 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 {
 	struct dma_pl330_desc *desc;
 	struct dma_pl330_chan *pch = to_pchan(chan);
-	struct pl330_info *pi;
+	struct pl330_dmac *pl330;
 	int burst;
 
 	if (unlikely(!pch || !len))
 		return NULL;
 
-	pi = &pch->dmac->pif;
+	pl330 = pch->dmac;
 
 	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
 	if (!desc)
@@ -2751,28 +2791,58 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 
 	desc->rqcfg.src_inc = 1;
 	desc->rqcfg.dst_inc = 1;
-	desc->req.rqtype = MEMTOMEM;
+	desc->rqtype = DMA_MEM_TO_MEM;
 
 	/* Select max possible burst size */
-	burst = pi->pcfg.data_bus_width / 8;
+	burst = pl330->pcfg.data_bus_width / 8;
 
-	while (burst > 1) {
-		if (!(len % burst))
-			break;
+	/*
+	 * Make sure we use a burst size that aligns with all the memcpy
	 * parameters because our DMA programming algorithm doesn't cope with
+	 * transfers which straddle an entry in the DMA device's MFIFO.
+	 */
+	while ((src | dst | len) & (burst - 1))
 		burst /= 2;
-	}
 
 	desc->rqcfg.brst_size = 0;
 	while (burst != (1 << desc->rqcfg.brst_size))
 		desc->rqcfg.brst_size++;
 
 	desc->rqcfg.brst_len = get_burst_len(desc, len);
+
+	/*
+	 * If burst size is smaller than bus width then make sure we only
+	 * transfer one at a time to avoid a burst straddling an MFIFO entry.
+	 */
+	if (burst * 8 < pl330->pcfg.data_bus_width)
+		desc->rqcfg.brst_len = 1;
 
-	desc->txd.flags = flags;
+	desc->bytes_requested = len;
 
 	return &desc->txd;
 }
 
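(Aside, not part of the patch: a worked example of the burst-width narrowing above, extracted into a standalone helper. With src = 0x1004, dst = 0x2000 and len = 0x100 on an 8-byte bus, src|dst|len = 0x3104, so the loop narrows the burst from 8 to 4 bytes — the widest power of two that keeps every operand aligned:)

	static unsigned int example_memcpy_burst_bytes(unsigned long src,
						       unsigned long dst,
						       size_t len,
						       unsigned int bus_bytes)
	{
		unsigned int burst = bus_bytes;		/* start at full bus width */

		while ((src | dst | len) & (burst - 1))
			burst /= 2;			/* halve until all aligned */

		return burst;				/* 4 for the values above */
	}
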
+static void __pl330_giveback_desc(struct pl330_dmac *pl330,
+				  struct dma_pl330_desc *first)
+{
+	unsigned long flags;
+	struct dma_pl330_desc *desc;
+
+	if (!first)
+		return;
+
+	spin_lock_irqsave(&pl330->pool_lock, flags);
+
+	while (!list_empty(&first->node)) {
+		desc = list_entry(first->node.next,
+				struct dma_pl330_desc, node);
+		list_move_tail(&desc->node, &pl330->desc_pool);
+	}
+
+	list_move_tail(&first->node, &pl330->desc_pool);
+
+	spin_unlock_irqrestore(&pl330->pool_lock, flags);
+}
+
 static struct dma_async_tx_descriptor *
 pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_transfer_direction direction,
@@ -2781,14 +2851,15 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct dma_pl330_desc *first, *desc = NULL;
 	struct dma_pl330_chan *pch = to_pchan(chan);
 	struct scatterlist *sg;
-	unsigned long flags;
 	int i;
-	dma_addr_t addr;
 
 	if (unlikely(!pch || !sgl || !sg_len))
 		return NULL;
 
-	addr = pch->fifo_addr;
+	pl330_config_write(chan, &pch->slave_config, direction);
+
+	if (!pl330_prep_slave_fifo(pch, direction))
+		return NULL;
 
 	first = NULL;
 
@@ -2796,25 +2867,12 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 		desc = pl330_get_desc(pch);
 		if (!desc) {
-			struct dma_pl330_dmac *pdmac = pch->dmac;
+			struct pl330_dmac *pl330 = pch->dmac;
 
-			dev_err(pch->dmac->pif.dev,
+			dev_err(pch->dmac->ddma.dev,
 				"%s:%d Unable to fetch desc\n",
 				__func__, __LINE__);
-			if (!first)
-				return NULL;
-
-			spin_lock_irqsave(&pdmac->pool_lock, flags);
-
-			while (!list_empty(&first->node)) {
-				desc = list_entry(first->node.next,
-						struct dma_pl330_desc, node);
-				list_move_tail(&desc->node, &pdmac->desc_pool);
-			}
-
-			list_move_tail(&first->node, &pdmac->desc_pool);
-
-			spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+			__pl330_giveback_desc(pl330, first);
 
 			return NULL;
 		}
@@ -2827,23 +2885,22 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		if (direction == DMA_MEM_TO_DEV) {
 			desc->rqcfg.src_inc = 1;
 			desc->rqcfg.dst_inc = 0;
-			desc->req.rqtype = MEMTODEV;
-			fill_px(&desc->px,
-				addr, sg_dma_address(sg), sg_dma_len(sg));
+			fill_px(&desc->px, pch->fifo_dma, sg_dma_address(sg),
+				sg_dma_len(sg));
 		} else {
 			desc->rqcfg.src_inc = 0;
 			desc->rqcfg.dst_inc = 1;
-			desc->req.rqtype = DEVTOMEM;
-			fill_px(&desc->px,
-				sg_dma_address(sg), addr, sg_dma_len(sg));
+			fill_px(&desc->px, sg_dma_address(sg), pch->fifo_dma,
+				sg_dma_len(sg));
 		}
 
 		desc->rqcfg.brst_size = pch->burst_sz;
-		desc->rqcfg.brst_len = 1;
+		desc->rqcfg.brst_len = pch->burst_len;
+		desc->rqtype = direction;
+		desc->bytes_requested = sg_dma_len(sg);
 	}
 
 	/* Return the last desc in the chain */
-	desc->txd.flags = flg;
 	return &desc->txd;
 }
 
@@ -2855,100 +2912,220 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
 		return IRQ_NONE;
 }
 
+#define PL330_DMA_BUSWIDTHS \
+	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+
+#ifdef CONFIG_DEBUG_FS
+static int pl330_debugfs_show(struct seq_file *s, void *data)
+{
+	struct pl330_dmac *pl330 = s->private;
+	int chans, pchs, ch, pr;
+
+	chans = pl330->pcfg.num_chan;
+	pchs = pl330->num_peripherals;
+
+	seq_puts(s, "PL330 physical channels:\n");
+	seq_puts(s, "THREAD:\t\tCHANNEL:\n");
+	seq_puts(s, "--------\t-----\n");
+	for (ch = 0; ch < chans; ch++) {
+		struct pl330_thread *thrd = &pl330->channels[ch];
+		int found = -1;
+
+		for (pr = 0; pr < pchs; pr++) {
+			struct dma_pl330_chan *pch = &pl330->peripherals[pr];
+
+			if (!pch->thread || thrd->id != pch->thread->id)
+				continue;
+
+			found = pr;
+		}
+
+		seq_printf(s, "%d\t\t", thrd->id);
+		if (found == -1)
+			seq_puts(s, "--\n");
+		else
+			seq_printf(s, "%d\n", found);
+	}
+
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pl330_debugfs);
+
+static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
+{
+	debugfs_create_file(dev_name(pl330->ddma.dev),
+			    S_IFREG | 0444, NULL, pl330,
+			    &pl330_debugfs_fops);
+}
+#else
+static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
+{
+}
+#endif
+
+/*
+ * Runtime PM callbacks are provided by amba/bus.c driver.
+ *
+ * It is assumed here that IRQ safe runtime PM is chosen in probe and amba
+ * bus driver will only disable/enable the clock in runtime PM callbacks.
+ */
+static int __maybe_unused pl330_suspend(struct device *dev)
+{
+	struct amba_device *pcdev = to_amba_device(dev);
+
+	pm_runtime_force_suspend(dev);
+	clk_unprepare(pcdev->pclk);
+
+	return 0;
+}
+
+static int __maybe_unused pl330_resume(struct device *dev)
+{
+	struct amba_device *pcdev = to_amba_device(dev);
+	int ret;
+
+	ret = clk_prepare(pcdev->pclk);
+	if (ret)
+		return ret;
+
+	pm_runtime_force_resume(dev);
+
+	return ret;
+}
+
+static const struct dev_pm_ops pl330_pm = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume)
+};
+
 static int
 pl330_probe(struct amba_device *adev, const struct amba_id *id)
 {
-	struct dma_pl330_platdata *pdat;
-	struct dma_pl330_dmac *pdmac;
+	struct pl330_config *pcfg;
+	struct pl330_dmac *pl330;
 	struct dma_pl330_chan *pch, *_p;
-	struct pl330_info *pi;
 	struct dma_device *pd;
 	struct resource *res;
 	int i, ret, irq;
 	int num_chan;
+	struct device_node *np = adev->dev.of_node;
 
-	pdat = adev->dev.platform_data;
+	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
 
 	/* Allocate a new DMAC and its Channels */
-	pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
-	if (!pdmac) {
-		dev_err(&adev->dev, "unable to allocate mem\n");
+	pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
+	if (!pl330)
 		return -ENOMEM;
-	}
 
-	pi = &pdmac->pif;
-	pi->dev = &adev->dev;
-	pi->pl330_data = NULL;
-	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
+	pd = &pl330->ddma;
+	pd->dev = &adev->dev;
+
+	pl330->mcbufsz = 0;
+
+	/* get quirk */
+	for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
+		if (of_property_read_bool(np, of_quirks[i].quirk))
+			pl330->quirks |= of_quirks[i].id;
 
 	res = &adev->res;
-	pi->base = devm_ioremap_resource(&adev->dev, res);
-	if (IS_ERR(pi->base))
-		return PTR_ERR(pi->base);
+	pl330->base = devm_ioremap_resource(&adev->dev, res);
+	if (IS_ERR(pl330->base))
+		return PTR_ERR(pl330->base);
 
-	amba_set_drvdata(adev, pdmac);
+	amba_set_drvdata(adev, pl330);
 
-	irq = adev->irq[0];
-	ret = request_irq(irq, pl330_irq_handler, 0,
-			dev_name(&adev->dev), pi);
-	if (ret)
-		return ret;
+	pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma");
+	if (IS_ERR(pl330->rstc)) {
+		return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc), "Failed to get reset!\n");
+	} else {
+		ret = reset_control_deassert(pl330->rstc);
+		if (ret) {
+			dev_err(&adev->dev, "Couldn't deassert the device from reset!\n");
+			return ret;
+		}
+	}
+
+	pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp");
+	if (IS_ERR(pl330->rstc_ocp)) {
+		return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc_ocp),
+				     "Failed to get OCP reset!\n");
+	} else {
+		ret = reset_control_deassert(pl330->rstc_ocp);
+		if (ret) {
+			dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n");
+			return ret;
+		}
+	}
 
-	pi->pcfg.periph_id = adev->periphid;
-	ret = pl330_add(pi);
+	for (i = 0; i < AMBA_NR_IRQS; i++) {
+		irq = adev->irq[i];
+		if (irq) {
+			ret = devm_request_irq(&adev->dev, irq,
+					       pl330_irq_handler, 0,
+					       dev_name(&adev->dev), pl330);
+			if (ret)
+				return ret;
+		} else {
+			break;
+		}
+	}
+
+	pcfg = &pl330->pcfg;
+
+	pcfg->periph_id = adev->periphid;
+	ret = pl330_add(pl330);
 	if (ret)
-		goto probe_err1;
+		return ret;
 
-	INIT_LIST_HEAD(&pdmac->desc_pool);
-	spin_lock_init(&pdmac->pool_lock);
+	INIT_LIST_HEAD(&pl330->desc_pool);
+	spin_lock_init(&pl330->pool_lock);
 
 	/* Create a descriptor pool of default size */
-	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
+	if (!add_desc(&pl330->desc_pool, &pl330->pool_lock,
+		      GFP_KERNEL, NR_DEFAULT_DESC))
 		dev_warn(&adev->dev, "unable to allocate desc\n");
 
-	pd = &pdmac->ddma;
 	INIT_LIST_HEAD(&pd->channels);
 
 	/* Initialize channel parameters */
-	if (pdat)
-		num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
-	else
-		num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
+	num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);
+
+	pl330->num_peripherals = num_chan;
 
-	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
-	if (!pdmac->peripherals) {
+	pl330->peripherals = kcalloc(num_chan, sizeof(*pch), GFP_KERNEL);
+	if (!pl330->peripherals) {
 		ret = -ENOMEM;
-		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
 		goto probe_err2;
 	}
 
 	for (i = 0; i < num_chan; i++) {
-		pch = &pdmac->peripherals[i];
-		if (!adev->dev.of_node)
-			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
-		else
-			pch->chan.private = adev->dev.of_node;
+		pch = &pl330->peripherals[i];
+		pch->chan.private = adev->dev.of_node;
 
+		INIT_LIST_HEAD(&pch->submitted_list);
 		INIT_LIST_HEAD(&pch->work_list);
+		INIT_LIST_HEAD(&pch->completed_list);
 		spin_lock_init(&pch->lock);
-		pch->pl330_chid = NULL;
+		pch->thread = NULL;
 		pch->chan.device = pd;
-		pch->dmac = pdmac;
+		pch->dmac = pl330;
+		pch->dir = DMA_NONE;
 
 		/* Add the channel to the DMAC list */
 		list_add_tail(&pch->chan.device_node, &pd->channels);
 	}
 
-	pd->dev = &adev->dev;
-	if (pdat) {
-		pd->cap_mask = pdat->cap_mask;
-	} else {
-		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
-		if (pi->pcfg.num_peri) {
-			dma_cap_set(DMA_SLAVE, pd->cap_mask);
-			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
-			dma_cap_set(DMA_PRIVATE, pd->cap_mask);
-		}
+	dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+	if (pcfg->num_peri) {
+		dma_cap_set(DMA_SLAVE, pd->cap_mask);
+		dma_cap_set(DMA_CYCLIC, pd->cap_mask);
+		dma_cap_set(DMA_PRIVATE, pd->cap_mask);
 	}
 
 	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
@@ -2957,8 +3134,15 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
 	pd->device_tx_status = pl330_tx_status;
 	pd->device_prep_slave_sg = pl330_prep_slave_sg;
-	pd->device_control = pl330_control;
+	pd->device_config = pl330_config;
+	pd->device_pause = pl330_pause;
+	pd->device_terminate_all = pl330_terminate_all;
 	pd->device_issue_pending = pl330_issue_pending;
+	pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
+	pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
+	pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	pd->max_burst = PL330_MAX_BURST;
 
 	ret = dma_async_device_register(pd);
 	if (ret) {
@@ -2968,83 +3152,102 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
 	if (adev->dev.of_node) {
 		ret = of_dma_controller_register(adev->dev.of_node,
-					 of_dma_pl330_xlate, pdmac);
+					 of_dma_pl330_xlate, pl330);
		if (ret) {
 			dev_err(&adev->dev,
 			"unable to register DMA to the generic DT DMA helpers\n");
 		}
 	}
 
+	/*
+	 * This is the limit for transfers with a buswidth of 1, larger
+	 * buswidths will have larger limits.
+	 */
+	dma_set_max_seg_size(&adev->dev, 1900800);
+
+	init_pl330_debugfs(pl330);
 	dev_info(&adev->dev,
-		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
+		"Loaded driver for PL330 DMAC-%x\n", adev->periphid);
 	dev_info(&adev->dev,
 		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
-		pi->pcfg.data_buf_dep,
-		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
-		pi->pcfg.num_peri, pi->pcfg.num_events);
+		pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
+		pcfg->num_peri, pcfg->num_events);
+
+	pm_runtime_irq_safe(&adev->dev);
+	pm_runtime_use_autosuspend(&adev->dev);
+	pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY);
+	pm_runtime_mark_last_busy(&adev->dev);
+	pm_runtime_put_autosuspend(&adev->dev);
 
 	return 0;
 probe_err3:
-	amba_set_drvdata(adev, NULL);
-
 	/* Idle the DMAC */
-	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
+	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
 			chan.device_node) {
 
 		/* Remove the channel */
 		list_del(&pch->chan.device_node);
 
 		/* Flush the channel */
-		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
-		pl330_free_chan_resources(&pch->chan);
+		if (pch->thread) {
+			pl330_terminate_all(&pch->chan);
+			pl330_free_chan_resources(&pch->chan);
+		}
 	}
 probe_err2:
-	pl330_del(pi);
-probe_err1:
-	free_irq(irq, pi);
+	pl330_del(pl330);
+
+	if (pl330->rstc_ocp)
+		reset_control_assert(pl330->rstc_ocp);
+
+	if (pl330->rstc)
+		reset_control_assert(pl330->rstc);
 
 	return ret;
 }
 
-static int pl330_remove(struct amba_device *adev)
+static void pl330_remove(struct amba_device *adev)
 {
-	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
+	struct pl330_dmac *pl330 = amba_get_drvdata(adev);
 	struct dma_pl330_chan *pch, *_p;
-	struct pl330_info *pi;
-	int irq;
+	int i, irq;
 
-	if (!pdmac)
-		return 0;
+	pm_runtime_get_noresume(pl330->ddma.dev);
 
 	if (adev->dev.of_node)
 		of_dma_controller_free(adev->dev.of_node);
 
-	dma_async_device_unregister(&pdmac->ddma);
-	amba_set_drvdata(adev, NULL);
+	for (i = 0; i < AMBA_NR_IRQS; i++) {
+		irq = adev->irq[i];
+		if (irq)
+			devm_free_irq(&adev->dev, irq, pl330);
+	}
+
+	dma_async_device_unregister(&pl330->ddma);
 
 	/* Idle the DMAC */
-	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
+	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
 			chan.device_node) {
 
 		/* Remove the channel */
 		list_del(&pch->chan.device_node);
 
 		/* Flush the channel */
-		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
-		pl330_free_chan_resources(&pch->chan);
+		if (pch->thread) {
+			pl330_terminate_all(&pch->chan);
+			pl330_free_chan_resources(&pch->chan);
+		}
 	}
 
-	pi = &pdmac->pif;
+	pl330_del(pl330);
 
-	pl330_del(pi);
+	if (pl330->rstc_ocp)
+		reset_control_assert(pl330->rstc_ocp);
 
-	irq = adev->irq[0];
-	free_irq(irq, pi);
-
-	return 0;
+	if (pl330->rstc)
+		reset_control_assert(pl330->rstc);
 }
 
-static struct amba_id pl330_ids[] = {
+static const struct amba_id pl330_ids[] = {
 	{
 		.id	= 0x00041330,
 		.mask	= 0x000fffff,
@@ -3056,8 +3259,8 @@ MODULE_DEVICE_TABLE(amba, pl330_ids);
 
 static struct amba_driver pl330_driver = {
 	.drv = {
-		.owner = THIS_MODULE,
 		.name = "dma-pl330",
+		.pm = &pl330_pm,
 	},
 	.id_table = pl330_ids,
 	.probe = pl330_probe,
@@ -3066,6 +3269,6 @@ static struct amba_driver pl330_driver = {
 
 module_amba_driver(pl330_driver);
 
-MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
+MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>");
 MODULE_DESCRIPTION("API Driver for PL330 DMAC");
 MODULE_LICENSE("GPL");
