Diffstat (limited to 'drivers/dma/ste_dma40.c')
| -rw-r--r-- | drivers/dma/ste_dma40.c | 375 |
1 file changed, 170 insertions(+), 205 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 907ae97a3ef4..d52e1685aed5 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1,9 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) Ericsson AB 2007-2008 * Copyright (C) ST-Ericsson SA 2008-2010 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson - * License terms: GNU General Public License (GPL) version 2 */ #include <linux/dma-mapping.h> @@ -19,14 +19,41 @@ #include <linux/pm_runtime.h> #include <linux/err.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_dma.h> #include <linux/amba/bus.h> #include <linux/regulator/consumer.h> -#include <linux/platform_data/dma-ste-dma40.h> #include "dmaengine.h" +#include "ste_dma40.h" #include "ste_dma40_ll.h" +/** + * struct stedma40_platform_data - Configuration struct for the dma device. + * + * @disabled_channels: A vector, ending with -1, that marks physical channels + * that are for different reasons not available for the driver. + * @soft_lli_chans: A vector, that marks physical channels will use LLI by SW + * which avoids HW bug that exists in some versions of the controller. + * SoftLLI introduces relink overhead that could impact performance for + * certain use cases. + * @num_of_soft_lli_chans: The number of channels that needs to be configured + * to use SoftLLI. + * @use_esram_lcla: flag for mapping the lcla into esram region + * @num_of_memcpy_chans: The number of channels reserved for memcpy. + * @num_of_phy_chans: The number of physical channels implemented in HW. + * 0 means reading the number of channels from DMA HW but this is only valid + * for 'multiple of 4' channels, like 8. + */ +struct stedma40_platform_data { + int disabled_channels[STEDMA40_MAX_PHYS]; + int *soft_lli_chans; + int num_of_soft_lli_chans; + bool use_esram_lcla; + int num_of_memcpy_chans; + int num_of_phy_chans; +}; + #define D40_NAME "dma40" #define D40_PHY_CHAN -1 @@ -78,7 +105,7 @@ static int dma40_memcpy_channels[] = { DB8500_DMA_MEMCPY_EV_5, }; -/* Default configuration for physcial memcpy */ +/* Default configuration for physical memcpy */ static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = { .mode = STEDMA40_MODE_PHYSICAL, .dir = DMA_MEM_TO_MEM, @@ -107,7 +134,7 @@ static const struct stedma40_chan_cfg dma40_memcpy_conf_log = { }; /** - * enum 40_command - The different commands and/or statuses. + * enum d40_command - The different commands and/or statuses. * * @D40_DMA_STOP: DMA channel command STOP or status STOPPED, * @D40_DMA_RUN: The DMA channel is RUNNING of the command RUN. @@ -142,7 +169,7 @@ enum d40_events { * when the DMA hw is powered off. * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. 
*/ -static u32 d40_backup_regs[] = { +static __maybe_unused u32 d40_backup_regs[] = { D40_DREG_LCPA, D40_DREG_LCLA, D40_DREG_PRMSE, @@ -155,7 +182,7 @@ static u32 d40_backup_regs[] = { /* * since 9540 and 8540 has the same HW revision - * use v4a for 9540 or ealier + * use v4a for 9540 or earlier * use v4b for 8540 or later * HW revision: * DB8500ed has revision 0 @@ -211,7 +238,7 @@ static u32 d40_backup_regs_v4b[] = { #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b) -static u32 d40_backup_regs_chan[] = { +static __maybe_unused u32 d40_backup_regs_chan[] = { D40_CHAN_REG_SSCFG, D40_CHAN_REG_SSELT, D40_CHAN_REG_SSPTR, @@ -381,7 +408,8 @@ struct d40_desc { * struct d40_lcla_pool - LCLA pool settings and data. * * @base: The virtual address of LCLA. 18 bit aligned. - * @base_unaligned: The orignal kmalloc pointer, if kmalloc is used. + * @dma_addr: DMA address, if mapped + * @base_unaligned: The original kmalloc pointer, if kmalloc is used. * This pointer is only there for clean-up on error. * @pages: The number of pages needed for all physical channels. * Only used later for clean-up on error @@ -524,8 +552,6 @@ struct d40_gen_dmac { * @virtbase: The virtual base address of the DMA's register. * @rev: silicon revision detected. * @clk: Pointer to the DMA clock structure. - * @phy_start: Physical memory start of the DMA registers. - * @phy_size: Size of the DMA register map. * @irq: The IRQ number. * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem * transfers). @@ -569,13 +595,10 @@ struct d40_base { void __iomem *virtbase; u8 rev:4; struct clk *clk; - phys_addr_t phy_start; - resource_size_t phy_size; int irq; int num_memcpy_chans; int num_phy_chans; int num_log_chans; - struct device_dma_parameters dma_parms; struct dma_device dma_both; struct dma_device dma_slave; struct dma_device dma_memcpy; @@ -1571,9 +1594,9 @@ static void dma_tc_handle(struct d40_chan *d40c) } -static void dma_tasklet(unsigned long data) +static void dma_tasklet(struct tasklet_struct *t) { - struct d40_chan *d40c = (struct d40_chan *) data; + struct d40_chan *d40c = from_tasklet(d40c, t, tasklet); struct d40_desc *d40d; unsigned long flags; bool callback_active; @@ -1630,7 +1653,7 @@ static void dma_tasklet(unsigned long data) return; check_pending_tx: - /* Rescue manouver if receiving double interrupts */ + /* Rescue maneuver if receiving double interrupts */ if (d40c->pending_tx > 0) d40c->pending_tx--; spin_unlock_irqrestore(&d40c->lock, flags); @@ -1643,13 +1666,12 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) u32 row; long chan = -1; struct d40_chan *d40c; - unsigned long flags; struct d40_base *base = data; u32 *regs = base->regs_interrupt; struct d40_interrupt_lookup *il = base->gen_dmac.il; u32 il_size = base->gen_dmac.il_size; - spin_lock_irqsave(&base->interrupt_lock, flags); + spin_lock(&base->interrupt_lock); /* Read interrupt status of both logical and physical channels */ for (i = 0; i < il_size; i++) @@ -1694,7 +1716,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) spin_unlock(&d40c->lock); } - spin_unlock_irqrestore(&base->interrupt_lock, flags); + spin_unlock(&base->interrupt_lock); return IRQ_HANDLED; } @@ -1971,7 +1993,7 @@ static int d40_config_memcpy(struct d40_chan *d40c) dma_has_cap(DMA_SLAVE, cap)) { d40c->dma_cfg = dma40_memcpy_conf_phy; - /* Generate interrrupt at end of transfer or relink. */ + /* Generate interrupt at end of transfer or relink. 
*/ d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS); /* Generate interrupt on error. */ @@ -2269,7 +2291,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, return NULL; } -bool stedma40_filter(struct dma_chan *chan, void *data) +static bool stedma40_filter(struct dma_chan *chan, void *data) { struct stedma40_chan_cfg *info = data; struct d40_chan *d40c = @@ -2288,7 +2310,6 @@ bool stedma40_filter(struct dma_chan *chan, void *data) return err == 0; } -EXPORT_SYMBOL(stedma40_filter); static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) { @@ -2804,8 +2825,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, INIT_LIST_HEAD(&d40c->client); INIT_LIST_HEAD(&d40c->prepare_queue); - tasklet_init(&d40c->tasklet, dma_tasklet, - (unsigned long) d40c); + tasklet_setup(&d40c->tasklet, dma_tasklet); list_add_tail(&d40c->chan.device_node, &dma->channels); @@ -3102,64 +3122,57 @@ static int __init d40_phy_res_init(struct d40_base *base) return num_phy_chans_avail; } -static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) +/* Called from the registered devm action */ +static void d40_drop_kmem_cache_action(void *d) +{ + struct kmem_cache *desc_slab = d; + + kmem_cache_destroy(desc_slab); +} + +static int __init d40_hw_detect_init(struct platform_device *pdev, + struct d40_base **retbase) { struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); + struct device *dev = &pdev->dev; struct clk *clk; void __iomem *virtbase; - struct resource *res; struct d40_base *base; int num_log_chans; int num_phy_chans; int num_memcpy_chans; - int clk_ret = -EINVAL; int i; u32 pid; u32 cid; u8 rev; + int ret; - clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(clk)) { - d40_err(&pdev->dev, "No matching clock found\n"); - goto check_prepare_enabled; - } - - clk_ret = clk_prepare_enable(clk); - if (clk_ret) { - d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); - goto disable_unprepare; - } + clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(clk)) + return PTR_ERR(clk); /* Get IO for DMAC base address */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); - if (!res) - goto disable_unprepare; - - if (request_mem_region(res->start, resource_size(res), - D40_NAME " I/O base") == NULL) - goto release_region; - - virtbase = ioremap(res->start, resource_size(res)); - if (!virtbase) - goto release_region; + virtbase = devm_platform_ioremap_resource_byname(pdev, "base"); + if (IS_ERR(virtbase)) + return PTR_ERR(virtbase); /* This is just a regular AMBA PrimeCell ID actually */ for (pid = 0, i = 0; i < 4; i++) - pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i) + pid |= (readl(virtbase + SZ_4K - 0x20 + 4 * i) & 255) << (i * 8); for (cid = 0, i = 0; i < 4; i++) - cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i) + cid |= (readl(virtbase + SZ_4K - 0x10 + 4 * i) & 255) << (i * 8); if (cid != AMBA_CID) { - d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); - goto unmap_io; + d40_err(dev, "Unknown hardware! No PrimeCell ID\n"); + return -EINVAL; } if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { - d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", + d40_err(dev, "Unknown designer! 
Got %x wanted %x\n", AMBA_MANF_BITS(pid), AMBA_VENDOR_ST); - goto unmap_io; + return -EINVAL; } /* * HW revision: @@ -3172,8 +3185,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) */ rev = AMBA_REV_BITS(pid); if (rev < 2) { - d40_err(&pdev->dev, "hardware revision: %d is not supported", rev); - goto unmap_io; + d40_err(dev, "hardware revision: %d is not supported", rev); + return -EINVAL; } /* The number of physical channels on this HW */ @@ -3190,27 +3203,26 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY; - dev_info(&pdev->dev, - "hardware rev: %d @ %pa with %d physical and %d logical channels\n", - rev, &res->start, num_phy_chans, num_log_chans); + dev_info(dev, + "hardware rev: %d with %d physical and %d logical channels\n", + rev, num_phy_chans, num_log_chans); - base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + - (num_phy_chans + num_log_chans + num_memcpy_chans) * - sizeof(struct d40_chan), GFP_KERNEL); + base = devm_kzalloc(dev, + ALIGN(sizeof(struct d40_base), 4) + + (num_phy_chans + num_log_chans + num_memcpy_chans) * + sizeof(struct d40_chan), GFP_KERNEL); - if (base == NULL) - goto unmap_io; + if (!base) + return -ENOMEM; base->rev = rev; base->clk = clk; base->num_memcpy_chans = num_memcpy_chans; base->num_phy_chans = num_phy_chans; base->num_log_chans = num_log_chans; - base->phy_start = res->start; - base->phy_size = resource_size(res); base->virtbase = virtbase; base->plat_data = plat_data; - base->dev = &pdev->dev; + base->dev = dev; base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); base->log_chans = &base->phy_chans[num_phy_chans]; @@ -3244,76 +3256,57 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a); } - base->phy_res = kcalloc(num_phy_chans, - sizeof(*base->phy_res), - GFP_KERNEL); + base->phy_res = devm_kcalloc(dev, num_phy_chans, + sizeof(*base->phy_res), + GFP_KERNEL); if (!base->phy_res) - goto free_base; + return -ENOMEM; - base->lookup_phy_chans = kcalloc(num_phy_chans, - sizeof(*base->lookup_phy_chans), - GFP_KERNEL); + base->lookup_phy_chans = devm_kcalloc(dev, num_phy_chans, + sizeof(*base->lookup_phy_chans), + GFP_KERNEL); if (!base->lookup_phy_chans) - goto free_phy_res; + return -ENOMEM; - base->lookup_log_chans = kcalloc(num_log_chans, - sizeof(*base->lookup_log_chans), - GFP_KERNEL); + base->lookup_log_chans = devm_kcalloc(dev, num_log_chans, + sizeof(*base->lookup_log_chans), + GFP_KERNEL); if (!base->lookup_log_chans) - goto free_phy_chans; + return -ENOMEM; - base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans, + base->reg_val_backup_chan = devm_kmalloc_array(dev, base->num_phy_chans, sizeof(d40_backup_regs_chan), GFP_KERNEL); if (!base->reg_val_backup_chan) - goto free_log_chans; + return -ENOMEM; - base->lcla_pool.alloc_map = kcalloc(num_phy_chans + base->lcla_pool.alloc_map = devm_kcalloc(dev, num_phy_chans * D40_LCLA_LINK_PER_EVENT_GRP, sizeof(*base->lcla_pool.alloc_map), GFP_KERNEL); if (!base->lcla_pool.alloc_map) - goto free_backup_chan; + return -ENOMEM; - base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size, + base->regs_interrupt = devm_kmalloc_array(dev, base->gen_dmac.il_size, sizeof(*base->regs_interrupt), GFP_KERNEL); if (!base->regs_interrupt) - goto free_map; + return -ENOMEM; base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), 0, SLAB_HWCACHE_ALIGN, NULL); - 
if (base->desc_slab == NULL) - goto free_regs; - - - return base; - free_regs: - kfree(base->regs_interrupt); - free_map: - kfree(base->lcla_pool.alloc_map); - free_backup_chan: - kfree(base->reg_val_backup_chan); - free_log_chans: - kfree(base->lookup_log_chans); - free_phy_chans: - kfree(base->lookup_phy_chans); - free_phy_res: - kfree(base->phy_res); - free_base: - kfree(base); - unmap_io: - iounmap(virtbase); - release_region: - release_mem_region(res->start, resource_size(res)); - check_prepare_enabled: - if (!clk_ret) - disable_unprepare: - clk_disable_unprepare(clk); - if (!IS_ERR(clk)) - clk_put(clk); - return NULL; + if (!base->desc_slab) + return -ENOMEM; + + ret = devm_add_action_or_reset(dev, d40_drop_kmem_cache_action, + base->desc_slab); + if (ret) + return ret; + + *retbase = base; + + return 0; } static void __init d40_hw_init(struct d40_base *base) @@ -3417,7 +3410,7 @@ static int __init d40_lcla_allocate(struct d40_base *base) base->lcla_pool.base = (void *)page_list[i]; } else { /* - * After many attempts and no succees with finding the correct + * After many attempts and no success with finding the correct * alignment, try with allocating a big buffer. */ dev_warn(base->dev, @@ -3453,14 +3446,14 @@ static int __init d40_lcla_allocate(struct d40_base *base) return ret; } -static int __init d40_of_probe(struct platform_device *pdev, +static int __init d40_of_probe(struct device *dev, struct device_node *np) { struct stedma40_platform_data *pdata; int num_phy = 0, num_memcpy = 0, num_disabled = 0; const __be32 *list; - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; @@ -3473,7 +3466,7 @@ static int __init d40_of_probe(struct platform_device *pdev, num_memcpy /= sizeof(*list); if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) { - d40_err(&pdev->dev, + d40_err(dev, "Invalid number of memcpy channels specified (%d)\n", num_memcpy); return -EINVAL; @@ -3488,7 +3481,7 @@ static int __init d40_of_probe(struct platform_device *pdev, num_disabled /= sizeof(*list); if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) { - d40_err(&pdev->dev, + d40_err(dev, "Invalid number of disabled channels specified (%d)\n", num_disabled); return -EINVAL; @@ -3499,35 +3492,30 @@ static int __init d40_of_probe(struct platform_device *pdev, num_disabled); pdata->disabled_channels[num_disabled] = -1; - pdev->dev.platform_data = pdata; + dev->platform_data = pdata; return 0; } static int __init d40_probe(struct platform_device *pdev) { - struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); + struct device *dev = &pdev->dev; struct device_node *np = pdev->dev.of_node; - int ret = -ENOENT; + struct device_node *np_lcpa; struct d40_base *base; struct resource *res; + struct resource res_lcpa; int num_reserved_chans; u32 val; + int ret; - if (!plat_data) { - if (np) { - if (d40_of_probe(pdev, np)) { - ret = -ENOMEM; - goto report_failure; - } - } else { - d40_err(&pdev->dev, "No pdata or Device Tree provided\n"); - goto report_failure; - } + if (d40_of_probe(dev, np)) { + ret = -ENOMEM; + goto report_failure; } - base = d40_hw_detect_init(pdev); - if (!base) + ret = d40_hw_detect_init(pdev, &base); + if (ret) goto report_failure; num_reserved_chans = d40_phy_res_init(base); @@ -3537,37 +3525,38 @@ static int __init d40_probe(struct platform_device *pdev) spin_lock_init(&base->interrupt_lock); spin_lock_init(&base->execmd_lock); - /* Get IO for logical channel parameter 
address */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); - if (!res) { - ret = -ENOENT; - d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); - goto destroy_cache; + /* Get IO for logical channel parameter address (LCPA) */ + np_lcpa = of_parse_phandle(np, "sram", 0); + if (!np_lcpa) { + dev_err(dev, "no LCPA SRAM node\n"); + ret = -EINVAL; + goto report_failure; } - base->lcpa_size = resource_size(res); - base->phy_lcpa = res->start; - - if (request_mem_region(res->start, resource_size(res), - D40_NAME " I/O lcpa") == NULL) { - ret = -EBUSY; - d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res); - goto destroy_cache; + /* This is no device so read the address directly from the node */ + ret = of_address_to_resource(np_lcpa, 0, &res_lcpa); + if (ret) { + dev_err(dev, "no LCPA SRAM resource\n"); + goto report_failure; } + base->lcpa_size = resource_size(&res_lcpa); + base->phy_lcpa = res_lcpa.start; + dev_info(dev, "found LCPA SRAM at %pad, size %pa\n", + &base->phy_lcpa, &base->lcpa_size); /* We make use of ESRAM memory for this. */ val = readl(base->virtbase + D40_DREG_LCPA); - if (res->start != val && val != 0) { - dev_warn(&pdev->dev, - "[%s] Mismatch LCPA dma 0x%x, def %pa\n", - __func__, val, &res->start); + if (base->phy_lcpa != val && val != 0) { + dev_warn(dev, + "[%s] Mismatch LCPA dma 0x%x, def %08x\n", + __func__, val, (u32)base->phy_lcpa); } else - writel(res->start, base->virtbase + D40_DREG_LCPA); + writel(base->phy_lcpa, base->virtbase + D40_DREG_LCPA); - base->lcpa_base = ioremap(res->start, resource_size(res)); + base->lcpa_base = devm_ioremap(dev, base->phy_lcpa, base->lcpa_size); if (!base->lcpa_base) { ret = -ENOMEM; - d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); - goto destroy_cache; + d40_err(dev, "Failed to ioremap LCPA region\n"); + goto report_failure; } /* If lcla has to be located in ESRAM we don't need to allocate */ if (base->plat_data->use_esram_lcla) { @@ -3575,23 +3564,23 @@ static int __init d40_probe(struct platform_device *pdev) "lcla_esram"); if (!res) { ret = -ENOENT; - d40_err(&pdev->dev, + d40_err(dev, "No \"lcla_esram\" memory resource\n"); - goto destroy_cache; + goto report_failure; } - base->lcla_pool.base = ioremap(res->start, - resource_size(res)); + base->lcla_pool.base = devm_ioremap(dev, res->start, + resource_size(res)); if (!base->lcla_pool.base) { ret = -ENOMEM; - d40_err(&pdev->dev, "Failed to ioremap LCLA region\n"); - goto destroy_cache; + d40_err(dev, "Failed to ioremap LCLA region\n"); + goto report_failure; } writel(res->start, base->virtbase + D40_DREG_LCLA); } else { ret = d40_lcla_allocate(base); if (ret) { - d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); + d40_err(dev, "Failed to allocate LCLA area\n"); goto destroy_cache; } } @@ -3599,10 +3588,14 @@ static int __init d40_probe(struct platform_device *pdev) spin_lock_init(&base->lcla_pool.lock); base->irq = platform_get_irq(pdev, 0); + if (base->irq < 0) { + ret = base->irq; + goto destroy_cache; + } ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); if (ret) { - d40_err(&pdev->dev, "No IRQ defined\n"); + d40_err(dev, "No IRQ defined\n"); goto destroy_cache; } @@ -3610,7 +3603,7 @@ static int __init d40_probe(struct platform_device *pdev) base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); if (IS_ERR(base->lcpa_regulator)) { - d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); + d40_err(dev, "Failed to get lcpa_regulator\n"); ret = PTR_ERR(base->lcpa_regulator); base->lcpa_regulator = NULL; 
goto destroy_cache; @@ -3618,7 +3611,7 @@ static int __init d40_probe(struct platform_device *pdev) ret = regulator_enable(base->lcpa_regulator); if (ret) { - d40_err(&pdev->dev, + d40_err(dev, "Failed to enable lcpa_regulator\n"); regulator_put(base->lcpa_regulator); base->lcpa_regulator = NULL; @@ -3639,34 +3632,21 @@ static int __init d40_probe(struct platform_device *pdev) if (ret) goto destroy_cache; - base->dev->dma_parms = &base->dma_parms; - ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); - if (ret) { - d40_err(&pdev->dev, "Failed to set dma max seg size\n"); - goto destroy_cache; - } + dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); d40_hw_init(base); - if (np) { - ret = of_dma_controller_register(np, d40_xlate, NULL); - if (ret) - dev_err(&pdev->dev, - "could not register of_dma_controller\n"); + ret = of_dma_controller_register(np, d40_xlate, NULL); + if (ret) { + dev_err(dev, + "could not register of_dma_controller\n"); + goto destroy_cache; } dev_info(base->dev, "initialized\n"); return 0; - destroy_cache: - kmem_cache_destroy(base->desc_slab); - if (base->virtbase) - iounmap(base->virtbase); - - if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { - iounmap(base->lcla_pool.base); - base->lcla_pool.base = NULL; - } + destroy_cache: if (base->lcla_pool.dma_addr) dma_unmap_single(base->dev, base->lcla_pool.dma_addr, SZ_1K * base->num_phy_chans, @@ -3678,29 +3658,14 @@ static int __init d40_probe(struct platform_device *pdev) kfree(base->lcla_pool.base_unaligned); - if (base->phy_lcpa) - release_mem_region(base->phy_lcpa, - base->lcpa_size); - if (base->phy_start) - release_mem_region(base->phy_start, - base->phy_size); - if (base->clk) { - clk_disable_unprepare(base->clk); - clk_put(base->clk); - } - if (base->lcpa_regulator) { regulator_disable(base->lcpa_regulator); regulator_put(base->lcpa_regulator); } + pm_runtime_disable(base->dev); - kfree(base->lcla_pool.alloc_map); - kfree(base->lookup_log_chans); - kfree(base->lookup_phy_chans); - kfree(base->phy_res); - kfree(base); report_failure: - d40_err(&pdev->dev, "probe failed\n"); + d40_err(dev, "probe failed\n"); return ret; } |
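The patch repeatedly converts the driver from the old tasklet_init() callback, which received an opaque unsigned long, to tasklet_setup() with a callback that recovers its channel through from_tasklet(). A minimal sketch of that pattern, using hypothetical foo_* names rather than the actual ste_dma40 structures:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct foo_chan {
	spinlock_t lock;
	int pending_tx;
	struct tasklet_struct tasklet;	/* embedded so container_of() works */
};

/* New-style callback: takes the tasklet_struct instead of an unsigned long. */
static void foo_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() on the embedded member. */
	struct foo_chan *fc = from_tasklet(fc, t, tasklet);
	unsigned long flags;

	spin_lock_irqsave(&fc->lock, flags);
	if (fc->pending_tx > 0)
		fc->pending_tx--;
	spin_unlock_irqrestore(&fc->lock, flags);
}

static void foo_chan_init(struct foo_chan *fc)
{
	spin_lock_init(&fc->lock);
	/* Replaces tasklet_init(&fc->tasklet, cb, (unsigned long)fc). */
	tasklet_setup(&fc->tasklet, foo_tasklet);
}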

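The bulk of the diff moves probe-time setup over to device-managed resources: devm_clk_get_enabled(), devm_platform_ioremap_resource_byname() and devm_k*alloc() replace the long goto unwind ladder, and the one object without a devm variant, the descriptor kmem_cache, is released through devm_add_action_or_reset(). A condensed sketch of that shape under the same assumptions (a hypothetical foo driver, not the exact ste_dma40 code):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_base {
	struct clk *clk;
	void __iomem *virtbase;
	struct kmem_cache *desc_slab;
};

/* Runs automatically on probe failure and on device removal. */
static void foo_drop_kmem_cache(void *data)
{
	kmem_cache_destroy(data);
}

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_base *base;
	int ret;

	base = devm_kzalloc(dev, sizeof(*base), GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	/* Gets, prepares and enables the clock; all undone automatically. */
	base->clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(base->clk))
		return PTR_ERR(base->clk);

	/* Requests the "base" memory region and ioremaps it in one call. */
	base->virtbase = devm_platform_ioremap_resource_byname(pdev, "base");
	if (IS_ERR(base->virtbase))
		return PTR_ERR(base->virtbase);

	/* No devm_ variant exists for a kmem_cache, so register an
	 * explicit release action instead of unwinding by hand.
	 */
	base->desc_slab = kmem_cache_create("foo_desc", 64, 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (!base->desc_slab)
		return -ENOMEM;

	ret = devm_add_action_or_reset(dev, foo_drop_kmem_cache,
				       base->desc_slab);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, base);
	return 0;
}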