Diffstat (limited to 'drivers/soc/ti/knav_qmss_queue.c')
-rw-r--r--   drivers/soc/ti/knav_qmss_queue.c | 249
1 file changed, 117 insertions(+), 132 deletions(-)
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 8b418379272d..6e56e7609ccd 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Keystone Queue Manager subsystem driver
  *
@@ -5,15 +6,6 @@
  * Authors:	Sandeep Nair <sandeep_n@ti.com>
  *		Cyril Chemparathy <cyril@ti.com>
  *		Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
  */
 
 #include <linux/debugfs.h>
@@ -22,10 +14,12 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_device.h>
 #include <linux/of_irq.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 #include <linux/slab.h>
 
 #include <linux/soc/ti/knav_qmss.h>
@@ -33,6 +27,8 @@
 
 static struct knav_device *kdev;
 static DEFINE_MUTEX(knav_dev_lock);
+#define knav_dev_lock_held() \
+	lockdep_is_held(&knav_dev_lock)
 
 /* Queue manager register indices in DTS */
 #define KNAV_QUEUE_PEEK_REG_INDEX	0
@@ -60,8 +56,9 @@ static DEFINE_MUTEX(knav_dev_lock);
 #define knav_queue_idx_to_inst(kdev, idx)			\
 	(kdev->instances + (idx << kdev->inst_shift))
 
-#define for_each_handle_rcu(qh, inst)				\
-	list_for_each_entry_rcu(qh, &inst->handles, list)
+#define for_each_handle_rcu(qh, inst)				\
+	list_for_each_entry_rcu(qh, &inst->handles, list,	\
+				knav_dev_lock_held())
 
 #define for_each_instance(idx, inst, kdev)			\
 	for (idx = 0, inst = kdev->instances;			\
@@ -72,7 +69,7 @@ static DEFINE_MUTEX(knav_dev_lock);
  * Newest followed by older ones. Search is done from start of the array
  * until a firmware file is found.
  */
-const char *knav_acc_firmwares[]	= {"ks2_qmss_pdsp_acc48.bin"};
+static const char * const knav_acc_firmwares[]	= {"ks2_qmss_pdsp_acc48.bin"};
 
 static bool device_ready;
 bool knav_qmss_device_ready(void)
@@ -84,7 +81,7 @@ EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
 /**
  * knav_queue_notify: qmss queue notfier call
  *
- * @inst:	qmss queue instance like accumulator
+ * @inst:	- qmss queue instance like accumulator
  */
 void knav_queue_notify(struct knav_queue_inst *inst)
 {
@@ -122,11 +119,10 @@ static int knav_queue_setup_irq(struct knav_range_info *range,
 
 	if (range->flags & RANGE_HAS_IRQ) {
 		irq = range->irqs[queue].irq;
-		ret = request_irq(irq, knav_queue_int_handler, 0,
-				  inst->irq_name, inst);
+		ret = request_irq(irq, knav_queue_int_handler, IRQF_NO_AUTOEN,
+				  inst->irq_name, inst);
 		if (ret)
 			return ret;
-		disable_irq(irq);
 		if (range->irqs[queue].cpu_mask) {
 			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
 			if (ret) {
@@ -256,8 +252,7 @@ static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
 	return qh;
 
 err:
-	if (qh->stats)
-		free_percpu(qh->stats);
+	free_percpu(qh->stats);
 	devm_kfree(inst->kdev->dev, qh);
 	return ERR_PTR(ret);
 }
@@ -414,7 +409,7 @@ static int knav_gp_close_queue(struct knav_range_info *range,
 	return 0;
 }
 
-struct knav_range_ops knav_gp_range_ops = {
+static const struct knav_range_ops knav_gp_range_ops = {
 	.set_notify	= knav_gp_set_notify,
 	.open_queue	= knav_gp_open_queue,
 	.close_queue	= knav_gp_close_queue,
@@ -483,17 +478,7 @@ static int knav_queue_debug_show(struct seq_file *s, void *v)
 	return 0;
 }
 
-static int knav_queue_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, knav_queue_debug_show, NULL);
-}
-
-static const struct file_operations knav_queue_debug_ops = {
-	.open		= knav_queue_debug_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);
 
 static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
 					u32 flags)
@@ -526,10 +511,10 @@ static int knav_queue_flush(struct knav_queue *qh)
 
 /**
  * knav_queue_open()	- open a hardware queue
- * @name	- name to give the queue handle
- * @id		- desired queue number if any or specifes the type
+ * @name:	- name to give the queue handle
+ * @id:		- desired queue number if any or specifes the type
  *		  of queue
- * @flags	- the following flags are applicable to queues:
+ * @flags:	- the following flags are applicable to queues:
  *		  KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
  *		  exclusive by default.
  *		  Subsequent attempts to open a shared queue should
@@ -560,7 +545,7 @@ EXPORT_SYMBOL_GPL(knav_queue_open);
 
 /**
  * knav_queue_close()	- close a hardware queue handle
- * @qh		- handle to close
+ * @qhandle:	- handle to close
  */
 void knav_queue_close(void *qhandle)
 {
@@ -587,9 +572,9 @@ EXPORT_SYMBOL_GPL(knav_queue_close);
 
 /**
  * knav_queue_device_control()	- Perform control operations on a queue
- * @qh		- queue handle
- * @cmd		- control commands
- * @arg		- command argument
+ * @qhandle:	- queue handle
+ * @cmd:	- control commands
+ * @arg:	- command argument
  *
  * Returns 0 on success, errno otherwise.
  */
@@ -638,10 +623,10 @@ EXPORT_SYMBOL_GPL(knav_queue_device_control);
 
 /**
  * knav_queue_push()	- push data (or descriptor) to the tail of a queue
- * @qh		- hardware queue handle
- * @data	- data to push
- * @size	- size of data to push
- * @flags	- can be used to pass additional information
+ * @qhandle:	- hardware queue handle
+ * @dma:	- DMA data to push
+ * @size:	- size of data to push
+ * @flags:	- can be used to pass additional information
  *
 * Returns 0 on success, errno otherwise.
 */
@@ -661,8 +646,8 @@ EXPORT_SYMBOL_GPL(knav_queue_push);
 
 /**
  * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
- * @qh		- hardware queue handle
- * @size	- (optional) size of the data pop'ed.
+ * @qhandle:	- hardware queue handle
+ * @size:	- (optional) size of the data pop'ed.
 *
 * Returns a DMA address on success, 0 on failure.
 */
@@ -736,7 +721,6 @@ static void kdesc_empty_pool(struct knav_pool *pool)
 		if (!desc) {
 			dev_dbg(pool->kdev->dev,
 				"couldn't unmap desc, continuing\n");
-			continue;
 		}
 	}
 	WARN_ON(i != pool->num_desc);
@@ -761,9 +745,9 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
 
 /**
 * knav_pool_create()	- Create a pool of descriptors
- * @name		- name to give the pool handle
- * @num_desc		- numbers of descriptors in the pool
- * @region_id		- QMSS region id from which the descriptors are to be
+ * @name:		- name to give the pool handle
+ * @num_desc:		- numbers of descriptors in the pool
+ * @region_id:		- QMSS region id from which the descriptors are to be
 *			  allocated.
 *
 * Returns a pool handle on success.
@@ -773,10 +757,9 @@ void *knav_pool_create(const char *name,
 				int num_desc, int region_id)
 {
 	struct knav_region *reg_itr, *region = NULL;
-	struct knav_pool *pool, *pi;
+	struct knav_pool *pool, *pi = NULL, *iter;
 	struct list_head *node;
 	unsigned last_offset;
-	bool slot_found;
 	int ret;
 
 	if (!kdev)
@@ -805,7 +788,7 @@ void *knav_pool_create(const char *name,
 	}
 
 	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
-	if (IS_ERR_OR_NULL(pool->queue)) {
+	if (IS_ERR(pool->queue)) {
 		dev_err(kdev->dev,
 			"failed to open queue for pool(%s), error %ld\n",
 			name, PTR_ERR(pool->queue));
@@ -831,18 +814,17 @@ void *knav_pool_create(const char *name,
 	 * the request
 	 */
 	last_offset = 0;
-	slot_found = false;
 	node = &region->pools;
-	list_for_each_entry(pi, &region->pools, region_inst) {
-		if ((pi->region_offset - last_offset) >= num_desc) {
-			slot_found = true;
+	list_for_each_entry(iter, &region->pools, region_inst) {
+		if ((iter->region_offset - last_offset) >= num_desc) {
+			pi = iter;
 			break;
 		}
-		last_offset = pi->region_offset + pi->num_desc;
+		last_offset = iter->region_offset + iter->num_desc;
 	}
-	node = &pi->region_inst;
 
-	if (slot_found) {
+	if (pi) {
+		node = &pi->region_inst;
 		pool->region = region;
 		pool->num_desc = num_desc;
 		pool->region_offset = last_offset;
@@ -871,7 +853,7 @@ EXPORT_SYMBOL_GPL(knav_pool_create);
 
 /**
 * knav_pool_destroy()	- Free a pool of descriptors
- * @pool	- pool handle
+ * @ph:		- pool handle
 */
 void knav_pool_destroy(void *ph)
 {
@@ -899,7 +881,7 @@ EXPORT_SYMBOL_GPL(knav_pool_destroy);
 
 /**
 * knav_pool_desc_get()	- Get a descriptor from the pool
- * @pool	- pool handle
+ * @ph:		- pool handle
 *
 * Returns descriptor from the pool.
 */
@@ -920,7 +902,8 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_get);
 
 /**
 * knav_pool_desc_put()	- return a descriptor to the pool
- * @pool	- pool handle
+ * @ph:		- pool handle
+ * @desc:	- virtual address
 */
 void knav_pool_desc_put(void *ph, void *desc)
 {
@@ -933,11 +916,11 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_put);
 
 /**
 * knav_pool_desc_map()	- Map descriptor for DMA transfer
- * @pool	- pool handle
- * @desc	- address of descriptor to map
- * @size	- size of descriptor to map
- * @dma		- DMA address return pointer
- * @dma_sz	- adjusted return pointer
+ * @ph:		- pool handle
+ * @desc:	- address of descriptor to map
+ * @size:	- size of descriptor to map
+ * @dma:	- DMA address return pointer
+ * @dma_sz:	- adjusted return pointer
 *
 * Returns 0 on success, errno otherwise.
 */
@@ -960,9 +943,9 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_map);
 
 /**
 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
- * @pool	- pool handle
- * @dma		- DMA address of descriptor to unmap
- * @dma_sz	- size of descriptor to unmap
+ * @ph:		- pool handle
+ * @dma:	- DMA address of descriptor to unmap
+ * @dma_sz:	- size of descriptor to unmap
 *
 * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
 * error values on return.
@@ -983,7 +966,7 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
 
 /**
 * knav_pool_count() - Get the number of descriptors in pool.
- * @pool	- pool handle
+ * @ph:		- pool handle
 * Returns number of elements in the pool.
 */
 int knav_pool_count(void *ph)
@@ -1090,17 +1073,24 @@ static const char *knav_queue_find_name(struct device_node *node)
 }
 
 static int knav_queue_setup_regions(struct knav_device *kdev,
-					struct device_node *regions)
+					struct device_node *node)
 {
 	struct device *dev = kdev->dev;
+	struct device_node *regions __free(device_node) =
+			of_get_child_by_name(node, "descriptor-regions");
 	struct knav_region *region;
 	struct device_node *child;
 	u32 temp[2];
 	int ret;
 
+	if (!regions)
+		return dev_err_probe(dev, -ENODEV,
+				     "descriptor-regions not specified\n");
+
 	for_each_child_of_node(regions, child) {
 		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
 		if (!region) {
+			of_node_put(child);
 			dev_err(dev, "out of memory allocating region\n");
 			return -ENOMEM;
 		}
@@ -1117,11 +1107,6 @@ static int knav_queue_setup_regions(struct knav_device *kdev,
 			continue;
 		}
 
-		if (!of_get_property(child, "link-index", NULL)) {
-			dev_err(dev, "No link info for %s\n", region->name);
-			devm_kfree(dev, region);
-			continue;
-		}
 		ret = of_property_read_u32(child, "link-index",
 					   &region->link_index);
 		if (ret) {
@@ -1134,10 +1119,9 @@ static int knav_queue_setup_regions(struct knav_device *kdev,
 		INIT_LIST_HEAD(&region->pools);
 		list_add_tail(&region->list, &kdev->regions);
 	}
-	if (list_empty(&kdev->regions)) {
-		dev_err(dev, "no valid region information found\n");
-		return -ENODEV;
-	}
+	if (list_empty(&kdev->regions))
+		return dev_err_probe(dev, -ENODEV,
+				     "no valid region information found\n");
 
 	/* Next, we run through the regions and set things up */
 	for_each_region(kdev, region)
@@ -1279,10 +1263,10 @@ static int knav_setup_queue_range(struct knav_device *kdev,
 	if (range->num_irqs)
 		range->flags |= RANGE_HAS_IRQ;
 
-	if (of_get_property(node, "qalloc-by-id", NULL))
+	if (of_property_read_bool(node, "qalloc-by-id"))
 		range->flags |= RANGE_RESERVED;
 
-	if (of_get_property(node, "accumulator", NULL)) {
+	if (of_property_present(node, "accumulator")) {
 		ret = knav_init_acc_range(kdev, node, range);
 		if (ret < 0) {
 			devm_kfree(dev, range);
@@ -1319,23 +1303,27 @@ static int knav_setup_queue_range(struct knav_device *kdev,
 }
 
 static int knav_setup_queue_pools(struct knav_device *kdev,
-					struct device_node *queue_pools)
+					struct device_node *node)
 {
+	struct device_node *queue_pools __free(device_node) =
+			of_get_child_by_name(node, "queue-pools");
 	struct device_node *type, *range;
-	int ret;
+
+	if (!queue_pools)
+		return dev_err_probe(kdev->dev, -ENODEV,
+				     "queue-pools not specified\n");
 
 	for_each_child_of_node(queue_pools, type) {
 		for_each_child_of_node(type, range) {
-			ret = knav_setup_queue_range(kdev, range);
 			/* return value ignored, we init the rest... */
+			knav_setup_queue_range(kdev, range);
 		}
 	}
 
 	/* ... and barf if they all failed! */
-	if (list_empty(&kdev->queue_ranges)) {
-		dev_err(kdev->dev, "no valid queue range found\n");
-		return -ENODEV;
-	}
+	if (list_empty(&kdev->queue_ranges))
+		return dev_err_probe(kdev->dev, -ENODEV,
+				     "no valid queue range found\n");
 
 	return 0;
 }
@@ -1403,17 +1391,24 @@ static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
 }
 
 static int knav_queue_init_qmgrs(struct knav_device *kdev,
-					struct device_node *qmgrs)
+					struct device_node *node)
 {
 	struct device *dev = kdev->dev;
+	struct device_node *qmgrs __free(device_node) =
+			of_get_child_by_name(node, "qmgrs");
 	struct knav_qmgr_info *qmgr;
 	struct device_node *child;
 	u32 temp[2];
 	int ret;
 
+	if (!qmgrs)
+		return dev_err_probe(dev, -ENODEV,
+				     "queue manager info not specified\n");
+
 	for_each_child_of_node(qmgrs, child) {
 		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
 		if (!qmgr) {
+			of_node_put(child);
 			dev_err(dev, "out of memory allocating qmgr\n");
 			return -ENOMEM;
 		}
@@ -1513,6 +1508,7 @@ static int knav_queue_init_pdsps(struct knav_device *kdev,
 	for_each_child_of_node(pdsps, child) {
 		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
 		if (!pdsp) {
+			of_node_put(child);
 			dev_err(dev, "out of memory allocating pdsp\n");
 			return -ENOMEM;
 		}
@@ -1680,6 +1676,26 @@ static int knav_queue_start_pdsps(struct knav_device *kdev)
 	return 0;
 }
 
+static int knav_queue_setup_pdsps(struct knav_device *kdev,
+				  struct device_node *node)
+{
+	struct device_node *pdsps __free(device_node) =
+			of_get_child_by_name(node, "pdsps");
+
+	if (pdsps) {
+		int ret;
+
+		ret = knav_queue_init_pdsps(kdev, pdsps);
+		if (ret)
+			return ret;
+
+		ret = knav_queue_start_pdsps(kdev);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
 static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
 {
 	struct knav_qmgr_info *qmgr;
@@ -1767,8 +1783,6 @@ MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
 static int knav_queue_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
-	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
-	const struct of_device_id *match;
 	struct device *dev = &pdev->dev;
 	u32 temp[2];
 	int ret;
@@ -1784,8 +1798,7 @@ static int knav_queue_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
-	match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev);
-	if (match && match->data)
+	if (device_get_match_data(dev))
 		kdev->version = QMSS_66AK2G;
 
 	platform_set_drvdata(pdev, kdev);
@@ -1797,8 +1810,9 @@ static int knav_queue_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&kdev->pdsps);
 
 	pm_runtime_enable(&pdev->dev);
-	ret = pm_runtime_get_sync(&pdev->dev);
+	ret = pm_runtime_resume_and_get(&pdev->dev);
 	if (ret < 0) {
+		pm_runtime_disable(&pdev->dev);
 		dev_err(dev, "Failed to enable QMSS\n");
 		return ret;
 	}
@@ -1812,39 +1826,17 @@ static int knav_queue_probe(struct platform_device *pdev)
 	kdev->num_queues = temp[1];
 
 	/* Initialize queue managers using device tree configuration */
-	qmgrs = of_get_child_by_name(node, "qmgrs");
-	if (!qmgrs) {
-		dev_err(dev, "queue manager info not specified\n");
-		ret = -ENODEV;
-		goto err;
-	}
-	ret = knav_queue_init_qmgrs(kdev, qmgrs);
-	of_node_put(qmgrs);
+	ret = knav_queue_init_qmgrs(kdev, node);
 	if (ret)
 		goto err;
 
 	/* get pdsp configuration values from device tree */
-	pdsps = of_get_child_by_name(node, "pdsps");
-	if (pdsps) {
-		ret = knav_queue_init_pdsps(kdev, pdsps);
-		if (ret)
-			goto err;
-
-		ret = knav_queue_start_pdsps(kdev);
-		if (ret)
-			goto err;
-	}
-	of_node_put(pdsps);
+	ret = knav_queue_setup_pdsps(kdev, node);
+	if (ret)
+		goto err;
 
 	/* get usable queue range values from device tree */
-	queue_pools = of_get_child_by_name(node, "queue-pools");
-	if (!queue_pools) {
-		dev_err(dev, "queue-pools not specified\n");
-		ret = -ENODEV;
-		goto err;
-	}
-	ret = knav_setup_queue_pools(kdev, queue_pools);
-	of_node_put(queue_pools);
+	ret = knav_setup_queue_pools(kdev, node);
 	if (ret)
 		goto err;
 
@@ -1866,13 +1858,7 @@ static int knav_queue_probe(struct platform_device *pdev)
 	if (ret)
 		goto err;
 
-	regions = of_get_child_by_name(node, "descriptor-regions");
-	if (!regions) {
-		dev_err(dev, "descriptor-regions not specified\n");
-		goto err;
-	}
-	ret = knav_queue_setup_regions(kdev, regions);
-	of_node_put(regions);
+	ret = knav_queue_setup_regions(kdev, node);
 	if (ret)
 		goto err;
 
@@ -1883,7 +1869,7 @@ static int knav_queue_probe(struct platform_device *pdev)
 	}
 
 	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
-			    &knav_queue_debug_ops);
+			    &knav_queue_debug_fops);
 
 	device_ready = true;
 	return 0;
@@ -1896,12 +1882,11 @@ err:
 	return ret;
 }
 
-static int knav_queue_remove(struct platform_device *pdev)
+static void knav_queue_remove(struct platform_device *pdev)
 {
 	/* TODO: Free resources */
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
-	return 0;
 }
 
 static struct platform_driver keystone_qmss_driver = {
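A few of the conversion patterns in the diff above, sketched as standalone C examples. All identifiers outside the driver (demo_handler, demo_setup_irq) are hypothetical. The request_irq() change in knav_queue_setup_irq() relies on IRQF_NO_AUTOEN, which leaves the interrupt line disabled after registration, so the old request_irq() + disable_irq() pair, and the short window in which the handler could fire before disable_irq(), goes away; enable_irq() is still called later, when the queue notifier is armed.

#include <linux/interrupt.h>

/* Hypothetical, self-contained sketch, not driver code: with
 * IRQF_NO_AUTOEN the line stays off after request_irq() and only a
 * later enable_irq() turns it on.
 */
static irqreturn_t demo_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_setup_irq(unsigned int irq, void *data)
{
	return request_irq(irq, demo_handler, IRQF_NO_AUTOEN, "demo", data);
}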
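DEFINE_SHOW_ATTRIBUTE(knav_queue_debug) replaces the hand-written knav_queue_debug_open() and its file_operations table: the macro generates an open wrapper around the existing knav_queue_debug_show() plus a knav_queue_debug_fops structure, which is why the probe path now passes &knav_queue_debug_fops to debugfs_create_file(). A reduced sketch with a hypothetical name "foo":

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Hypothetical example ("foo" is not part of the driver).  The macro
 * expects foo_show() and expands to foo_open(), wrapping single_open(),
 * plus a "foo_fops" file_operations wired to seq_read, seq_lseek and
 * single_release, exactly the boilerplate deleted for knav_queue_debug.
 */
static int foo_show(struct seq_file *s, void *v)
{
	seq_puts(s, "hello\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo);

static void __maybe_unused foo_debugfs_init(void)
{
	debugfs_create_file("foo", 0444, NULL, NULL, &foo_fops);
}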
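The converted setup helpers (knav_queue_setup_regions(), knav_setup_queue_pools(), knav_queue_init_qmgrs(), knav_queue_setup_pdsps()) now take the parent node and look up their child with the scope-based cleanup helper from <linux/cleanup.h>. A minimal sketch of the same pattern, where the helper name and the "some-child" node name are made up:

#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/of.h>

/* Hypothetical example: __free(device_node) runs of_node_put()
 * automatically when "child" goes out of scope, on the error path as
 * well as the success path, which is why the converted helpers and the
 * probe function no longer call of_node_put() themselves.
 */
static int example_setup_child(struct device *dev, struct device_node *node)
{
	struct device_node *child __free(device_node) =
			of_get_child_by_name(node, "some-child");

	if (!child)
		return dev_err_probe(dev, -ENODEV, "some-child not specified\n");

	/* ... read properties of "child" here ... */
	return 0;
}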
