Diffstat (limited to 'drivers/dma/dmaengine.c')

 -rw-r--r--  drivers/dma/dmaengine.c | 1000
 1 file changed, 627 insertions(+), 373 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index f1a441ab395d..ca13cd39330b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
  */
 
 /*
@@ -52,6 +40,8 @@
 #include <linux/dmaengine.h>
 #include <linux/hardirq.h>
 #include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/property.h>
 #include <linux/percpu.h>
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
@@ -63,19 +53,105 @@
 #include <linux/acpi_dma.h>
 #include <linux/of_dma.h>
 #include <linux/mempool.h>
+#include <linux/numa.h>
+
+#include "dmaengine.h"
 
 static DEFINE_MUTEX(dma_list_mutex);
 static DEFINE_IDA(dma_ida);
 static LIST_HEAD(dma_device_list);
 static long dmaengine_ref_count;
 
+/* --- debugfs implementation --- */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static struct dentry *rootdir;
+
+static void dmaengine_debug_register(struct dma_device *dma_dev)
+{
+	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
+						   rootdir);
+	if (IS_ERR(dma_dev->dbg_dev_root))
+		dma_dev->dbg_dev_root = NULL;
+}
+
+static void dmaengine_debug_unregister(struct dma_device *dma_dev)
+{
+	debugfs_remove_recursive(dma_dev->dbg_dev_root);
+	dma_dev->dbg_dev_root = NULL;
+}
+
+static void dmaengine_dbg_summary_show(struct seq_file *s,
+				       struct dma_device *dma_dev)
+{
+	struct dma_chan *chan;
+
+	list_for_each_entry(chan, &dma_dev->channels, device_node) {
+		if (chan->client_count) {
+			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
+				   chan->dbg_client_name ?: "in-use");
+
+			if (chan->router)
+				seq_printf(s, " (via router: %s)\n",
+					   dev_name(chan->router->dev));
+			else
+				seq_puts(s, "\n");
+		}
+	}
+}
+
+static int dmaengine_summary_show(struct seq_file *s, void *data)
+{
+	struct dma_device *dma_dev = NULL;
+
+	mutex_lock(&dma_list_mutex);
+	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
+		seq_printf(s, "dma%d (%s): number of channels: %u\n",
+			   dma_dev->dev_id, dev_name(dma_dev->dev),
+			   dma_dev->chancnt);
+
+		if (dma_dev->dbg_summary_show)
+			dma_dev->dbg_summary_show(s, dma_dev);
+		else
+			dmaengine_dbg_summary_show(s, dma_dev);
+
+		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
+			seq_puts(s, "\n");
+	}
+	mutex_unlock(&dma_list_mutex);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
+
+static void __init dmaengine_debugfs_init(void)
+{
+	rootdir = debugfs_create_dir("dmaengine", NULL);
+
+	/* /sys/kernel/debug/dmaengine/summary */
+	debugfs_create_file("summary", 0444, rootdir, NULL,
+			    &dmaengine_summary_fops);
+}
+#else
+static inline void dmaengine_debugfs_init(void) { }
+static inline int dmaengine_debug_register(struct dma_device *dma_dev)
+{
+	return 0;
+}
+
+static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
+#endif /* DEBUG_FS */
+
 /* --- sysfs implementation --- */
 
+#define DMA_SLAVE_NAME	"slave"
+
 /**
- * dev_to_dma_chan - convert a device pointer to the its sysfs container object
- * @dev - device node
+ * dev_to_dma_chan - convert a device pointer to its sysfs container object
+ * @dev:	device node
  *
- * Must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
  */
 static struct dma_chan *dev_to_dma_chan(struct device *dev)
 {
@@ -98,7 +174,7 @@ static ssize_t memcpy_count_show(struct device *dev,
 	if (chan) {
 		for_each_possible_cpu(i)
 			count += per_cpu_ptr(chan->local, i)->memcpy_count;
-		err = sprintf(buf, "%lu\n", count);
+		err = sysfs_emit(buf, "%lu\n", count);
 	} else
 		err = -ENODEV;
 	mutex_unlock(&dma_list_mutex);
@@ -120,7 +196,7 @@ static ssize_t bytes_transferred_show(struct device *dev,
 	if (chan) {
 		for_each_possible_cpu(i)
 			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
-		err = sprintf(buf, "%lu\n", count);
+		err = sysfs_emit(buf, "%lu\n", count);
 	} else
 		err = -ENODEV;
 	mutex_unlock(&dma_list_mutex);
@@ -138,7 +214,7 @@ static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
 	mutex_lock(&dma_list_mutex);
 	chan = dev_to_dma_chan(dev);
 	if (chan)
-		err = sprintf(buf, "%d\n", chan->client_count);
+		err = sysfs_emit(buf, "%d\n", chan->client_count);
 	else
 		err = -ENODEV;
 	mutex_unlock(&dma_list_mutex);
@@ -160,10 +236,6 @@ static void chan_dev_release(struct device *dev)
 	struct dma_chan_dev *chan_dev;
 
 	chan_dev = container_of(dev, typeof(*chan_dev), device);
-	if (atomic_dec_and_test(chan_dev->idr_ref)) {
-		ida_free(&dma_ida, chan_dev->dev_id);
-		kfree(chan_dev->idr_ref);
-	}
 	kfree(chan_dev);
 }
 
@@ -175,11 +247,152 @@
 
 /* --- client and device registration --- */
 
-#define dma_device_satisfies_mask(device, mask) \
-	__dma_device_satisfies_mask((device), &(mask))
-static int
-__dma_device_satisfies_mask(struct dma_device *device,
-			    const dma_cap_mask_t *want)
+/* enable iteration over all operation types */
+static dma_cap_mask_t dma_cap_mask_all;
+
+/**
+ * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan:	associated channel for this entry
+ */
+struct dma_chan_tbl_ent {
+	struct dma_chan *chan;
+};
+
+/* percpu lookup table for memory-to-memory offload providers */
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
+
+static int __init dma_channel_table_init(void)
+{
+	enum dma_transaction_type cap;
+	int err = 0;
+
+	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
+
+	/* 'interrupt', 'private', and 'slave' are channel capabilities,
+	 * but are not associated with an operation so they do not need
+	 * an entry in the channel_table
+	 */
+	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
+	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
+	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
+
+	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
+		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
+		if (!channel_table[cap]) {
+			err = -ENOMEM;
+			break;
+		}
+	}
+
+	if (err) {
+		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
+		for_each_dma_cap_mask(cap, dma_cap_mask_all)
+			free_percpu(channel_table[cap]);
+	}
+
+	return err;
+}
+arch_initcall(dma_channel_table_init);
+
+/**
+ * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
+ * @chan:	DMA channel to test
+ * @cpu:	CPU index which the channel should be close to
+ *
+ * Returns true if the channel is in the same NUMA-node as the CPU.
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+	int node = dev_to_node(chan->device->dev);
+	return node == NUMA_NO_NODE ||
+		cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
+ * @cap:	capability to match
+ * @cpu:	CPU index which the channel should be close to
+ *
+ * If some channels are close to the given CPU, the one with the lowest
+ * reference count is returned. Otherwise, CPU is ignored and only the
+ * reference count is taken into account.
+ *
+ * Must be called under dma_list_mutex.
+ */
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
+{
+	struct dma_device *device;
+	struct dma_chan *chan;
+	struct dma_chan *min = NULL;
+	struct dma_chan *localmin = NULL;
+
+	list_for_each_entry(device, &dma_device_list, global_node) {
+		if (!dma_has_cap(cap, device->cap_mask) ||
+		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
+			continue;
+		list_for_each_entry(chan, &device->channels, device_node) {
+			if (!chan->client_count)
+				continue;
+			if (!min || chan->table_count < min->table_count)
+				min = chan;
+
+			if (dma_chan_is_local(chan, cpu))
+				if (!localmin ||
+				    chan->table_count < localmin->table_count)
+					localmin = chan;
+		}
+	}
+
+	chan = localmin ? localmin : min;
+
+	if (chan)
+		chan->table_count++;
+
+	return chan;
+}
+
+/**
+ * dma_channel_rebalance - redistribute the available channels
+ *
+ * Optimize for CPU isolation (each CPU gets a dedicated channel for an
+ * operation type) in the SMP case, and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case.
+ *
+ * Must be called under dma_list_mutex.
+ */
+static void dma_channel_rebalance(void)
+{
+	struct dma_chan *chan;
+	struct dma_device *device;
+	int cpu;
+	int cap;
+
+	/* undo the last distribution */
+	for_each_dma_cap_mask(cap, dma_cap_mask_all)
+		for_each_possible_cpu(cpu)
+			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
+
+	list_for_each_entry(device, &dma_device_list, global_node) {
+		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+			continue;
+		list_for_each_entry(chan, &device->channels, device_node)
+			chan->table_count = 0;
+	}
+
+	/* don't populate the channel_table if no clients are available */
+	if (!dmaengine_ref_count)
+		return;
+
+	/* redistribute available channels */
+	for_each_dma_cap_mask(cap, dma_cap_mask_all)
+		for_each_online_cpu(cpu) {
+			chan = min_chan(cap, cpu);
+			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
+		}
+}
+
+static int dma_device_satisfies_mask(struct dma_device *device,
+				     const dma_cap_mask_t *want)
 {
 	dma_cap_mask_t has;
 
@@ -190,14 +403,14 @@ __dma_device_satisfies_mask(struct dma_device *device,
 
 static struct module *dma_chan_to_owner(struct dma_chan *chan)
 {
-	return chan->device->dev->driver->owner;
+	return chan->device->owner;
 }
 
 /**
  * balance_ref_count - catch up the channel reference count
- * @chan - channel to balance ->client_count versus dmaengine_ref_count
+ * @chan:	channel to balance ->client_count versus dmaengine_ref_count
  *
- * balance_ref_count must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
  */
 static void balance_ref_count(struct dma_chan *chan)
 {
@@ -209,11 +422,28 @@ static void balance_ref_count(struct dma_chan *chan)
 	}
 }
 
+static void dma_device_release(struct kref *ref)
+{
+	struct dma_device *device = container_of(ref, struct dma_device, ref);
+
+	list_del_rcu(&device->global_node);
+	dma_channel_rebalance();
+
+	if (device->device_release)
+		device->device_release(device);
+}
+
+static void dma_device_put(struct dma_device *device)
+{
+	lockdep_assert_held(&dma_list_mutex);
+	kref_put(&device->ref, dma_device_release);
+}
+
 /**
- * dma_chan_get - try to grab a dma channel's parent driver module
- * @chan - channel to grab
+ * dma_chan_get - try to grab a DMA channel's parent driver module
+ * @chan:	channel to grab
  *
- * Must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
  */
 static int dma_chan_get(struct dma_chan *chan)
 {
@@ -223,12 +453,19 @@ static int dma_chan_get(struct dma_chan *chan)
 	/* The channel is already in use, update client count */
 	if (chan->client_count) {
 		__module_get(owner);
-		goto out;
+		chan->client_count++;
+		return 0;
 	}
 
 	if (!try_module_get(owner))
 		return -ENODEV;
 
+	ret = kref_get_unless_zero(&chan->device->ref);
+	if (!ret) {
+		ret = -ENODEV;
+		goto module_put_out;
+	}
+
 	/* allocate upon first client reference */
 	if (chan->device->device_alloc_chan_resources) {
 		ret = chan->device->device_alloc_chan_resources(chan);
@@ -236,23 +473,25 @@ static int dma_chan_get(struct dma_chan *chan)
 			goto err_out;
 	}
 
+	chan->client_count++;
+
 	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 		balance_ref_count(chan);
 
-out:
-	chan->client_count++;
 	return 0;
 
 err_out:
+	dma_device_put(chan->device);
+module_put_out:
 	module_put(owner);
 	return ret;
 }
 
 /**
- * dma_chan_put - drop a reference to a dma channel's parent driver module
- * @chan - channel to release
+ * dma_chan_put - drop a reference to a DMA channel's parent driver module
+ * @chan:	channel to release
  *
- * Must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
  */
 static void dma_chan_put(struct dma_chan *chan)
 {
@@ -261,7 +500,6 @@
 		return;
 
 	chan->client_count--;
-	module_put(dma_chan_to_owner(chan));
 
 	/* This channel is not in use anymore, free it */
 	if (!chan->client_count && chan->device->device_free_chan_resources) {
@@ -276,6 +514,9 @@
 		chan->router = NULL;
 		chan->route_data = NULL;
 	}
+
+	dma_device_put(chan->device);
+	module_put(dma_chan_to_owner(chan));
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -300,59 +541,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 EXPORT_SYMBOL(dma_sync_wait);
 
 /**
- * dma_cap_mask_all - enable iteration over all operation types
- */
-static dma_cap_mask_t dma_cap_mask_all;
-
-/**
- * dma_chan_tbl_ent - tracks channel allocations per core/operation
- * @chan - associated channel for this entry
- */
-struct dma_chan_tbl_ent {
-	struct dma_chan *chan;
-};
-
-/**
- * channel_table - percpu lookup table for memory-to-memory offload providers
- */
-static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
-
-static int __init dma_channel_table_init(void)
-{
-	enum dma_transaction_type cap;
-	int err = 0;
-
-	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
-
-	/* 'interrupt', 'private', and 'slave' are channel capabilities,
-	 * but are not associated with an operation so they do not need
-	 * an entry in the channel_table
-	 */
-	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
-	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
-	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
-
-	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
-		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
-		if (!channel_table[cap]) {
-			err = -ENOMEM;
-			break;
-		}
-	}
-
-	if (err) {
-		pr_err("initialization failure\n");
-		for_each_dma_cap_mask(cap, dma_cap_mask_all)
-			free_percpu(channel_table[cap]);
-	}
-
-	return err;
-}
-arch_initcall(dma_channel_table_init);
-
-/**
  * dma_find_channel - find a channel to carry out the operation
- * @tx_type: transaction type
+ * @tx_type:	transaction type
  */
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 {
@@ -380,96 +570,6 @@
 }
 EXPORT_SYMBOL(dma_issue_pending_all);
 
-/**
- * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
- */
-static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
-{
-	int node = dev_to_node(chan->device->dev);
-	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
-}
-
-/**
- * min_chan - returns the channel with min count and in the same numa-node as the cpu
- * @cap: capability to match
- * @cpu: cpu index which the channel should be close to
- *
- * If some channels are close to the given cpu, the one with the lowest
- * reference count is returned. Otherwise, cpu is ignored and only the
- * reference count is taken into account.
- * Must be called under dma_list_mutex.
- */
-static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
-{
-	struct dma_device *device;
-	struct dma_chan *chan;
-	struct dma_chan *min = NULL;
-	struct dma_chan *localmin = NULL;
-
-	list_for_each_entry(device, &dma_device_list, global_node) {
-		if (!dma_has_cap(cap, device->cap_mask) ||
-		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
-			continue;
-		list_for_each_entry(chan, &device->channels, device_node) {
-			if (!chan->client_count)
-				continue;
-			if (!min || chan->table_count < min->table_count)
-				min = chan;
-
-			if (dma_chan_is_local(chan, cpu))
-				if (!localmin ||
-				    chan->table_count < localmin->table_count)
-					localmin = chan;
-		}
-	}
-
-	chan = localmin ? localmin : min;
-
-	if (chan)
-		chan->table_count++;
-
-	return chan;
-}
-
-/**
- * dma_channel_rebalance - redistribute the available channels
- *
- * Optimize for cpu isolation (each cpu gets a dedicated channel for an
- * operation type) in the SMP case, and operation isolation (avoid
- * multi-tasking channels) in the non-SMP case. Must be called under
- * dma_list_mutex.
- */
-static void dma_channel_rebalance(void)
-{
-	struct dma_chan *chan;
-	struct dma_device *device;
-	int cpu;
-	int cap;
-
-	/* undo the last distribution */
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		for_each_possible_cpu(cpu)
-			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
-
-	list_for_each_entry(device, &dma_device_list, global_node) {
-		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
-			continue;
-		list_for_each_entry(chan, &device->channels, device_node)
-			chan->table_count = 0;
-	}
-
-	/* don't populate the channel_table if no clients are available */
-	if (!dmaengine_ref_count)
-		return;
-
-	/* redistribute available channels */
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		for_each_online_cpu(cpu) {
-			chan = min_chan(cap, cpu);
-			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
-		}
-}
-
 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 {
 	struct dma_device *device;
@@ -495,13 +595,25 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 	caps->src_addr_widths = device->src_addr_widths;
 	caps->dst_addr_widths = device->dst_addr_widths;
 	caps->directions = device->directions;
+	caps->min_burst = device->min_burst;
 	caps->max_burst = device->max_burst;
+	caps->max_sg_burst = device->max_sg_burst;
 	caps->residue_granularity = device->residue_granularity;
 	caps->descriptor_reuse = device->descriptor_reuse;
 
 	caps->cmd_pause = !!device->device_pause;
 	caps->cmd_resume = !!device->device_resume;
 	caps->cmd_terminate = !!device->device_terminate_all;
 
+	/*
+	 * DMA engine device might be configured with non-uniformly
+	 * distributed slave capabilities per device channels. In this
+	 * case the corresponding driver may provide the device_caps
+	 * callback to override the generic capabilities with
+	 * channel-specific ones.
+	 */
+	if (device->device_caps)
+		device->device_caps(chan, caps);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dma_get_slave_caps);
@@ -512,7 +624,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 {
 	struct dma_chan *chan;
 
-	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
+	if (mask && !dma_device_satisfies_mask(dev, mask)) {
 		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
 		return NULL;
 	}
@@ -582,17 +694,16 @@ static struct dma_chan *find_candidate(struct dma_device *device,
 
 /**
  * dma_get_slave_channel - try to get specific channel exclusively
- * @chan: target channel
+ * @chan:	target channel
  */
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 {
-	int err = -EBUSY;
-
 	/* lock against __dma_request_channel */
 	mutex_lock(&dma_list_mutex);
 
 	if (chan->client_count == 0) {
 		struct dma_device *device = chan->device;
+		int err;
 
 		dma_cap_set(DMA_PRIVATE, device->cap_mask);
 		device->privatecnt++;
@@ -636,14 +747,16 @@ EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 
 /**
  * __dma_request_channel - try to allocate an exclusive channel
- * @mask: capabilities that the channel must satisfy
- * @fn: optional callback to disposition available channels
- * @fn_param: opaque parameter to pass to dma_filter_fn
+ * @mask:	capabilities that the channel must satisfy
+ * @fn:		optional callback to disposition available channels
+ * @fn_param:	opaque parameter to pass to dma_filter_fn()
+ * @np:		device node to look for DMA channels
  *
  * Returns pointer to appropriate DMA channel on success or NULL.
  */
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
-				       dma_filter_fn fn, void *fn_param)
+				       dma_filter_fn fn, void *fn_param,
+				       struct device_node *np)
 {
 	struct dma_device *device, *_d;
 	struct dma_chan *chan = NULL;
@@ -651,6 +764,10 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 	/* Find a channel */
 	mutex_lock(&dma_list_mutex);
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+		/* Finds a DMA controller with matching device node */
+		if (np && device->dev->of_node && np != device->dev->of_node)
+			continue;
+
 		chan = find_candidate(device, mask, fn, fn_param);
 		if (!IS_ERR(chan))
 			break;
@@ -697,22 +814,20 @@ static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
  */
 struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 {
+	struct fwnode_handle *fwnode = dev_fwnode(dev);
 	struct dma_device *d, *_d;
 	struct dma_chan *chan = NULL;
 
-	/* If device-tree is present get slave info from here */
-	if (dev->of_node)
-		chan = of_dma_request_slave_channel(dev->of_node, name);
-
-	/* If device was enumerated by ACPI get slave info from here */
-	if (has_acpi_companion(dev) && !chan)
+	if (is_of_node(fwnode))
+		chan = of_dma_request_slave_channel(to_of_node(fwnode), name);
+	else if (is_acpi_device_node(fwnode))
 		chan = acpi_dma_request_slave_chan_by_name(dev, name);
 
-	if (chan) {
-		/* Valid channel found or requester need to be deferred */
-		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
-			return chan;
-	}
+	if (PTR_ERR(chan) == -EPROBE_DEFER)
+		return chan;
+
+	if (!IS_ERR_OR_NULL(chan))
+		goto found;
 
 	/* Try to find the channel via the DMA filter map(s) */
 	mutex_lock(&dma_list_mutex);
@@ -732,31 +847,35 @@
 	}
 	mutex_unlock(&dma_list_mutex);
 
-	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
-}
-EXPORT_SYMBOL_GPL(dma_request_chan);
+	if (IS_ERR(chan))
+		return chan;
+	if (!chan)
+		return ERR_PTR(-EPROBE_DEFER);
 
-/**
- * dma_request_slave_channel - try to allocate an exclusive slave channel
- * @dev: pointer to client device structure
- * @name: slave channel name
- *
- * Returns pointer to appropriate DMA channel on success or NULL.
- */
-struct dma_chan *dma_request_slave_channel(struct device *dev,
-					   const char *name)
-{
-	struct dma_chan *ch = dma_request_chan(dev, name);
-	if (IS_ERR(ch))
-		return NULL;
+found:
+#ifdef CONFIG_DEBUG_FS
+	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev), name);
+	/* No functional issue if it fails, users are supposed to test before use */
+#endif
+
+	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
+	if (!chan->name)
+		return chan;
+	chan->slave = dev;
 
-	return ch;
+	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
+			      DMA_SLAVE_NAME))
+		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
+	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
+		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);
+
+	return chan;
 }
-EXPORT_SYMBOL_GPL(dma_request_slave_channel);
+EXPORT_SYMBOL_GPL(dma_request_chan);
 
 /**
  * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
- * @mask: capabilities that the channel must satisfy
+ * @mask:	capabilities that the channel must satisfy
  *
  * Returns pointer to appropriate DMA channel on success or an error pointer.
  */
@@ -767,7 +886,7 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
 	if (!mask)
 		return ERR_PTR(-ENODEV);
 
-	chan = __dma_request_channel(mask, NULL, NULL);
+	chan = __dma_request_channel(mask, NULL, NULL, NULL);
 	if (!chan) {
 		mutex_lock(&dma_list_mutex);
 		if (list_empty(&dma_device_list))
@@ -790,10 +909,53 @@ void dma_release_channel(struct dma_chan *chan)
 	/* drop PRIVATE cap enabled by __dma_request_channel() */
 	if (--chan->device->privatecnt == 0)
 		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
+
+	if (chan->slave) {
+		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
+		sysfs_remove_link(&chan->slave->kobj, chan->name);
+		kfree(chan->name);
+		chan->name = NULL;
+		chan->slave = NULL;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	kfree(chan->dbg_client_name);
+	chan->dbg_client_name = NULL;
+#endif
 	mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL_GPL(dma_release_channel);
 
+static void dmaenginem_release_channel(void *chan)
+{
+	dma_release_channel(chan);
+}
+
+/**
+ * devm_dma_request_chan - try to allocate an exclusive slave channel
+ * @dev:	pointer to client device structure
+ * @name:	slave channel name
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ *
+ * The operation is managed and will be undone on driver detach.
+ */

+struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name)
+{
+	struct dma_chan *chan = dma_request_chan(dev, name);
+	int ret = 0;
+
+	if (!IS_ERR(chan))
+		ret = devm_add_action_or_reset(dev, dmaenginem_release_channel, chan);
+
+	if (ret)
+		return ERR_PTR(ret);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(devm_dma_request_chan);
+
 /**
  * dmaengine_get - register interest in dma_channels
  */
@@ -834,18 +996,18 @@ void dmaengine_get(void)
 EXPORT_SYMBOL(dmaengine_get);
 
 /**
- * dmaengine_put - let dma drivers be removed when ref_count == 0
+ * dmaengine_put - let DMA drivers be removed when ref_count == 0
 */
 void dmaengine_put(void)
 {
-	struct dma_device *device;
+	struct dma_device *device, *_d;
 	struct dma_chan *chan;
 
 	mutex_lock(&dma_list_mutex);
 	dmaengine_ref_count--;
 	BUG_ON(dmaengine_ref_count < 0);
 	/* drop channel references */
-	list_for_each_entry(device, &dma_device_list, global_node) {
+	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
 		list_for_each_entry(chan, &device->channels, device_node)
@@ -904,15 +1066,112 @@ static int get_dma_id(struct dma_device *device)
 	return 0;
 }
 
+static int __dma_async_device_channel_register(struct dma_device *device,
+					       struct dma_chan *chan,
+					       const char *name)
+{
+	int rc;
+
+	chan->local = alloc_percpu(typeof(*chan->local));
+	if (!chan->local)
+		return -ENOMEM;
+	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+	if (!chan->dev) {
+		rc = -ENOMEM;
+		goto err_free_local;
+	}
+
+	/*
+	 * When the chan_id is a negative value, we are dynamically adding
+	 * the channel. Otherwise we are static enumerating.
+	 */
+	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
+	if (chan->chan_id < 0) {
+		pr_err("%s: unable to alloc ida for chan: %d\n",
+		       __func__, chan->chan_id);
+		rc = chan->chan_id;
+		goto err_free_dev;
+	}
+
+	chan->dev->device.class = &dma_devclass;
+	chan->dev->device.parent = device->dev;
+	chan->dev->chan = chan;
+	chan->dev->dev_id = device->dev_id;
+	if (!name)
+		dev_set_name(&chan->dev->device, "dma%dchan%d", device->dev_id, chan->chan_id);
+	else
+		dev_set_name(&chan->dev->device, "%s", name);
+	rc = device_register(&chan->dev->device);
+	if (rc)
+		goto err_out_ida;
+	chan->client_count = 0;
+	device->chancnt++;
+
+	return 0;
+
+ err_out_ida:
+	ida_free(&device->chan_ida, chan->chan_id);
+ err_free_dev:
+	kfree(chan->dev);
+ err_free_local:
+	free_percpu(chan->local);
+	chan->local = NULL;
+	return rc;
+}
+
+int dma_async_device_channel_register(struct dma_device *device,
+				      struct dma_chan *chan,
+				      const char *name)
+{
+	int rc;
+
+	rc = __dma_async_device_channel_register(device, chan, name);
+	if (rc < 0)
+		return rc;
+
+	dma_channel_rebalance();
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
+
+static void __dma_async_device_channel_unregister(struct dma_device *device,
+						  struct dma_chan *chan)
+{
+	if (chan->local == NULL)
+		return;
+
+	WARN_ONCE(!device->device_release && chan->client_count,
+		  "%s called while %d clients hold a reference\n",
+		  __func__, chan->client_count);
+	mutex_lock(&dma_list_mutex);
+	device->chancnt--;
+	chan->dev->chan = NULL;
+	mutex_unlock(&dma_list_mutex);
+	ida_free(&device->chan_ida, chan->chan_id);
+	device_unregister(&chan->dev->device);
+	free_percpu(chan->local);
+}
+
+void dma_async_device_channel_unregister(struct dma_device *device,
+					 struct dma_chan *chan)
+{
+	__dma_async_device_channel_unregister(device, chan);
+	dma_channel_rebalance();
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
+
 /**
  * dma_async_device_register - registers DMA devices found
- * @device: &dma_device
+ * @device:	pointer to &struct dma_device
+ *
+ * After calling this routine the structure should not be freed except in the
+ * device_release() callback which will be called after
+ * dma_async_device_unregister() is called and no further references are taken.
  */
 int dma_async_device_register(struct dma_device *device)
 {
-	int chancnt = 0, rc;
+	int rc;
 	struct dma_chan* chan;
-	atomic_t *idr_ref;
 
 	if (!device)
 		return -ENODEV;
@@ -923,69 +1182,29 @@ int dma_async_device_register(struct dma_device *device)
 		return -EIO;
 	}
 
-	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
-		dev_err(device->dev,
-			"Device claims capability %s, but op is not defined\n",
-			"DMA_MEMCPY");
-		return -EIO;
-	}
-
-	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
-		dev_err(device->dev,
-			"Device claims capability %s, but op is not defined\n",
-			"DMA_XOR");
-		return -EIO;
-	}
-
-	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
-		dev_err(device->dev,
-			"Device claims capability %s, but op is not defined\n",
-			"DMA_XOR_VAL");
-		return -EIO;
-	}
-
-	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
-		dev_err(device->dev,
-			"Device claims capability %s, but op is not defined\n",
-			"DMA_PQ");
-		return -EIO;
-	}
-
-	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
-		dev_err(device->dev,
-			"Device claims capability %s, but op is not defined\n",
-			"DMA_PQ_VAL");
-		return -EIO;
-	}
-
-	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
-		dev_err(device->dev,
-			"Device claims capability %s, but op is not defined\n",
-			"DMA_MEMSET");
-		return -EIO;
-	}
-
-	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
-		dev_err(device->dev,
-			"Device claims capability %s, but op is not defined\n",
-			"DMA_INTERRUPT");
-		return -EIO;
-	}
+	device->owner = device->dev->driver->owner;
 
-	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
-		dev_err(device->dev,
-			"Device claims capability %s, but op is not defined\n",
-			"DMA_CYCLIC");
-		return -EIO;
-	}
+#define CHECK_CAP(_name, _type)								\
+{											\
+	if (dma_has_cap(_type, device->cap_mask) && !device->device_prep_##_name) {	\
+		dev_err(device->dev,							\
+			"Device claims capability %s, but op is not defined\n",	\
+			__stringify(_type));						\
+		return -EIO;								\
+	}										\
+}
 
-	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
-		dev_err(device->dev,
-			"Device claims capability %s, but op is not defined\n",
-			"DMA_INTERLEAVE");
-		return -EIO;
-	}
+	CHECK_CAP(dma_memcpy,      DMA_MEMCPY);
+	CHECK_CAP(dma_xor,         DMA_XOR);
+	CHECK_CAP(dma_xor_val,     DMA_XOR_VAL);
+	CHECK_CAP(dma_pq,          DMA_PQ);
+	CHECK_CAP(dma_pq_val,      DMA_PQ_VAL);
+	CHECK_CAP(dma_memset,      DMA_MEMSET);
+	CHECK_CAP(dma_interrupt,   DMA_INTERRUPT);
+	CHECK_CAP(dma_cyclic,      DMA_CYCLIC);
+	CHECK_CAP(interleaved_dma, DMA_INTERLEAVE);
+#undef CHECK_CAP
 
 	if (!device->device_tx_status) {
 		dev_err(device->dev, "Device tx_status is not defined\n");
@@ -998,65 +1217,31 @@ int dma_async_device_register(struct dma_device *device)
 		return -EIO;
 	}
 
+	if (!device->device_release)
+		dev_dbg(device->dev,
+			"WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
+
+	kref_init(&device->ref);
+
 	/* note: this only matters in the
 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 	 */
 	if (device_has_all_tx_types(device))
 		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
 
-	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
-	if (!idr_ref)
-		return -ENOMEM;
 	rc = get_dma_id(device);
-	if (rc != 0) {
-		kfree(idr_ref);
+	if (rc != 0)
 		return rc;
-	}
-
-	atomic_set(idr_ref, 0);
+
+	ida_init(&device->chan_ida);
 
 	/* represent channels in sysfs. Probably want devs too */
 	list_for_each_entry(chan, &device->channels, device_node) {
-		rc = -ENOMEM;
-		chan->local = alloc_percpu(typeof(*chan->local));
-		if (chan->local == NULL)
+		rc = __dma_async_device_channel_register(device, chan, NULL);
+		if (rc < 0)
 			goto err_out;
-		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
-		if (chan->dev == NULL) {
-			free_percpu(chan->local);
-			chan->local = NULL;
-			goto err_out;
-		}
-
-		chan->chan_id = chancnt++;
-		chan->dev->device.class = &dma_devclass;
-		chan->dev->device.parent = device->dev;
-		chan->dev->chan = chan;
-		chan->dev->idr_ref = idr_ref;
-		chan->dev->dev_id = device->dev_id;
-		atomic_inc(idr_ref);
-		dev_set_name(&chan->dev->device, "dma%dchan%d",
-			     device->dev_id, chan->chan_id);
-
-		rc = device_register(&chan->dev->device);
-		if (rc) {
-			free_percpu(chan->local);
-			chan->local = NULL;
-			kfree(chan->dev);
-			atomic_dec(idr_ref);
-			goto err_out;
-		}
-		chan->client_count = 0;
-	}
-
-	if (!chancnt) {
-		dev_err(device->dev, "%s: device has no channels!\n", __func__);
-		rc = -ENODEV;
-		goto err_out;
 	}
 
-	device->chancnt = chancnt;
-
 	mutex_lock(&dma_list_mutex);
 	/* take references on public channels */
 	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
@@ -1080,13 +1265,14 @@ int dma_async_device_register(struct dma_device *device)
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
+	dmaengine_debug_register(device);
+
 	return 0;
 
 err_out:
 	/* if we never registered a channel just release the idr */
-	if (atomic_read(idr_ref) == 0) {
+	if (!device->chancnt) {
 		ida_free(&dma_ida, device->dev_id);
-		kfree(idr_ref);
 		return rc;
 	}
@@ -1105,65 +1291,53 @@ EXPORT_SYMBOL(dma_async_device_register);
 
 /**
  * dma_async_device_unregister - unregister a DMA device
- * @device: &dma_device
+ * @device:	pointer to &struct dma_device
  *
  * This routine is called by dma driver exit routines, dmaengine holds module
  * references to prevent it being called while channels are in use.
 */
 void dma_async_device_unregister(struct dma_device *device)
 {
-	struct dma_chan *chan;
+	struct dma_chan *chan, *n;
+
+	dmaengine_debug_unregister(device);
+
+	list_for_each_entry_safe(chan, n, &device->channels, device_node)
+		__dma_async_device_channel_unregister(device, chan);
 
 	mutex_lock(&dma_list_mutex);
-	list_del_rcu(&device->global_node);
+	/*
+	 * setting DMA_PRIVATE ensures the device being torn down will not
+	 * be used in the channel_table
+	 */
+	dma_cap_set(DMA_PRIVATE, device->cap_mask);
 	dma_channel_rebalance();
+	ida_free(&dma_ida, device->dev_id);
+	dma_device_put(device);
 	mutex_unlock(&dma_list_mutex);
-
-	list_for_each_entry(chan, &device->channels, device_node) {
-		WARN_ONCE(chan->client_count,
-			  "%s called while %d clients hold a reference\n",
-			  __func__, chan->client_count);
-		mutex_lock(&dma_list_mutex);
-		chan->dev->chan = NULL;
-		mutex_unlock(&dma_list_mutex);
-		device_unregister(&chan->dev->device);
-		free_percpu(chan->local);
-	}
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
-static void dmam_device_release(struct device *dev, void *res)
+static void dmaenginem_async_device_unregister(void *device)
 {
-	struct dma_device *device;
-
-	device = *(struct dma_device **)res;
 	dma_async_device_unregister(device);
 }
 
 /**
  * dmaenginem_async_device_register - registers DMA devices found
- * @device: &dma_device
+ * @device:	pointer to &struct dma_device
  *
  * The operation is managed and will be undone on driver detach.
 */
 int dmaenginem_async_device_register(struct dma_device *device)
 {
-	void *p;
 	int ret;
 
-	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
-	if (!p)
-		return -ENOMEM;
-
 	ret = dma_async_device_register(device);
-	if (!ret) {
-		*(struct dma_device **)p = device;
-		devres_add(device->dev, p);
-	} else {
-		devres_free(p);
-	}
+	if (ret)
+		return ret;
 
-	return ret;
+	return devm_add_action_or_reset(device->dev, dmaenginem_async_device_unregister, device);
 }
 EXPORT_SYMBOL(dmaenginem_async_device_register);
@@ -1306,8 +1480,82 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
-/* dma_wait_for_async_tx - spin wait for a transaction to complete
- * @tx: in-flight transaction to wait on
+static inline int desc_check_and_set_metadata_mode(
+	struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
+{
+	/* Make sure that the metadata mode is not mixed */
+	if (!desc->desc_metadata_mode) {
+		if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
+			desc->desc_metadata_mode = mode;
+		else
+			return -ENOTSUPP;
+	} else if (desc->desc_metadata_mode != mode) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+				   void *data, size_t len)
+{
+	int ret;
+
+	if (!desc)
+		return -EINVAL;
+
+	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
+	if (ret)
+		return ret;
+
+	if (!desc->metadata_ops || !desc->metadata_ops->attach)
+		return -ENOTSUPP;
+
+	return desc->metadata_ops->attach(desc, data, len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
+
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+				      size_t *payload_len, size_t *max_len)
+{
+	int ret;
+
+	if (!desc)
+		return ERR_PTR(-EINVAL);
+
+	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
+		return ERR_PTR(-ENOTSUPP);
+
+	return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
+
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+				    size_t payload_len)
+{
+	int ret;
+
+	if (!desc)
+		return -EINVAL;
+
+	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+	if (ret)
+		return ret;
+
+	if (!desc->metadata_ops || !desc->metadata_ops->set_len)
+		return -ENOTSUPP;
+
+	return desc->metadata_ops->set_len(desc, payload_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
+
+/**
+ * dma_wait_for_async_tx - spin wait for a transaction to complete
+ * @tx:		in-flight transaction to wait on
  */
 enum dma_status
 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
@@ -1330,9 +1578,12 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 }
 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
 
-/* dma_run_dependencies - helper routine for dma drivers to process
- *	(start) dependent operations on their target channel
- * @tx: transaction with dependencies
+/**
+ * dma_run_dependencies - process dependent operations on the target channel
+ * @tx:		transaction with dependencies
+ *
+ * Helper routine for DMA drivers to process (start) dependent operations
+ * on their target channel.
  */
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
 {
@@ -1374,8 +1625,11 @@ static int __init dma_bus_init(void)
 	if (err)
 		return err;
 
-	return class_register(&dma_devclass);
-}
-arch_initcall(dma_bus_init);
+	err = class_register(&dma_devclass);
+	if (!err)
+		dmaengine_debugfs_init();
+
+	return err;
+}
+arch_initcall(dma_bus_init);
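
Note: the following sketch is illustrative and not part of the diff above. It
shows how a consumer driver would use the managed devm_dma_request_chan()
helper introduced by this change; the "rx" channel name and the foo_*
identifiers are assumptions made for the example.

#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct dma_chan *chan;

	/*
	 * Managed request: dma_release_channel() is queued as a devres
	 * action, so the channel (and its "slave"/"dma:rx" sysfs links)
	 * is released automatically on driver detach.
	 */
	chan = devm_dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR(chan))
		return dev_err_probe(&pdev->dev, PTR_ERR(chan),
				     "no \"rx\" DMA channel\n");

	platform_set_drvdata(pdev, chan);
	return 0;
}

Because dma_request_chan() only returns -EPROBE_DEFER when a matching
provider has not registered yet, the dev_err_probe() pattern above defers
the probe cleanly instead of failing outright.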
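Similarly illustrative (not from the diff): attaching client-provided
metadata to a prepared descriptor with the new
dmaengine_desc_attach_metadata() helper. foo_submit_with_metadata() is a
hypothetical wrapper; it assumes "desc" came from a prep call on a channel
whose driver supports the DESC_METADATA_CLIENT mode.

static int foo_submit_with_metadata(struct dma_async_tx_descriptor *desc,
				    void *buf, size_t len)
{
	int ret;

	/* Returns -ENOTSUPP unless the channel registered metadata_ops */
	ret = dmaengine_desc_attach_metadata(desc, buf, len);
	if (ret)
		return ret;

	return dma_submit_error(dmaengine_submit(desc)) ? -EIO : 0;
}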
