Diffstat (limited to 'drivers/clk/clk.c')
-rw-r--r--   drivers/clk/clk.c   536
1 file changed, 382 insertions, 154 deletions
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index e62552a75f08..85d2f2481acf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -6,21 +6,24 @@
  * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
  */
+#include <linux/clk/clk-conf.h>
+#include <linux/clkdev.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
-#include <linux/clk/clk-conf.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/device.h>
 #include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
 #include <linux/list.h>
-#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/of.h>
-#include <linux/device.h>
-#include <linux/init.h>
 #include <linux/pm_runtime.h>
 #include <linux/sched.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringhash.h>
 
 #include "clk.h"
 
@@ -33,10 +36,17 @@ static struct task_struct *enable_owner;
 static int prepare_refcnt;
 static int enable_refcnt;
 
+#define CLK_HASH_BITS 9
+static DEFINE_HASHTABLE(clk_hashtable, CLK_HASH_BITS);
+
 static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
 static const struct hlist_head *all_lists[] = {
         &clk_root_list,
         &clk_orphan_list,
@@ -59,6 +69,7 @@ struct clk_core {
         struct clk_hw *hw;
         struct module *owner;
         struct device *dev;
+        struct hlist_node rpm_node;
         struct device_node *of_node;
         struct clk_core *parent;
         struct clk_parent_map *parents;
@@ -82,6 +93,7 @@ struct clk_core {
         struct clk_duty duty;
         struct hlist_head children;
         struct hlist_node child_node;
+        struct hlist_node hashtable_node;
         struct hlist_head clks;
         unsigned int notifier_count;
 #ifdef CONFIG_DEBUG_FS
@@ -122,6 +134,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
         pm_runtime_put_sync(core->dev);
 }
 
+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+ * success. Otherwise the lock is released on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+        int ret;
+        struct clk_core *core, *failed;
+
+        /*
+         * Grab the list lock to prevent any new clks from being registered
+         * or unregistered until clk_pm_runtime_put_all().
+         */
+        mutex_lock(&clk_rpm_list_lock);
+
+        /*
+         * Runtime PM "get" all the devices that are needed for the clks
+         * currently registered. Do this without holding the prepare_lock, to
+         * avoid the deadlock.
+         */
+        hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+                ret = clk_pm_runtime_get(core);
+                if (ret) {
+                        failed = core;
+                        pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+                               dev_name(failed->dev), failed->name);
+                        goto err;
+                }
+        }
+
+        return 0;
+
+err:
+        hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+                if (core == failed)
+                        break;
+
+                clk_pm_runtime_put(core);
+        }
+        mutex_unlock(&clk_rpm_list_lock);
+
+        return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+        struct clk_core *core;
+
+        hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+                clk_pm_runtime_put(core);
+        mutex_unlock(&clk_rpm_list_lock);
+}
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+        struct device *dev = core->dev;
+
+        if (dev && pm_runtime_enabled(dev)) {
+                core->rpm_enabled = true;
+
+                mutex_lock(&clk_rpm_list_lock);
+                hlist_add_head(&core->rpm_node, &clk_rpm_list);
+                mutex_unlock(&clk_rpm_list_lock);
+        }
+}
+
 /*** locking ***/
 static void clk_prepare_lock(void)
 {
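
clk_pm_runtime_get_all() above acquires every provider's runtime PM reference
up front and rolls back the partial set on failure. A standalone C sketch of
that acquire-all-or-roll-back idiom (hypothetical get_one()/put_one()
resources, not the kernel API):

/* Sketch of the "get all, roll back on partial failure" idiom used by
 * clk_pm_runtime_get_all(); names and the forced failure are hypothetical. */
#include <stdio.h>

#define N 4

static int get_one(int i)  { return i == 2 ? -1 : 0; } /* fail on 2 for demo */
static void put_one(int i) { printf("put %d\n", i); }

static int get_all(void)
{
        int i, ret = 0;

        for (i = 0; i < N; i++) {
                ret = get_one(i);
                if (ret)
                        goto err;
        }
        return 0;
err:
        /* Undo only the references taken before the failing entry. */
        while (--i >= 0)
                put_one(i);
        return ret;
}

int main(void)
{
        return get_all() ? 1 : 0;
}
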
@@ -244,6 +339,17 @@ static bool clk_core_is_enabled(struct clk_core *core)
         }
 }
 
+        /*
+         * This could be called with the enable lock held, or from atomic
+         * context. If the parent isn't enabled already, we can't do
+         * anything here. We can also assume this clock isn't enabled.
+         */
+        if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent)
+                if (!clk_core_is_enabled(core->parent)) {
+                        ret = false;
+                        goto done;
+                }
+
         ret = core->ops->is_enabled(core->hw);
 done:
         if (core->rpm_enabled)
@@ -266,6 +372,18 @@ const char *clk_hw_get_name(const struct clk_hw *hw)
 }
 EXPORT_SYMBOL_GPL(clk_hw_get_name);
 
+struct device *clk_hw_get_dev(const struct clk_hw *hw)
+{
+        return hw->core->dev;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_dev);
+
+struct device_node *clk_hw_get_of_node(const struct clk_hw *hw)
+{
+        return hw->core->of_node;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_of_node);
+
 struct clk_hw *__clk_get_hw(struct clk *clk)
 {
         return !clk ? NULL : clk->core->hw;
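
The new clk_hw_get_dev() and clk_hw_get_of_node() accessors let a provider
driver reach its struct device and DT node from a clk_hw. A hypothetical
provider callback using them (the foo_* names are illustrative, not part of
this patch):

#include <linux/clk-provider.h>
#include <linux/device.h>

static unsigned long foo_recalc_rate(struct clk_hw *hw,
                                     unsigned long parent_rate)
{
        struct device *dev = clk_hw_get_dev(hw);
        struct device_node *np = clk_hw_get_of_node(hw);

        dev_dbg(dev, "recalc for %pOF\n", np);

        return parent_rate / 2; /* placeholder fixed divider */
}
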
@@ -284,45 +402,20 @@ struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
 }
 EXPORT_SYMBOL_GPL(clk_hw_get_parent);
 
-static struct clk_core *__clk_lookup_subtree(const char *name,
-                                             struct clk_core *core)
-{
-        struct clk_core *child;
-        struct clk_core *ret;
-
-        if (!strcmp(core->name, name))
-                return core;
-
-        hlist_for_each_entry(child, &core->children, child_node) {
-                ret = __clk_lookup_subtree(name, child);
-                if (ret)
-                        return ret;
-        }
-
-        return NULL;
-}
-
 static struct clk_core *clk_core_lookup(const char *name)
 {
-        struct clk_core *root_clk;
-        struct clk_core *ret;
+        struct clk_core *core;
+        u32 hash;
 
         if (!name)
                 return NULL;
 
-        /* search the 'proper' clk tree first */
-        hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
-                ret = __clk_lookup_subtree(name, root_clk);
-                if (ret)
-                        return ret;
-        }
+        hash = full_name_hash(NULL, name, strlen(name));
 
-        /* if not found, then search the orphan tree */
-        hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
-                ret = __clk_lookup_subtree(name, root_clk);
-                if (ret)
-                        return ret;
-        }
+        /* search the hashtable */
+        hash_for_each_possible(clk_hashtable, core, hashtable_node, hash)
+                if (!strcmp(core->name, name))
+                        return core;
 
         return NULL;
 }
@@ -407,6 +500,9 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
         if (IS_ERR(hw))
                 return ERR_CAST(hw);
 
+        if (!hw)
+                return NULL;
+
         return hw->core;
 }
@@ -506,12 +602,6 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
 }
 EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
 
-bool clk_hw_rate_is_protected(const struct clk_hw *hw)
-{
-        return clk_core_rate_is_protected(hw->core);
-}
-EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
-
 bool clk_hw_is_enabled(const struct clk_hw *hw)
 {
         return clk_core_is_enabled(hw->core);
@@ -583,45 +673,59 @@ clk_core_forward_rate_req(struct clk_core *core,
         req->max_rate = old_req->max_rate;
 }
 
-int clk_mux_determine_rate_flags(struct clk_hw *hw,
-                                 struct clk_rate_request *req,
-                                 unsigned long flags)
+static int
+clk_core_determine_rate_no_reparent(struct clk_hw *hw,
+                                    struct clk_rate_request *req)
 {
-        struct clk_core *core = hw->core, *parent, *best_parent = NULL;
-        int i, num_parents, ret;
-        unsigned long best = 0;
-
-        /* if NO_REPARENT flag set, pass through to current parent */
-        if (core->flags & CLK_SET_RATE_NO_REPARENT) {
-                parent = core->parent;
-                if (core->flags & CLK_SET_RATE_PARENT) {
-                        struct clk_rate_request parent_req;
+        struct clk_core *core = hw->core;
+        struct clk_core *parent = core->parent;
+        unsigned long best;
+        int ret;
 
-                        if (!parent) {
-                                req->rate = 0;
-                                return 0;
-                        }
+        if (core->flags & CLK_SET_RATE_PARENT) {
+                struct clk_rate_request parent_req;
 
-                        clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
+                if (!parent) {
+                        req->rate = 0;
+                        return 0;
+                }
 
-                        trace_clk_rate_request_start(&parent_req);
+                clk_core_forward_rate_req(core, req, parent, &parent_req,
+                                          req->rate);
 
-                        ret = clk_core_round_rate_nolock(parent, &parent_req);
-                        if (ret)
-                                return ret;
+                trace_clk_rate_request_start(&parent_req);
 
-                        trace_clk_rate_request_done(&parent_req);
+                ret = clk_core_round_rate_nolock(parent, &parent_req);
+                if (ret)
+                        return ret;
 
-                        best = parent_req.rate;
-                } else if (parent) {
-                        best = clk_core_get_rate_nolock(parent);
-                } else {
-                        best = clk_core_get_rate_nolock(core);
-                }
+                trace_clk_rate_request_done(&parent_req);
 
-                goto out;
+                best = parent_req.rate;
+        } else if (parent) {
+                best = clk_core_get_rate_nolock(parent);
+        } else {
+                best = clk_core_get_rate_nolock(core);
         }
 
+        req->best_parent_rate = best;
+        req->rate = best;
+
+        return 0;
+}
+
+int clk_mux_determine_rate_flags(struct clk_hw *hw,
+                                 struct clk_rate_request *req,
+                                 unsigned long flags)
+{
+        struct clk_core *core = hw->core, *parent, *best_parent = NULL;
+        int i, num_parents, ret;
+        unsigned long best = 0;
+
+        /* if NO_REPARENT flag set, pass through to current parent */
+        if (core->flags & CLK_SET_RATE_NO_REPARENT)
+                return clk_core_determine_rate_no_reparent(hw, req);
+
         /* find the parent that can provide the fastest rate <= rate */
         num_parents = core->num_parents;
         for (i = 0; i < num_parents; i++) {
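
clk_core_lookup() now hashes the clock name once and scans a single bucket
instead of recursing through both the root and orphan trees. A module-context
sketch of the same hashtable.h pattern (the demo_* names are hypothetical;
removal and locking are omitted for brevity):

#include <linux/hashtable.h>
#include <linux/string.h>
#include <linux/stringhash.h>

#define DEMO_HASH_BITS 4
static DEFINE_HASHTABLE(demo_table, DEMO_HASH_BITS);

struct demo_entry {
        const char *name;
        struct hlist_node node;
};

static void demo_add(struct demo_entry *e)
{
        hash_add(demo_table, &e->node,
                 full_name_hash(NULL, e->name, strlen(e->name)));
}

static struct demo_entry *demo_lookup(const char *name)
{
        struct demo_entry *e;
        u32 hash = full_name_hash(NULL, name, strlen(name));

        /*
         * Only the bucket for 'hash' is scanned; collisions are resolved
         * by the final strcmp(), exactly as in clk_core_lookup().
         */
        hash_for_each_possible(demo_table, e, node, hash)
                if (!strcmp(e->name, name))
                        return e;

        return NULL;
}
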
@@ -659,9 +763,7 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
         if (!best_parent)
                 return -EINVAL;
 
-out:
-        if (best_parent)
-                req->best_parent_hw = best_parent->hw;
+        req->best_parent_hw = best_parent->hw;
         req->best_parent_rate = best;
         req->rate = best;
 
@@ -761,6 +863,25 @@ int __clk_mux_determine_rate_closest(struct clk_hw *hw,
 }
 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
 
+/*
+ * clk_hw_determine_rate_no_reparent - clk_ops::determine_rate implementation for a clk that doesn't reparent
+ * @hw: mux type clk to determine rate on
+ * @req: rate request, also used to return preferred frequency
+ *
+ * Helper for finding best parent rate to provide a given frequency.
+ * This can be used directly as a determine_rate callback (e.g. for a
+ * mux), or from a more complex clock that may combine a mux with other
+ * operations.
+ *
+ * Returns: 0 on success, -EERROR value on error
+ */
+int clk_hw_determine_rate_no_reparent(struct clk_hw *hw,
+                                      struct clk_rate_request *req)
+{
+        return clk_core_determine_rate_no_reparent(hw, req);
+}
+EXPORT_SYMBOL_GPL(clk_hw_determine_rate_no_reparent);
+
 /*** clk api ***/
 
 static void clk_core_rate_unprotect(struct clk_core *core)
@@ -897,6 +1018,25 @@ int clk_rate_exclusive_get(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
 
+static void devm_clk_rate_exclusive_put(void *data)
+{
+        struct clk *clk = data;
+
+        clk_rate_exclusive_put(clk);
+}
+
+int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
+{
+        int ret;
+
+        ret = clk_rate_exclusive_get(clk);
+        if (ret)
+                return ret;
+
+        return devm_add_action_or_reset(dev, devm_clk_rate_exclusive_put, clk);
+}
+EXPORT_SYMBOL_GPL(devm_clk_rate_exclusive_get);
+
 static void clk_core_unprepare(struct clk_core *core)
 {
         lockdep_assert_held(&prepare_lock);
@@ -1055,12 +1195,12 @@ static void clk_core_disable(struct clk_core *core)
         if (--core->enable_count > 0)
                 return;
 
-        trace_clk_disable_rcuidle(core);
+        trace_clk_disable(core);
 
         if (core->ops->disable)
                 core->ops->disable(core->hw);
 
-        trace_clk_disable_complete_rcuidle(core);
+        trace_clk_disable_complete(core);
 
         clk_core_disable(core->parent);
 }
@@ -1114,12 +1254,12 @@ static int clk_core_enable(struct clk_core *core)
         if (ret)
                 return ret;
 
-        trace_clk_enable_rcuidle(core);
+        trace_clk_enable(core);
 
         if (core->ops->enable)
                 ret = core->ops->enable(core->hw);
 
-        trace_clk_enable_complete_rcuidle(core);
+        trace_clk_enable_complete(core);
 
         if (ret) {
                 clk_core_disable(core->parent);
@@ -1317,9 +1457,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
         if (core->flags & CLK_IGNORE_UNUSED)
                 return;
 
-        if (clk_pm_runtime_get(core))
-                return;
-
         if (clk_core_is_prepared(core)) {
                 trace_clk_unprepare(core);
                 if (core->ops->unprepare_unused)
@@ -1328,8 +1465,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
                         core->ops->unprepare(core->hw);
                 trace_clk_unprepare_complete(core);
         }
-
-        clk_pm_runtime_put(core);
 }
 
 static void __init clk_disable_unused_subtree(struct clk_core *core)
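
The devm_clk_rate_exclusive_get() helper added above ties the exclusive rate
reference to the consumer device's lifetime. A hypothetical probe function
showing the intended usage (foo_probe and the 100 MHz target are illustrative
only):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        struct clk *clk;
        int ret;

        clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        /* Exclusivity is dropped automatically when foo is unbound. */
        ret = devm_clk_rate_exclusive_get(&pdev->dev, clk);
        if (ret)
                return ret;

        /* No other consumer can change the rate behind our back now. */
        return clk_set_rate(clk, 100000000);
}
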
@@ -1345,9 +1480,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
         if (core->flags & CLK_OPS_PARENT_ENABLE)
                 clk_core_prepare_enable(core->parent);
 
-        if (clk_pm_runtime_get(core))
-                goto unprepare_out;
-
         flags = clk_enable_lock();
 
         if (core->enable_count)
@@ -1372,8 +1504,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
 unlock_out:
         clk_enable_unlock(flags);
 
-        clk_pm_runtime_put(core);
-unprepare_out:
         if (core->flags & CLK_OPS_PARENT_ENABLE)
                 clk_core_disable_unprepare(core->parent);
 }
@@ -1389,12 +1519,22 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
 static int __init clk_disable_unused(void)
 {
         struct clk_core *core;
+        int ret;
 
         if (clk_ignore_unused) {
                 pr_warn("clk: Not disabling unused clocks\n");
                 return 0;
         }
 
+        pr_info("clk: Disabling unused clocks\n");
+
+        ret = clk_pm_runtime_get_all();
+        if (ret)
+                return ret;
+
+        /*
+         * Grab the prepare lock to keep the clk topology stable while iterating
+         * over clks.
+         */
         clk_prepare_lock();
 
         hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1411,6 +1551,8 @@ static int __init clk_disable_unused(void)
 
         clk_prepare_unlock();
 
+        clk_pm_runtime_put_all();
+
         return 0;
 }
 late_initcall_sync(clk_disable_unused);
@@ -1536,6 +1678,7 @@ void clk_hw_forward_rate_request(const struct clk_hw *hw,
                                   parent->core, req,
                                   parent_rate);
 }
+EXPORT_SYMBOL_GPL(clk_hw_forward_rate_request);
 
 static bool clk_core_can_round(struct clk_core * const core)
 {
@@ -2134,7 +2277,7 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
         unsigned long min_rate;
         unsigned long max_rate;
         int p_index = 0;
-        long ret;
+        int ret;
 
         /* sanity */
         if (IS_ERR_OR_NULL(core))
@@ -3143,28 +3286,41 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
                                  int level)
 {
         int phase;
+        struct clk *clk_user;
+        int multi_node = 0;
 
-        seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
+        seq_printf(s, "%*s%-*s %-7d %-8d %-8d %-11lu %-10lu ",
                    level * 3 + 1, "",
-                   30 - level * 3, c->name,
+                   35 - level * 3, c->name,
                    c->enable_count, c->prepare_count, c->protect_count,
                    clk_core_get_rate_recalc(c),
                    clk_core_get_accuracy_recalc(c));
 
         phase = clk_core_get_phase(c);
         if (phase >= 0)
-                seq_printf(s, "%5d", phase);
+                seq_printf(s, "%-5d", phase);
         else
                 seq_puts(s, "-----");
 
-        seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000));
+        seq_printf(s, " %-6d", clk_core_get_scaled_duty_cycle(c, 100000));
 
         if (c->ops->is_enabled)
-                seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 'Y' : 'N');
+                seq_printf(s, " %5c ", clk_core_is_enabled(c) ? 'Y' : 'N');
         else if (!c->ops->enable)
-                seq_printf(s, " %9c\n", 'Y');
+                seq_printf(s, " %5c ", 'Y');
         else
-                seq_printf(s, " %9c\n", '?');
+                seq_printf(s, " %5c ", '?');
+
+        hlist_for_each_entry(clk_user, &c->clks, clks_node) {
+                seq_printf(s, "%*s%-*s %-25s\n",
+                           level * 3 + 2 + 105 * multi_node, "",
+                           30,
+                           clk_user->dev_id ? clk_user->dev_id : "deviceless",
+                           clk_user->con_id ? clk_user->con_id : "no_connection_id");
+
+                multi_node = 1;
+        }
+
 }
 
@@ -3172,9 +3328,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
 {
         struct clk_core *child;
 
-        clk_pm_runtime_get(c);
         clk_summary_show_one(s, c, level);
-        clk_pm_runtime_put(c);
 
         hlist_for_each_entry(child, &c->children, child_node)
                 clk_summary_show_subtree(s, child, level + 1);
@@ -3183,11 +3337,16 @@
 static int clk_summary_show(struct seq_file *s, void *data)
 {
         struct clk_core *c;
-        struct hlist_head **lists = (struct hlist_head **)s->private;
+        struct hlist_head **lists = s->private;
+        int ret;
 
-        seq_puts(s, "                                 enable  prepare  protect                                duty  hardware\n");
-        seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle    enable\n");
-        seq_puts(s, "-------------------------------------------------------------------------------------------------------\n");
+        seq_puts(s, "                                 enable  prepare  protect                                duty  hardware                connection\n");
+        seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle    enable   consumer                         id\n");
+        seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
+
+        ret = clk_pm_runtime_get_all();
+        if (ret)
+                return ret;
 
         clk_prepare_lock();
 
@@ -3196,6 +3355,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
                         clk_summary_show_subtree(s, c, 0);
 
         clk_prepare_unlock();
+        clk_pm_runtime_put_all();
 
         return 0;
 }
@@ -3242,9 +3402,15 @@ static int clk_dump_show(struct seq_file *s, void *data)
 {
         struct clk_core *c;
         bool first_node = true;
-        struct hlist_head **lists = (struct hlist_head **)s->private;
+        struct hlist_head **lists = s->private;
+        int ret;
+
+        ret = clk_pm_runtime_get_all();
+        if (ret)
+                return ret;
 
         seq_putc(s, '{');
+
         clk_prepare_lock();
 
         for (; *lists; lists++) {
@@ -3257,6 +3423,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
         }
 
         clk_prepare_unlock();
+        clk_pm_runtime_put_all();
 
         seq_puts(s, "}\n");
         return 0;
@@ -3284,6 +3451,21 @@ static int clk_rate_set(void *data, u64 val)
 
 #define clk_rate_mode        0644
 
+static int clk_phase_set(void *data, u64 val)
+{
+        struct clk_core *core = data;
+        int degrees = do_div(val, 360);
+        int ret;
+
+        clk_prepare_lock();
+        ret = clk_core_set_phase_nolock(core, degrees);
+        clk_prepare_unlock();
+
+        return ret;
+}
+
+#define clk_phase_mode        0644
+
 static int clk_prepare_enable_set(void *data, u64 val)
 {
         struct clk_core *core = data;
@@ -3311,6 +3493,9 @@ DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
 #else
 #define clk_rate_set        NULL
 #define clk_rate_mode        0444
+
+#define clk_phase_set        NULL
+#define clk_phase_mode        0644
 #endif
 
 static int clk_rate_get(void *data, u64 *val)
@@ -3326,6 +3511,16 @@ static int clk_rate_get(void *data, u64 *val)
 
 DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
 
+static int clk_phase_get(void *data, u64 *val)
+{
+        struct clk_core *core = data;
+
+        *val = core->phase;
+        return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(clk_phase_fops, clk_phase_get, clk_phase_set, "%llu\n");
+
 static const struct {
         unsigned long flag;
         const char *name;
@@ -3371,6 +3566,7 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
                                  unsigned int i, char terminator)
 {
         struct clk_core *parent;
+        const char *name = NULL;
 
         /*
          * Go through the following options to fetch a parent's name.
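
clk_phase becomes a read-write debugfs file built from a get/set pair via
DEFINE_DEBUGFS_ATTRIBUTE(). A minimal sketch of that pattern outside the clk
core (the demo_* names are hypothetical):

#include <linux/debugfs.h>

static u64 demo_value;

static int demo_get(void *data, u64 *val)
{
        *val = demo_value;
        return 0;
}

static int demo_set(void *data, u64 val)
{
        demo_value = val % 360; /* keep a phase-like 0..359 range */
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");

static void demo_debugfs_init(struct dentry *root)
{
        /* 0644 only where writes are meant to be allowed */
        debugfs_create_file("demo_phase", 0644, root, NULL, &demo_fops);
}
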
@@ -3385,18 +3581,20 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
          * registered (yet).
          */
         parent = clk_core_get_parent_by_index(core, i);
-        if (parent)
+        if (parent) {
                 seq_puts(s, parent->name);
-        else if (core->parents[i].name)
+        } else if (core->parents[i].name) {
                 seq_puts(s, core->parents[i].name);
-        else if (core->parents[i].fw_name)
+        } else if (core->parents[i].fw_name) {
                 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
-        else if (core->parents[i].index >= 0)
-                seq_puts(s,
-                         of_clk_get_parent_name(core->of_node,
-                                                core->parents[i].index));
-        else
-                seq_puts(s, "(missing)");
+        } else {
+                if (core->parents[i].index >= 0)
+                        name = of_clk_get_parent_name(core->of_node, core->parents[i].index);
+                if (!name)
+                        name = "(missing)";
+
+                seq_puts(s, name);
+        }
 
         seq_putc(s, terminator);
 }
@@ -3516,7 +3714,8 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
         debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
         debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
         debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
-        debugfs_create_u32("clk_phase", 0444, root, &core->phase);
+        debugfs_create_file("clk_phase", clk_phase_mode, root, core,
+                            &clk_phase_fops);
         debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
         debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
         debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
@@ -3732,6 +3931,13 @@ static int __clk_core_init(struct clk_core *core)
                 goto out;
         }
 
+        if (core->ops->set_parent && !core->ops->determine_rate) {
+                pr_err("%s: %s must implement .set_parent & .determine_rate\n",
+                       __func__, core->name);
+                ret = -EINVAL;
+                goto out;
+        }
+
         if (core->num_parents > 1 && !core->ops->get_parent) {
                 pr_err("%s: %s must implement .get_parent as it has multi parents\n",
                        __func__, core->name);
@@ -3789,6 +3995,8 @@ static int __clk_core_init(struct clk_core *core)
                 hlist_add_head(&core->child_node, &clk_orphan_list);
                 core->orphan = true;
         }
+        hash_add(clk_hashtable, &core->hashtable_node,
+                 full_name_hash(NULL, core->name, strlen(core->name)));
 
         /*
          * Set clk's accuracy. The preferred method is to use
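
__clk_core_init() now rejects clks that implement .set_parent without
.determine_rate. A hypothetical mux ops table that satisfies the new check
using the generic helper (the foo_* names and register stubs are illustrative
only):

#include <linux/clk-provider.h>

static u8 foo_mux_get_parent(struct clk_hw *hw)
{
        return 0; /* a real driver reads the mux field from hardware */
}

static int foo_mux_set_parent(struct clk_hw *hw, u8 index)
{
        return 0; /* a real driver programs the mux field */
}

static const struct clk_ops foo_mux_ops = {
        .get_parent     = foo_mux_get_parent,
        .set_parent     = foo_mux_set_parent,
        /*
         * Required by the new __clk_core_init() check; a mux that must
         * not reparent could use clk_hw_determine_rate_no_reparent()
         * (added earlier in this diff) instead.
         */
        .determine_rate = __clk_mux_determine_rate,
};
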
@@ -3861,12 +4069,11 @@ static int __clk_core_init(struct clk_core *core)
         }
 
         clk_core_reparent_orphans_nolock();
-
-        kref_init(&core->ref);
 out:
         clk_pm_runtime_put(core);
 unlock:
         if (ret) {
+                hash_del(&core->hashtable_node);
                 hlist_del_init(&core->child_node);
                 core->hw->core = NULL;
         }
@@ -4091,6 +4298,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
         kfree(core->parents);
 }
 
+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+        struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+        if (core->rpm_enabled) {
+                mutex_lock(&clk_rpm_list_lock);
+                hlist_del(&core->rpm_node);
+                mutex_unlock(&clk_rpm_list_lock);
+        }
+
+        clk_core_free_parent_map(core);
+        kfree_const(core->name);
+        kfree(core);
+}
+
 static struct clk *
 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 {
@@ -4111,6 +4334,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
                 goto fail_out;
         }
 
+        kref_init(&core->ref);
+
         core->name = kstrdup_const(init->name, GFP_KERNEL);
         if (!core->name) {
                 ret = -ENOMEM;
@@ -4123,9 +4348,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
         }
         core->ops = init->ops;
 
-        if (dev && pm_runtime_enabled(dev))
-                core->rpm_enabled = true;
         core->dev = dev;
+        clk_pm_runtime_init(core);
         core->of_node = np;
         if (dev && dev->driver)
                 core->owner = dev->driver->owner;
@@ -4165,13 +4389,18 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
         hw->clk = NULL;
 
 fail_create_clk:
-        clk_core_free_parent_map(core);
 fail_parents:
 fail_ops:
-        kfree_const(core->name);
 fail_name:
-        kfree(core);
+        kref_put(&core->ref, __clk_release);
 fail_out:
+        if (dev) {
+                dev_err_probe(dev, ret, "failed to register clk '%s' (%pS)\n",
+                              init->name, hw);
+        } else {
+                pr_err("%pOF: error %pe: failed to register clk '%s' (%pS)\n",
+                       np, ERR_PTR(ret), init->name, hw);
+        }
         return ERR_PTR(ret);
 }
 
@@ -4250,18 +4479,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
 }
 EXPORT_SYMBOL_GPL(of_clk_hw_register);
 
-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
-{
-        struct clk_core *core = container_of(ref, struct clk_core, ref);
-
-        lockdep_assert_held(&prepare_lock);
-
-        clk_core_free_parent_map(core);
-        kfree_const(core->name);
-        kfree(core);
-}
-
 /*
  * Empty clk_ops for unregistered clocks. These are used temporarily
  * after clk_unregister() was called on a clock and until last clock
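
__clk_register() now takes its kref before anything else can fail, so every
error path funnels through kref_put() and the relocated __clk_release(). A
minimal kref lifecycle sketch of that flow (the demo_* names are
hypothetical):

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
        struct kref ref;
};

static void demo_release(struct kref *ref)
{
        /* Runs exactly once, when the last reference is dropped. */
        kfree(container_of(ref, struct demo_obj, ref));
}

static struct demo_obj *demo_create(void)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                kref_init(&obj->ref); /* refcount starts at 1 */
        return obj;
}

static void demo_put(struct demo_obj *obj)
{
        kref_put(&obj->ref, demo_release); /* frees on last put */
}
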
@@ -4288,11 +4505,18 @@ static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
         return -ENXIO;
 }
 
+static int clk_nodrv_determine_rate(struct clk_hw *hw,
+                                    struct clk_rate_request *req)
+{
+        return -ENXIO;
+}
+
 static const struct clk_ops clk_nodrv_ops = {
         .enable         = clk_nodrv_prepare_enable,
         .disable        = clk_nodrv_disable_unprepare,
         .prepare        = clk_nodrv_prepare_enable,
         .unprepare      = clk_nodrv_disable_unprepare,
+        .determine_rate = clk_nodrv_determine_rate,
         .set_rate       = clk_nodrv_set_rate,
         .set_parent     = clk_nodrv_set_parent,
 };
@@ -4345,7 +4569,8 @@ void clk_unregister(struct clk *clk)
         if (ops == &clk_nodrv_ops) {
                 pr_err("%s: unregistered clock: %s\n", __func__,
                        clk->core->name);
-                goto unlock;
+                clk_prepare_unlock();
+                return;
         }
         /*
          * Assign empty clock ops for consumers that might still hold
@@ -4370,6 +4595,7 @@ void clk_unregister(struct clk *clk)
 
         clk_core_evict_parent_cache(clk->core);
 
+        hash_del(&clk->core->hashtable_node);
         hlist_del_init(&clk->core->child_node);
 
         if (clk->core->prepare_count)
@@ -4379,11 +4605,10 @@ void clk_unregister(struct clk *clk)
         if (clk->core->protect_count)
                 pr_warn("%s: unregistering protected clock: %s\n",
                         __func__, clk->core->name);
+        clk_prepare_unlock();
 
         kref_put(&clk->core->ref, __clk_release);
         free_clk(clk);
-unlock:
-        clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unregister);
 
@@ -4536,19 +4761,17 @@ void __clk_put(struct clk *clk)
                 clk->exclusive_count = 0;
         }
 
-        hlist_del(&clk->clks_node);
+        clk_core_unlink_consumer(clk);
 
         /* If we had any boundaries on that clock, let's drop them. */
         if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
                 clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
 
-        owner = clk->core->owner;
-        kref_put(&clk->core->ref, __clk_release);
-
         clk_prepare_unlock();
 
+        owner = clk->core->owner;
+        kref_put(&clk->core->ref, __clk_release);
         module_put(owner);
-
         free_clk(clk);
 }
@@ -4682,6 +4905,7 @@ int devm_clk_notifier_register(struct device *dev, struct clk *clk,
         if (!ret) {
                 devres->clk = clk;
                 devres->nb = nb;
+                devres_add(dev, devres);
         } else {
                 devres_free(devres);
         }
@@ -4869,8 +5093,8 @@ static struct device_node *get_clk_provider_node(struct device *dev)
         np = dev->of_node;
         parent_np = dev->parent ? dev->parent->of_node : NULL;
 
-        if (!of_find_property(np, "#clock-cells", NULL))
-                if (of_find_property(parent_np, "#clock-cells", NULL))
+        if (!of_property_present(np, "#clock-cells"))
+                if (of_property_present(parent_np, "#clock-cells"))
                         np = parent_np;
 
         return np;
@@ -5007,7 +5231,7 @@ static int of_parse_clkspec(const struct device_node *np, int index,
                  * clocks.
                  */
                 np = np->parent;
-                if (np && !of_get_property(np, "clock-ranges", NULL))
+                if (np && !of_property_present(np, "clock-ranges"))
                         break;
                 index = 0;
         }
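
The devm_clk_notifier_register() hunk above adds the devres_add() call that
actually queues the resource for automatic release; without it the allocation
was orphaned. A sketch of the complete devres idiom (the demo_* names are
hypothetical):

#include <linux/device.h>
#include <linux/slab.h>

struct demo_devres {
        int token;
};

static void demo_devres_release(struct device *dev, void *res)
{
        /* Undo whatever the registration did; called on driver detach. */
}

static int demo_register(struct device *dev, int token)
{
        struct demo_devres *dr;

        dr = devres_alloc(demo_devres_release, sizeof(*dr), GFP_KERNEL);
        if (!dr)
                return -ENOMEM;

        dr->token = token;
        devres_add(dev, dr);    /* the step the bug fix adds */
        return 0;
}
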
@@ -5039,6 +5263,10 @@ of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
         if (!clkspec)
                 return ERR_PTR(-EINVAL);
 
+        /* Check if node in clkspec is in disabled/fail state */
+        if (!of_device_is_available(clkspec->np))
+                return ERR_PTR(-ENOENT);
+
         mutex_lock(&of_clk_mutex);
         list_for_each_entry(provider, &of_clk_providers, link) {
                 if (provider->node == clkspec->np) {
@@ -5139,9 +5367,8 @@ EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
 const char *of_clk_get_parent_name(const struct device_node *np, int index)
 {
         struct of_phandle_args clkspec;
-        struct property *prop;
         const char *clk_name;
-        const __be32 *vp;
+        bool found = false;
         u32 pv;
         int rc;
         int count;
@@ -5158,16 +5385,19 @@ const char *of_clk_get_parent_name(const struct device_node *np, int index)
         /* if there is an indices property, use it to transfer the index
          * specified into an array offset for the clock-output-names property.
          */
-        of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
+        of_property_for_each_u32(clkspec.np, "clock-indices", pv) {
                 if (index == pv) {
                         index = count;
+                        found = true;
                         break;
                 }
                 count++;
         }
         /* We went off the end of 'clock-indices' without finding it */
-        if (prop && !vp)
+        if (of_property_present(clkspec.np, "clock-indices") && !found) {
+                of_node_put(clkspec.np);
                 return NULL;
+        }
 
         if (of_property_read_string_index(clkspec.np, "clock-output-names",
                                           index,
@@ -5279,14 +5509,12 @@ static int parent_ready(struct device_node *np)
 int of_clk_detect_critical(struct device_node *np, int index,
                            unsigned long *flags)
 {
-        struct property *prop;
-        const __be32 *cur;
         uint32_t idx;
 
         if (!np || !flags)
                 return -EINVAL;
 
-        of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
+        of_property_for_each_u32(np, "clock-critical", idx)
                 if (index == idx)
                         *flags |= CLK_IS_CRITICAL;
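
Both of the of_property_for_each_u32() hunks above use the newer
three-argument form of the macro, which drops the struct property and __be32
cursor arguments and leaves only the value variable. A small sketch of the
new iteration style (demo_has_index() is hypothetical):

#include <linux/of.h>

static bool demo_has_index(struct device_node *np, u32 wanted)
{
        u32 pv;

        /* The macro now manages its cursor internally. */
        of_property_for_each_u32(np, "clock-indices", pv)
                if (pv == wanted)
                        return true;

        return false;
}
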
